| text | repo_name | path | language | license | size | score |
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from thumbnails.conf import settings
from thumbnails.engines import DummyEngine
from thumbnails.helpers import get_engine, generate_filename, get_cache_backend
from thumbnails.images import SourceFile, Thumbnail
__version__ = '0.5.1'
def get_thumbnail(original, size, **options):
"""
Creates or gets an already created thumbnail for the given image with the given size and
options.
:param original: File-path, url or base64-encoded string of the image that you want a
thumbnail of.
:param size: String with the wanted thumbnail size. Of the form: ``200x200``, ``200`` or
``x200``.
:param crop: Crop setting, one of ``center``, ``top``, ``right``, ``bottom`` or ``left``.
:param force: If set to ``True`` the thumbnail will be created even if it already exists.
:param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
saving the thumbnail.
:param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
up if necessary.
:param colormode: Overrides ``THUMBNAIL_COLORMODE``, the default colormode for thumbnails.
Supports all values supported by Pillow. In other engines there is a
best-effort translation from Pillow modes to the modes supported by the
current engine.
:param format: Overrides the format the thumbnail will be saved in. This will override both the
detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
:return: A Thumbnail object
"""
engine = get_engine()
cache = get_cache_backend()
original = SourceFile(original)
crop = options.get('crop', None)
options = engine.evaluate_options(options)
thumbnail_name = generate_filename(original, size, crop)
if settings.THUMBNAIL_DUMMY:
engine = DummyEngine()
return engine.get_thumbnail(thumbnail_name, engine.parse_size(size), crop, options)
cached = cache.get(thumbnail_name)
force = options is not None and 'force' in options and options['force']
if not force and cached:
return cached
thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
if force or not thumbnail.exists:
size = engine.parse_size(size)
thumbnail.image = engine.get_thumbnail(original, size, crop, options)
thumbnail.save(options)
for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
resolution_size = engine.calculate_alternative_resolution_size(resolution, size)
image = engine.get_thumbnail(original, resolution_size, crop, options)
thumbnail.save_alternative_resolution(resolution, image, options)
cache.set(thumbnail)
return thumbnail
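# A minimal usage sketch (hypothetical file path and options; assumes an engine
# and cache backend are configured as described in the docstring above):
#
#     thumbnail = get_thumbnail('photos/cat.jpg', '200x200', crop='center', quality=90)
#     # thumbnail is a Thumbnail object; pass force=True to regenerate it.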
| relekang/python-thumbnails | thumbnails/__init__.py | Python | mit | 2,899 | 0.005174 |
# Copyright (C) 2011, The SAO/NASA Astrophysics Data System
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
File containing global functions
'''
import sys
import os
import time
from pipeline_settings import VERBOSE
def msg(message, verbose=VERBOSE):
"""
Prints a debug message.
"""
#raise "YOU MUST USE THE LOGGING MODULE"
if verbose:
print time.strftime("%Y-%m-%d %H:%M:%S"), '---', message
def manage_check_error(msg_str, type_check, logger):
"""function that prints a warning or
raises an exception according to the type of check"""
from merger.merger_errors import GenericError
if type_check == 'warnings':
logger.warning(' CHECK WARNING: %s' % msg_str)
elif type_check == 'errors':
logger.critical(msg_str)
raise GenericError(msg_str)
else:
error_string = 'Type of check "%s" cannot be handled by the "manage_check_error" function.' % type_check
logger.critical(error_string)
raise GenericError(error_string)
return None
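# Behaviour sketch (hypothetical message and logger): with type_check set to
# 'warnings' the message is only logged, while 'errors' logs it and raises
# GenericError:
#
#     manage_check_error('missing bibcode', 'warnings', logger)  # logs a warning
#     manage_check_error('missing bibcode', 'errors', logger)    # raises GenericError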
| adsabs/ADS_records_merger | pipeline_log_functions.py | Python | gpl-3.0 | 1,639 | 0.003661 |
import os
from iotile.core.dev import ComponentRegistry
from iotile.ship.recipe import RecipeObject
from iotile.ship.exceptions import RecipeNotFoundError
class RecipeManager:
"""A class that maintains a list of installed recipes and recipe actions.
It allows fetching recipes by name and automatically building RecipeObjects
from textual descriptions.
The RecipeManager maintains a dictionary of RecipeAction objects that it
compiles from all installed iotile packages. It passes this dictionary to
any Recipe that is created from it so the recipe can find any recipe
actions that it needs.
The RecipeManager finds RecipeActions by looking for plugins that
are registered with pkg_resources.
"""
def __init__(self):
self._recipe_actions = {}
self._recipe_resources = {}
self._recipes = {}
reg = ComponentRegistry()
for name, action in reg.load_extensions('iotile.recipe_action', product_name='build_step'):
self._recipe_actions[name] = action
for name, resource in reg.load_extensions('iotile.recipe_resource', product_name='build_resource'):
self._recipe_resources[name] = resource
def is_valid_action(self, name):
"""Check if a name describes a valid action.
Args:
name (str): The name of the action to check
Returns:
bool: Whether the action is known and valid.
"""
return self._recipe_actions.get(name, None) is not None
def is_valid_recipe(self, recipe_name):
"""Check if a recipe is known and valid.
Args:
recipe_name (str): The name of the recipe to check
Returns:
bool: Whether the recipe is known and valid.
"""
return self._recipes.get(recipe_name, None) is not None
def add_recipe_folder(self, recipe_folder, whitelist=None):
"""Add all recipes inside a folder to this RecipeManager with an optional whitelist.
Args:
recipe_folder (str): The path to the folder of recipes to add.
whitelist (list): Only include files whose os.basename() matches something
on the whitelist
"""
if whitelist is not None:
whitelist = set(whitelist)
if recipe_folder == '':
recipe_folder = '.'
for yaml_file in [x for x in os.listdir(recipe_folder) if x.endswith('.yaml')]:
if whitelist is not None and yaml_file not in whitelist:
continue
recipe = RecipeObject.FromFile(os.path.join(recipe_folder, yaml_file), self._recipe_actions, self._recipe_resources)
self._recipes[recipe.name] = recipe
for ship_file in [x for x in os.listdir(recipe_folder) if x.endswith('.ship')]:
if whitelist is not None and ship_file not in whitelist:
continue
recipe = RecipeObject.FromArchive(os.path.join(recipe_folder, ship_file), self._recipe_actions, self._recipe_resources)
self._recipes[recipe.name] = recipe
def add_recipe_actions(self, recipe_actions):
"""Add additional valid recipe actions to RecipeManager
Args:
recipe_actions (list): List of tuples. The first value of each tuple is the
action name, the second is the RecipeAction class.
"""
for action_name, action in recipe_actions:
self._recipe_actions[action_name] = action
def get_recipe(self, recipe_name):
"""Get a recipe by name.
Args:
recipe_name (str): The name of the recipe to fetch. Can be either the
yaml file name or the name of the recipe.
"""
if recipe_name.endswith('.yaml'):
recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)
else:
recipe = self._recipes.get(recipe_name)
if recipe is None:
raise RecipeNotFoundError("Could not find recipe", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])
return recipe
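# A usage sketch (hypothetical folder and recipe names; assumes recipe actions
# are installed as plugins, as described in the class docstring):
#
#     manager = RecipeManager()
#     manager.add_recipe_folder('recipes', whitelist=['flash_tile.yaml'])
#     recipe = manager.get_recipe('flash_tile')  # by declared recipe name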
| iotile/coretools | iotileship/iotile/ship/recipe_manager.py | Python | gpl-3.0 | 4,144 | 0.002896 |
from flask import Blueprint, render_template
frontend = Blueprint('frontend', __name__)
@frontend.route('/')
@frontend.route('/index')
def index():
return render_template('index.html')
| ondoheer/GOT-Platform | app/frontend/views.py | Python | gpl-2.0 | 197 | 0.005076 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import encodeutils
import six
from six.moves.urllib import parse
from eclcli.orchestration.heatclient.common import utils
from eclcli.orchestration.heatclient.openstack.common.apiclient import base
from eclcli.orchestration.heatclient.v1 import stacks
DEFAULT_PAGE_SIZE = 20
class Event(base.Resource):
def __repr__(self):
return "<Event %s>" % self._info
def update(self, **fields):
self.manager.update(self, **fields)
def delete(self):
return self.manager.delete(self)
def data(self, **kwargs):
return self.manager.data(self, **kwargs)
class EventManager(stacks.StackChildManager):
resource_class = Event
def list(self, stack_id, resource_name=None, **kwargs):
"""Get a list of events.
:param stack_id: ID of stack the events belong to
:param resource_name: Optional name of resources to filter events by
:rtype: list of :class:`Event`
"""
params = {}
if 'filters' in kwargs:
filters = kwargs.pop('filters')
params.update(filters)
for key, value in six.iteritems(kwargs):
if value:
params[key] = value
if resource_name is None:
url = '/stacks/%s/events' % stack_id
else:
stack_id = self._resolve_stack_id(stack_id)
url = '/stacks/%s/resources/%s/events' % (
parse.quote(stack_id, ''),
parse.quote(encodeutils.safe_encode(resource_name), ''))
if params:
url += '?%s' % parse.urlencode(params, True)
return self._list(url, 'events')
def get(self, stack_id, resource_name, event_id):
"""Get the details for a specific event.
:param stack_id: ID of stack containing the event
:param resource_name: ID of resource the event belongs to
:param event_id: ID of event to get the details for
"""
stack_id = self._resolve_stack_id(stack_id)
url_str = '/stacks/%s/resources/%s/events/%s' % (
parse.quote(stack_id, ''),
parse.quote(encodeutils.safe_encode(resource_name), ''),
parse.quote(event_id, ''))
resp = self.client.get(url_str)
body = utils.get_response_body(resp)
return Event(self, body.get('event'))
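# Sketch of typical use (hypothetical stack, resource and event ids; an
# EventManager is normally reached through an instantiated heat client):
#
#     events = event_manager.list('my-stack', resource_name='my_server',
#                                 filters={'resource_status': 'CREATE_COMPLETE'})
#     event = event_manager.get('my-stack', 'my_server', '1234')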
| nttcom/eclcli | eclcli/orchestration/heatclient/v1/events.py | Python | apache-2.0 | 2,990 | 0 |
import time
import zlib
from bs4 import BeautifulSoup
# from geopy.geocoders import Nominatim as Geo
from scraper import BaseScraper
from price_parser import parse_price_text
from MySQL_connector import db_connector
db = 'realestate_db'
class Parser(object):
scr_db = 'scraper_dumps'
tgt_db = 'realestate_db'
def __init__(self):
self.html = ""
self.address = ""
self.hash_id = 0
self.property_type = ""
self.sub_type = ""
self.ad_id = ""
self.ad_url = ""
self.postcode = ""
self.state = ""
self.price_text = ""
self.open_date = ""
self.room_bed = None
self.room_bath = None
self.room_car = None
self.create_date = ""
self.last_seen_date = ""
self.raw_ad_text = ""
self.price = None
self.agent_name = ""
self.agent_company = ""
self._tgt_db_conn = db_connector(self.tgt_db)
self.cur = self._tgt_db_conn.cursor()
self.write_queue_len = 0
pass
@staticmethod
def _fetchonedict(cur):
data = cur.fetchone()
if data:
rs = {}
for i in range(len(data)):
col = cur.description[i][0]
d = data[i]
rs[col] = d
return rs
else:
return None
def extract_html_text(self, line_num=1000):
"""
query html from source database
call parse function to parse html to structured data
call insert function to insert to target database
:return:
"""
tic = time.time()
# get the parsed list of hash id
conn = db_connector(self.tgt_db)
cur = conn.cursor()
cur.execute("SELECT hash_id FROM tbl_property_ad")
parsed_hash_id = set()
while True:
res = cur.fetchone()
if res:
parsed_hash_id.add(res[0])
else:
break
pass
conn = db_connector(self.scr_db)
cur = conn.cursor()
cur.execute("SELECT * FROM tbl_html_text LIMIT %s", (line_num,))
i = 0
try:
while True:
# each row of data
i += 1
if not(i % 1000):
print "processing %d lines of data. (%f sec)\r" % (i, time.time()-tic)
tic = time.time()
rs = self._fetchonedict(cur)
if isinstance(rs, dict):
# get address only for the first version
# if rs['hash_id'] in parsed_hash_id:
# continue
self.html = zlib.decompress(str(rs["html_text"])).decode("utf-8")
self.hash_id = rs['hash_id']
self.create_date = rs["create_date"]
self.last_seen_date = rs["last_seen_date"]
self.raw_ad_text = rs["ad_text"]
else:
break
# call parse
self.parse_html_text()
self.insert_data()
finally:
self._tgt_db_conn.commit()
self._tgt_db_conn.close()
print "Saving and closing connection."
def parse_html_text(self):
soup = BeautifulSoup(self.html, "html.parser")
# get type
article = soup.article
try:
self.property_type = article["data-content-type"]
except (AttributeError, KeyError):
self.property_type = ""
# get ad id
self.ad_id = ""
try:
self.ad_id = article["id"]
except (AttributeError, KeyError):
self.ad_id = ""
# get url
self.ad_url = ""
if self.ad_id:
url = article.find("a")['href']
assert isinstance(url, basestring)
while url:
if url[0] == "/" and url.find(self.ad_id[1:]):
break
url = article.find("a")['href']
self.ad_url = "www.realestate.com.au"+url
# get subtype
self.sub_type = ""
if self.ad_url:
url_component = url.split("-")
self.sub_type = url_component[1]
# get address
photoviewer = soup.find("div", class_="photoviewer")
if photoviewer:
img = photoviewer.find("img")
try:
self.address = img['title']
except (KeyError, AttributeError):
self.address = ""
print "Could not find address, hash id:", self.hash_id
pass
# what if the address could not be found in the photoviewer?
# get postcode
self.postcode = ""
if self.address:
postcode = self.address[-4:].strip()
if postcode.isdigit():
self.postcode = postcode
# get state
self.state = ""
if self.postcode:
t = self.address.split(",")
t = t[-1]
state = t.strip().split(" ")[0]
self.state = state.upper()
# get price text
self.price_text = ""
self.price = None
price_text = article.find("p", class_="priceText")
if not price_text:
price_text = article.find("p", class_="contactAgent")
if not price_text:
price_text = article.find("span", class_="price rui-truncate")
if price_text:
self.price_text = price_text.get_text()
self.price = parse_price_text(self.price_text)
if not isinstance(self.price, float):
self.price = None
# todo li, class='badge openTime'
# s = article.find("li", class_="badge openTime")
# if s:
# print s.get_text(), len(article.find_all("li", class_="badge openTime"))
# get rooms
self.room_bed = None
self.room_bath = None
self.room_car = None
rooms = article.find("dl", class_="rui-property-features rui-clearfix")
if rooms:
room_text = rooms.get_text()
# print room_text, "===>", self._parse_rooms(room_text)
self.room_bed, self.room_bath, self.room_car = self._parse_rooms(room_text)
def _parse_rooms(self, room_text):
"""
:return: [1,2,3] for [bed,bath,car]
"""
assert isinstance(room_text, basestring)
rooms = [None, None, None]
s = room_text.split(" ")
while s:
text = s.pop(0)
if text == "Bedrooms":
num = s[0]
if num.isdigit():
s.pop(0)
rooms[0] = num
elif text == "Bathrooms":
num = s[0]
if num.isdigit():
s.pop(0)
rooms[1] = num
elif text == "Car":
if s[0] == "Spaces":
s.pop(0)
num = s[0]
if num.isdigit():
s.pop(0)
rooms[2] = num
return rooms
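# Example (illustrative input): "Bedrooms 3 Bathrooms 2 Car Spaces 1" yields
# ['3', '2', '1'] for [bed, bath, car]; note the numbers stay strings here.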
def test_db(self):
conn = db_connector(db)
cur = conn.cursor()
cur.execute(
""" CREATE TABLE IF NOT EXISTS`tbl_property_ad` (
`id` INT NOT NULL,
`hash_id` INT NOT NULL,
`address` VARCHAR(100) NULL,
`price` INT NULL,
`price_text` VARCHAR(100) NULL,
`agent_name` VARCHAR(45) NULL,
`agent_company` VARCHAR(45) NULL,
`raw_list_text` VARCHAR(255) NULL,
`room.bed` INT NULL,
`room.bath` INT NULL,
`room.car` INT NULL,
`type` VARCHAR(45) NULL,
`subtype` VARCHAR(45) NULL,
`lat` DECIMAL NULL,
`long` DECIMAL NULL,
`address_normalized` VARCHAR(100) NULL,
`state` VARCHAR(10) NULL,
`postcode` VARCHAR(10) NULL,
`ad_url` VARCHAR(255) NULL,
`create_date` timestamp NULL DEFAULT NULL,
`last_seen_date` timestamp NULL DEFAULT NULL,
PRIMARY KEY (`id`,`hash_id`),
UNIQUE KEY `id_UNIQUE` (`id`),
UNIQUE KEY `hash_id_UNIQUE` (`hash_id`))
""")
conn.commit()
conn.close()
def insert_data(self):
cur = self.cur
cur.execute("INSERT INTO tbl_property_ad "
"(hash_id, address, type, subtype,"
" state, postcode, price_text, price, "
"`room.bed`, `room.bath`, `room.car`, "
"`raw_list_text`, `ad_url`,"
" `create_date`, `last_seen_date`) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
"ON DUPLICATE KEY UPDATE "
"address = %s, type = %s, subtype =%s, "
"state = %s, postcode =%s, price_text = %s, price=%s, "
"`room.bed` = %s, `room.bath` = %s, `room.car` = %s, "
"`raw_list_text`=%s, `ad_url`=%s, "
"`create_date`=%s, `last_seen_date`=%s ",
(self.hash_id, self.address, self.property_type, self.sub_type,
self.state, self.postcode, self.price_text, self.price,
self.room_bed, self.room_bath, self.room_car,
self.raw_ad_text, self.ad_url,
self.create_date, self.last_seen_date,
self.address, self.property_type, self.sub_type,
self.state, self.postcode, self.price_text, self.price,
self.room_bed, self.room_bath, self.room_car,
self.raw_ad_text, self.ad_url,
self.create_date, self.last_seen_date
))
self.write_queue_len += 1
if self.write_queue_len > 5000:
print "save 5000 lines..."
self._tgt_db_conn.commit()
self.write_queue_len = 0
if __name__ == "__main__":
parser = Parser()
# parser.scr_db = "./test/scraper_dumps.db"
# parser.tgt_db = "./test/database.db"
parser.test_db()
parser.extract_html_text(10000000)
| lyso/scrape_realestate | parser_mysql.py | Python | gpl-3.0 | 10,544 | 0.000759 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
# Libraries included with basic python install
from bisect import bisect
import cmath
import collections
import copy
import functools
import heapq
import itertools
import math
import operator
import pytz
import Queue
import re
import time
import zlib
# Third party libraries added with pip
from sklearn.ensemble import RandomForestClassifier
import blaze # includes sqlalchemy, odo
import numpy
import scipy
import cvxopt
import cvxpy
from pykalman import KalmanFilter
import statsmodels.api as sm
import talib
from copulalib.copulalib import Copula
import theano
import xgboost
from arch import arch_model
from keras.models import Sequential
from keras.layers import Dense, Activation
import tensorflow as tf
class PythonPackageTestAlgorithm(QCAlgorithm):
'''Algorithm to test third party libraries'''
def Initialize(self):
self.SetStartDate(2013, 10, 7) #Set Start Date
self.SetEndDate(2013, 10, 7) #Set End Date
self.AddEquity("SPY", Resolution.Daily)
# numpy test
print "numpy test >>> print numpy.pi: " , numpy.pi
# scipy test:
print "scipy test >>> print mean of 1 2 3 4 5:", scipy.mean(numpy.array([1, 2, 3, 4, 5]))
#sklearn test
print "sklearn test >>> default RandomForestClassifier:", RandomForestClassifier()
# cvxopt matrix test
print "cvxopt >>>", cvxopt.matrix([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], (2,3))
# talib test
print "talib test >>>", talib.SMA(numpy.random.random(100))
# blaze test
blaze_test()
# cvxpy test
cvxpy_test()
# statsmodels test
statsmodels_test()
# pykalman test
pykalman_test()
# copulalib test
copulalib_test()
# theano test
theano_test()
# xgboost test
xgboost_test()
# arch test
arch_test()
# keras test
keras_test()
# tensorflow test
tensorflow_test()
def OnData(self, data): pass
def blaze_test():
accounts = blaze.symbol('accounts', 'var * {id: int, name: string, amount: int}')
deadbeats = accounts[accounts.amount < 0].name
L = [[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]]
print "blaze test >>>", list(blaze.compute(deadbeats, L))
def grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):
i = bisect(breakpoints, score)
return grades[i]
def cvxpy_test():
numpy.random.seed(1)
n = 10
mu = numpy.abs(numpy.random.randn(n, 1))
Sigma = numpy.random.randn(n, n)
Sigma = Sigma.T.dot(Sigma)
w = cvxpy.Variable(n)
gamma = cvxpy.Parameter(sign='positive')
ret = mu.T*w
risk = cvxpy.quad_form(w, Sigma)
print "cvxpy test >>> ", cvxpy.Problem(cvxpy.Maximize(ret - gamma*risk),
[cvxpy.sum_entries(w) == 1,
w >= 0])
def statsmodels_test():
nsample = 100
x = numpy.linspace(0, 10, 100)
X = numpy.column_stack((x, x**2))
beta = numpy.array([1, 0.1, 10])
e = numpy.random.normal(size=nsample)
X = sm.add_constant(X)
y = numpy.dot(X, beta) + e
model = sm.OLS(y, X)
results = model.fit()
print "statsmodels tests >>>", results.summary()
def pykalman_test():
kf = KalmanFilter(transition_matrices = [[1, 1], [0, 1]], observation_matrices = [[0.1, 0.5], [-0.3, 0.0]])
measurements = numpy.asarray([[1,0], [0,0], [0,1]]) # 3 observations
kf = kf.em(measurements, n_iter=5)
print "pykalman test >>>", kf.filter(measurements)
def copulalib_test():
x = numpy.random.normal(size=100)
y = 2.5 * x + numpy.random.normal(size=100)
#Make the instance of Copula class with x, y and clayton family::
print "copulalib test >>>", Copula(x, y, family='clayton')
def theano_test():
a = theano.tensor.vector() # declare variable
out = a + a ** 10 # build symbolic expression
f = theano.function([a], out) # compile function
print "theano test >>>", f([0, 1, 2])
def xgboost_test():
data = numpy.random.rand(5,10) # 5 entities, each contains 10 features
label = numpy.random.randint(2, size=5) # binary target
print "xgboost test >>>", xgboost.DMatrix( data, label=label)
def arch_test():
r = numpy.array([0.945532630498276,
0.614772790142383,
0.834417758890680,
0.862344782601800,
0.555858715401929,
0.641058419842652,
0.720118656981704,
0.643948007732270,
0.138790608092353,
0.279264178231250,
0.993836948076485,
0.531967023876420,
0.964455754192395,
0.873171802181126,
0.937828816793698])
garch11 = arch_model(r, p=1, q=1)
res = garch11.fit(update_freq=10)
print "arch test >>>", res.summary()
def keras_test():
# Initialize the constructor
model = Sequential()
# Add an input layer
model.add(Dense(12, activation='relu', input_shape=(11,)))
# Add one hidden layer
model.add(Dense(8, activation='relu'))
# Add an output layer
model.add(Dense(1, activation='sigmoid'))
print "keras test >>>", model
def tensorflow_test():
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
sess = tf.Session()
node3 = tf.add(node1, node2)
print "tensorflow test >>>", "sess.run(node3): ", sess.run(node3)
| Mendelone/forex_trading | Algorithm.Python/PythonPackageTestAlgorithm.py | Python | apache-2.0 | 6,403 | 0.016872 |
from omnibus.factories import websocket_connection_factory
def mousemove_connection_factory(auth_class, pubsub):
class GeneratedConnection(websocket_connection_factory(auth_class, pubsub)):
def close_connection(self):
self.pubsub.publish(
'mousemoves', 'disconnect',
sender=self.authenticator.get_identifier()
)
return super(GeneratedConnection, self).close_connection()
return GeneratedConnection
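# Wiring sketch (hedged; names assumed from django-omnibus conventions): the
# daemon builds its connection class by calling a factory like this one with
# an authenticator class and a pubsub instance, e.g.
#
#     Connection = mousemove_connection_factory(auth_class, pubsub)
#
# so this module is typically referenced from settings by its dotted path.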
| moccu/django-omnibus | examples/mousemove/example_project/connection.py | Python | bsd-3-clause | 485 | 0.002062 |
from django.test import Client
import mock as mock
from image_converter.tests.base import ImageConversionBaseTestCase
from image_converter.utils.convert_image import convert_image_to_jpeg
__author__ = 'Dominic Dumrauf'
class ViewsTestCase(ImageConversionBaseTestCase):
"""
Tests the 'views'.
"""
def test_upload_get(self):
"""
Tests GETting the form initially.
"""
# Given
c = Client()
# When
response = c.get('/')
# Then
self.assertTemplateUsed(response, template_name='upload.html')
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
def test_upload_post_without_file(self):
"""
Tests POSTing a form which *lacks* a file.
"""
# Given
c = Client()
# When
response = c.post('/')
# Then
self.assertTemplateUsed(response, template_name='upload.html')
self.assertFormError(response, 'form', 'file', 'This field is required.')
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
def test_upload_post_with_non_image_file(self):
"""
Tests POSTing a form which contains a file but the file is not an image.
"""
# Given
c = Client()
# When
with open(self.non_image_file_path) as fp:
response = c.post('/', {'file': fp})
# Then
self.assertTemplateUsed(response, template_name='unsupported_image_file_error.html')
self.assertEqual(response.status_code, 200)
self.assertIn('file', response.context)
self.assertIn(self.non_image_file_name, response.content)
def test_upload_post_with_image_file(self):
"""
Tests POSTing a form which contains a file where the file is an image.
"""
# Given
c = Client()
# When
with open(self.image_file_path) as fp:
response = c.post('/', {'file': fp})
converted_image = convert_image_to_jpeg(fp)
# Then
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Disposition'], 'attachment; filename={0}.jpg'.format(self.image_file_name))
self.assertEqual(response.content, converted_image.getvalue())
@mock.patch('image_converter.views.convert_image_to_jpeg')
def test_unexpected_error_in_image_conversion_handling(self, convert_image_to_jpeg):
"""
Tests POSTing a form where converting the image raises an unexpected exception.
"""
# Given
convert_image_to_jpeg.side_effect = Exception()
c = Client()
# When
with open(self.non_image_file_path) as fp:
response = c.post('/', {'file': fp})
# Then
self.assertTemplateUsed(response, template_name='generic_error.html')
self.assertEqual(response.status_code, 200)
self.assertIn('file', response.context)
self.assertIn(self.non_image_file_name, response.content)
| dumrauf/web_tools | image_converter/tests/test_views.py | Python | mit | 3,078 | 0.001949 |
try:
try:
from _pydevd_frame_eval_ext import pydevd_frame_evaluator as mod
except ImportError:
from _pydevd_frame_eval import pydevd_frame_evaluator as mod
except ImportError:
try:
import sys
try:
is_64bits = sys.maxsize > 2 ** 32
except:
# In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.
raise ImportError
plat = '32'
if is_64bits:
plat = '64'
# We also accept things as:
#
# _pydevd_frame_eval.pydevd_frame_evaluator_win32_27_32
# _pydevd_frame_eval.pydevd_frame_evaluator_win32_34_64
#
# to have multiple pre-compiled pyds distributed along the IDE
# (generated by build_tools/build_binaries_windows.py).
mod_name = 'pydevd_frame_evaluator_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat)
check_name = '_pydevd_frame_eval.%s' % (mod_name,)
mod = __import__(check_name)
mod = getattr(mod, mod_name)
except ImportError:
raise
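# Illustrative instance of the naming scheme above: 64-bit CPython 2.7 on
# Windows resolves to _pydevd_frame_eval.pydevd_frame_evaluator_win32_27_64.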
frame_eval_func = mod.frame_eval_func
stop_frame_eval = mod.stop_frame_eval
dummy_trace_dispatch = mod.dummy_trace_dispatch
get_thread_info_py = mod.get_thread_info_py
clear_thread_local_info = mod.clear_thread_local_info
| leafclick/intellij-community | python/helpers/pydev/_pydevd_frame_eval/pydevd_frame_eval_cython_wrapper.py | Python | apache-2.0 | 1,343 | 0.002234 |
#
# Martin Kolman <mkolman@redhat.com>
#
# Copyright 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.errors import KickstartParseError
from pykickstart.options import KSOptionParser
from pykickstart.version import F29
from pykickstart.i18n import _
class F29_ModuleData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.stream = kwargs.get("stream", "")
def __eq__(self, y):
if not y:
return False
return (self.name == y.name and self.stream == y.stream)
def __ne__(self, y):
return not self == y
def __str__(self):
retval = BaseData.__str__(self)
retval += "module --name=%s --stream=%s" % (self.name, self.stream)
return retval.strip() + "\n"
class F29_Module(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.moduleList = kwargs.get("moduleList", [])
self.op = self._getParser()
def __str__(self):
retval = ""
for module in self.moduleList:
retval += module.__str__()
return retval
def _getParser(self):
op = KSOptionParser(prog="module", description="""
The module command makes it possible to manipulate
modules.
(In this case we mean modules as introduced by the
Fedora modularity initiative.)
A module is defined by a unique name and a stream id,
where single module can (and usually has) multiple
available streams.
Streams will in most cases corresponds to stable
releases of the given software components
(such as Node.js, Django, etc.) but there could be
also other use cases, such as a raw upstream master
branch stream or streams corresponding to an upcoming
stable release.
For more information see the Fedora modularity
initiative documentation:
https://docs.pagure.org/modularity/""", version=F29)
op.add_argument("--name", metavar="<module_name>", version=F29, required=True,
help="""
Name of the module to enable.""")
op.add_argument("--stream", metavar="<module_stream_name>", version=F29, required=False,
help="""
Name of the module stream to enable.""")
return op
def parse(self, args):
(ns, extra) = self.op.parse_known_args(args=args, lineno=self.lineno)
if len(extra) > 0:
msg = _("The enable module command does not take positional arguments!")
raise KickstartParseError(msg, lineno=self.lineno)
enable_module_data = self.dataClass() # pylint: disable=not-callable
self.set_to_obj(ns, enable_module_data)
enable_module_data.lineno = self.lineno
return enable_module_data
def dataList(self):
return self.moduleList
@property
def dataClass(self):
return self.handler.ModuleData
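# Illustrative kickstart usage of the command defined above (hypothetical
# module and stream names, per the option parser):
#
#     module --name=nodejs --stream=8
#
# parse() returns an F29_ModuleData whose __str__ round-trips to that line.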
| atodorov/pykickstart | pykickstart/commands/module.py | Python | gpl-2.0 | 4,566 | 0.001314 |
# Python 3: ArchiveNM.py
# Function:
# This will collect the files in /home/postgres that
# need to be sent to a new Natural Message machine
# that is being initialized. This currently grabs
# directory server and shard server files.
# It can also be used as an archiver.
import datetime
import tarfile
import os
import sys
# For the version code, enter the format used
# in the naturalmsg_svr_#_#_#.py files
test_or_prod = 'prod'
version = '0_0_5'
DSTAMP = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# (do not add a trailing slash on directory names)
pgm_dir = '/var/natmsg'
sql_dir = '/home/postgres/shard/sql/' + test_or_prod
function_dir = '/home/postgres/shard/sql/' + test_or_prod + '/functions'
pgm_files = ('naturalmsg-svr' + version + '.py',
'shardfunc_cp' + version + '.py')
sql_files = ( \
'0001create_db.sh',
'0002create_tables.sql',
'0005shardserver.sql',
'0007shardbig.sql',
'0020payment.sql',
'0500sysmon.sql',
'blog01.sql' \
)
function_files = ( \
'nm_blog_entry_newest.sql',
'read_inbasket_stage010.sql',
'read_inbasket_stage020.sql',
'read_inbasket_stage030.sql',
'scan_shard_delete.sql',
'shard_burn.sql',
'shard_delete_db_entries.sql',
'shard_delete.sql',
'shard_expire_big.sql',
'shard_expire.sql',
'shard_id_exists.sql',
'smd_create0010.sql',
'sysmon001.sql' \
)
tar_fname_base = 'NatMsgSQLArchive' + version
tar_fname = tar_fname_base + '.tar'
if os.path.isfile(tar_fname):
# The tar file already exists, rename it
try:
os.renames(tar_fname, tar_fname_base + '-' + DSTAMP + '.tar')
except:
print('Error renaming an existing tar file: ' + tar_fname)
print('Maybe you do not have permission.')
sys.exit(12)
t = tarfile.TarFile(tar_fname, mode='w')
for f in pgm_files:
# the full path is already specified in the file list.
t.add(os.path.normpath(pgm_dir + '/' + f))
for f in sql_files:
t.add(os.path.normpath(sql_dir + '/' + f))
for f in function_files:
t.add(os.path.normpath(function_dir + '/' + f))
t.close()
| naturalmessage/natmsgshardbig | sql/ArchiveNM.py | Python | gpl-3.0 | 2,018 | 0.018335 |
class IRCUser(object):
def __init__(self, user):
self.user = None
self.hostmask = None
self.last_active = None
if "!" in user:
user_array = user.split("!")
self.name = user_array[0]
if len(user_array) > 1:
user_array = user_array[1].split("@")
self.user = user_array[0]
self.hostmask = user_array[1]
else:
self.name = user
self.user = "anon"
self.hostmask = "unknown"
def __str__(self):
return "{}!{}@{}".format(self.name, self.user, self.hostmask)
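# Parsing sketch (illustrative hostmask): IRCUser("Hubbe!hubbe@example.com")
# gives name "Hubbe", user "hubbe" and hostmask "example.com"; a bare
# IRCUser("Hubbe") falls back to user "anon" and hostmask "unknown".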
| HubbeKing/Hubbot_Twisted | hubbot/user.py | Python | mit | 629 | 0 |
#!/usr/bin/python
import sys, os, time
from itertools import count
import logging
import tkp.database.database as database
import tkp.database.dataset as ds
import tkp.database.dbregion as reg
import tkp.database.utils as dbu
import monetdb.sql
from tkp.sourcefinder import image
from tkp.config import config
from tkp.utility import accessors, containers
db_enabled = config['database']['enabled']
db_host = config['database']['host']
db_user = config['database']['user']
db_passwd = config['database']['password']
db_dbase = config['database']['name']
db_port = config['database']['port']
db_autocommit = config['database']['autocommit']
basedir = config['test']['datapath']
imagesdir = basedir + '/fits'
regionfilesdir = basedir + '/regions'
if db_enabled:
db = database.DataBase(host=db_host, name=db_dbase, user=db_user, password=db_passwd, port=db_port, autocommit=db_autocommit)
try:
iter_start = time.time()
if db_enabled:
description = 'TRAPPED: LOFAR flare stars'
dataset = ds.DataSet(data={'dsinname': description}, database=db)
print "dataset.id:", dataset.id
i = 0
files = os.listdir(imagesdir)
files.sort()
for file in files:
my_fitsfile = accessors.FitsFile(imagesdir + '/' + file)
my_image = accessors.sourcefinder_image_from_accessor(my_fitsfile)
#print "type(my_image):",type(my_image)
print "\ni: ", i, "\nfile: ", file
if db_enabled:
dbimg = accessors.dbimage_from_accessor(dataset, my_fitsfile)
print "dbimg.id: ", dbimg.id
results = my_image.extract()
print results
if db_enabled:
dbu.insert_extracted_sources(db.connection, dbimg.id, results)
dbu.associate_extracted_sources(db.connection, dbimg.id)
dbu.associate_with_catalogedsources(db.connection, dbimg.id)
my_image.clearcache()
i += 1
db.close()
except db.Error, e:
print "Failed for reason: %s " % (e,)
raise
| jjdmol/LOFAR | CEP/GSM/src/ms3_script.py | Python | gpl-3.0 | 2,005 | 0.002494 |
import logging
import configparser
import os
from utils import bool_query
class BreakRule(object):
def __init__(self, settings):
self.settings = settings
self.rules_record = configparser.ConfigParser()
self.rules_record.read("{}/tms/breakrules.ini".format(os.getcwd()))
self.rules = {}
for rule_id in self.rules_record.sections():
self.rules[rule_id] = self.rules_record.get(rule_id, "Description")
def _check_rule_exists(self, rule_id):
if self.rules.get(rule_id, None) is None:
logging.warning("Rule {} doesn't exist".format(rule_id))
return False
else:
logging.debug("Rule {} exists".format(rule_id))
return True
def _update_break_rule(self, rule_id):
self.settings.set("Settings", "BreakRule", rule_id)
with open("{}/tms/settings.ini".format(os.getcwd()), 'w') as configfile:
self.settings.write(configfile)
logging.info("Break rule changed to rule {}".format(self.settings.get("Settings", "BreakRule")))
def print_rules(self):
logging.info("Break Rules: ")
for rule_id in self.rules:
logging.info(' [{}] {}'.format(rule_id, self.rules[rule_id]))
def get_break_rule(self, desired_rule_id=None):
if not desired_rule_id: desired_rule_id = self.settings.get("Settings", "BreakRule")
if self._check_rule_exists(desired_rule_id):
for rule_id in self.rules:
if rule_id == desired_rule_id:
logging.info(' [{}] {}'.format(rule_id, self.rules[desired_rule_id]))
def cmd_update_break_rule(self):
self.print_rules()
selection_query = None
while selection_query is None:
logging.info('Please enter the ID of the rule to be used...')
selection = input()
try:
int(selection)
except ValueError:
logging.warning('WARNING: Please enter a numeric value corresponding to a rule ID.')
else:
if self._check_rule_exists(selection):
selection_query = bool_query('Select Rule "{}" for use?'.format(selection), default="y")
self._update_break_rule(selection)
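# Usage sketch (assumes tms/settings.ini and tms/breakrules.ini exist, as read
# above; `settings` is the ConfigParser instance passed in by the caller):
#
#     br = BreakRule(settings)
#     br.print_rules()            # list all known rules
#     br.get_break_rule()         # show the currently configured rule
#     br.cmd_update_break_rule()  # interactively select a new rule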
| marmstr93ng/TimeManagementSystem | tms/breakrule.py | Python | mit | 2,292 | 0.004363 |
# -*- coding: utf-8 -*-
__all__ = [
"CreateTablesCommand", "DropTablesCommand", "UpdateCommand",
]
from flask.ext.script import Command, Option
from .models import db
from .update import update
class CreateTablesCommand(Command):
def run(self):
db.create_all()
class DropTablesCommand(Command):
def run(self):
db.drop_all()
class UpdateCommand(Command):
option_list = (
Option("-s", "--since", dest="since", required=False),
)
def run(self, since):
update(since=since)
| pombredanne/osrc | osrc/manage.py | Python | mit | 536 | 0 |
import random
def printifyInstruction(instr, mcs):
"""
Construct a preaty representation of the instruction in memory.
mcs -> 'maximum characters span'
"""
return "({0:{3}d}, {1:{3}d}, {2:{3}d})".format(instr['A'], instr['B'], instr['C'], mcs)
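# Example (illustrative): printifyInstruction({'A': 5, 'B': 12, 'C': 9}, 3)
# returns "(  5,  12,   9)".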
class Orbis:
def __init__(self, gSize):
self.pc = 0
self.instructions = []
self.gsize = gSize
for i in range(gSize * 3):
if (i % 3) != 2:
# We are either on operand A or operand B initialization branch.
self.instructions.append(random.randrange(0, gSize * 3))
else:
# We are on the address C initialization branch.
self.instructions.append(random.randrange(0, gSize) * 3)
def shock(self):
self.pc = 0
for g in range(self.gsize):
print "Evaluating gene {0} ...".format(self.pc / 3)
ta = self.instructions[g * 3]
tb = self.instructions[g * 3 + 1]
tc = self.instructions[g * 3 + 2]
cstem = self.instructions[tb] - self.instructions[ta]
if (tb % 3) == 2:
# We will affect the jump part of a gene. Make sure it remains consistent with the rest of the genes
cvtor = cstem % 3
prevtc = self.instructions[tb]
if cvtor == 0:
# The current value is a valid gene address. It's Ok to use it
self.instructions[tb] = cstem % (self.gsize * 3)
elif cvtor == 1:
# The current value is closer to the lower side
self.instructions[tb] = (cstem - 1) % (self.gsize * 3)
else:
# The current value is closer to the upper side
self.instructions[tb] = (cstem + 1) % (self.gsize * 3)
else:
# We are in the data domain. Just ensure that the resulting numerals are bounded to the current information domain
self.instructions[tb] = cstem % (self.gsize * 3)
if self.instructions[tb] >= self.gsize * 3:
raise IndexError("Invalid C address generated! Previous C value was {0} while cvtor was {1}".format(prevtc, cvtor))
if self.instructions[tb] <= tc:
self.pc = tc
else:
self.pc = self.pc + 3
def getInstruction(self, addr):
if addr >= (self.gsize * 3) or (addr % 3) != 0:
raise Exception("The address supplied is not valid!")
return {'A': self.instructions[addr], 'B': self.instructions[addr + 1], 'C': self.instructions[addr + 2]}
def __str__(self):
orbisPrintString = ""
instrRealAddress = 0
maxGeneCharPrintCount = len(str(len(self.instructions)))
for i in range(self.gsize):
orbisPrintString = orbisPrintString + '{0:{3}d}. [{1:{3}d}] {2}\n'.format(i, i * 3, printifyInstruction(self.getInstruction(i * 3), maxGeneCharPrintCount), maxGeneCharPrintCount)
instrRealAddress += 3
return orbisPrintString
if __name__ == "__main__":
x = Orbis(256)
print 'Original orbis: \n', x
print 'Shocking the world...'
for i in range(100):
print "Shock nr. {0} ...".format(i)
try:
x.shock()
except IndexError as e:
print "IndexError message received! World evaluation halted."
print "Exception message: {0}".format(e.args)
print x
exit()
print x
| 3Nigma/jale | main.py | Python | mit | 3,579 | 0.00475 |
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ast
import os
import time
import sys
# from jsonrpc import JSONRPCResponseManager
import jsonrpclib
from .jsonrpc import VerifyingJSONRPCServer
from .version import PACKAGE_VERSION
from .network import Network
from .util import (json_decode, DaemonThread, print_error, to_string,
standardize_path)
from .wallet import Wallet
from .storage import WalletStorage
from .commands import known_commands, Commands
from .simple_config import SimpleConfig
from .exchange_rate import FxThread
def get_lockfile(config):
return os.path.join(config.path, 'daemon')
def remove_lockfile(lockfile):
try:
os.unlink(lockfile)
print_error("Removed lockfile:", lockfile)
except OSError as e:
print_error("Could not remove lockfile:", lockfile, repr(e))
def get_fd_or_server(config):
'''Tries to create the lockfile, using O_EXCL to
prevent races. If it succeeds it returns the FD.
Otherwise try and connect to the server specified in the lockfile.
If this succeeds, the server is returned. Otherwise remove the
lockfile and try again.'''
lockfile = get_lockfile(config)
limit = 5 # prevent infinite looping here. Give up after 5 attempts.
latest_exc = None
for n in range(limit):
try:
return os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644), None
except PermissionError as e:
sys.exit(f"Unable to create lockfile due to file system permission problems: {e}")
except NotADirectoryError as e:
lockdir = os.path.dirname(lockfile)
sys.exit(f"Electron Cash directory location at {lockdir} is not a directory. Error was: {e}")
except OSError as e:
''' Unable to create -- this is normal if there was a pre-existing lockfile '''
latest_exc = e
server = get_server(config)
if server is not None:
return None, server
# Couldn't connect; remove lockfile and try again.
remove_lockfile(lockfile)
sys.exit(f"Unable to open/create lockfile at {lockfile} after {limit} attempts. Please check your filesystem setup. Last error was: {repr(latest_exc)}")
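# Lockfile format sketch: the daemon writes the repr of ((host, port), create_time),
# e.g. (('127.0.0.1', 53001), 1530000000.0) in init_server() below, and
# get_server() parses it back with ast.literal_eval (values illustrative).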
def get_server(config, timeout=2.0):
assert timeout > 0.0
lockfile = get_lockfile(config)
while True:
create_time = None
try:
with open(lockfile) as f:
(host, port), tmp_create_time = ast.literal_eval(f.read())
create_time = float(tmp_create_time); del tmp_create_time # ensures create_time is float; raises if create_time is not-float-compatible
rpc_user, rpc_password = get_rpc_credentials(config)
if rpc_password == '':
# authentication disabled
server_url = 'http://%s:%d' % (host, port)
else:
server_url = 'http://%s:%s@%s:%d' % (
rpc_user, rpc_password, host, port)
server = jsonrpclib.Server(server_url)
# Test daemon is running
server.ping()
return server
except Exception as e:
print_error("[get_server]", e)
# Note that the create_time may be in the future if there was a clock
# adjustment by system ntp, etc. We guard against this, with some
# tolerance. The net effect here is in normal cases we wait for the
# daemon, giving up after timeout seconds (or at worst timeout*2 seconds
# in the pathological case of a clock adjustment happening
# at the precise time the daemon was starting up).
if not create_time or abs(time.time() - create_time) > timeout:
return None
# Sleep a bit and try again; it might have just been started
time.sleep(1.0)
def get_rpc_credentials(config):
rpc_user = config.get('rpcuser', None)
rpc_password = config.get('rpcpassword', None)
if rpc_user is None or rpc_password is None:
rpc_user = 'user'
import ecdsa, base64
bits = 128
nbytes = bits // 8 + (bits % 8 > 0)
pw_int = ecdsa.util.randrange(pow(2, bits))
pw_b64 = base64.b64encode(
pw_int.to_bytes(nbytes, 'big'), b'-_')
rpc_password = to_string(pw_b64, 'ascii')
config.set_key('rpcuser', rpc_user)
config.set_key('rpcpassword', rpc_password, save=True)
elif rpc_password == '':
from .util import print_stderr
print_stderr('WARNING: RPC authentication is disabled.')
return rpc_user, rpc_password
class Daemon(DaemonThread):
def __init__(self, config, fd, is_gui, plugins):
DaemonThread.__init__(self)
self.plugins = plugins
self.config = config
if config.get('offline'):
self.network = None
else:
self.network = Network(config)
self.network.start()
self.fx = FxThread(config, self.network)
if self.network:
self.network.add_jobs([self.fx])
self.gui = None
self.wallets = {}
# Setup JSONRPC server
self.init_server(config, fd, is_gui)
def init_server(self, config, fd, is_gui):
host = config.get('rpchost', '127.0.0.1')
port = config.get('rpcport', 0)
rpc_user, rpc_password = get_rpc_credentials(config)
try:
server = VerifyingJSONRPCServer((host, port), logRequests=False,
rpc_user=rpc_user, rpc_password=rpc_password)
except Exception as e:
self.print_error('Warning: cannot initialize RPC server on host', host, e)
self.server = None
os.close(fd)
return
os.write(fd, bytes(repr((server.socket.getsockname(), time.time())), 'utf8'))
os.close(fd)
self.server = server
server.timeout = 0.1
server.register_function(self.ping, 'ping')
server.register_function(self.run_gui, 'gui')
server.register_function(self.run_daemon, 'daemon')
self.cmd_runner = Commands(self.config, None, self.network)
for cmdname in known_commands:
server.register_function(getattr(self.cmd_runner, cmdname), cmdname)
server.register_function(self.run_cmdline, 'run_cmdline')
def ping(self):
return True
def run_daemon(self, config_options):
config = SimpleConfig(config_options)
sub = config.get('subcommand')
subargs = config.get('subargs')
plugin_cmd = self.plugins and self.plugins.daemon_commands.get(sub)
if subargs and sub in [None, 'start', 'stop', 'status']:
return "Unexpected arguments: {!r}. {!r} takes no options.".format(subargs, sub)
if subargs and sub in ['load_wallet', 'close_wallet']:
return "Unexpected arguments: {!r}. Provide options to {!r} using the -w and -wp options.".format(subargs, sub)
if sub in [None, 'start']:
response = "Daemon already running"
elif sub == 'load_wallet':
path = config.get_wallet_path()
wallet = self.load_wallet(path, config.get('password'))
self.cmd_runner.wallet = wallet
response = True
elif sub == 'close_wallet':
path = config.get_wallet_path()
if path in self.wallets:
self.stop_wallet(path)
response = True
else:
response = False
elif sub == 'status':
if self.network:
p = self.network.get_parameters()
response = {
'path': self.network.config.path,
'server': p[0],
'blockchain_height': self.network.get_local_height(),
'server_height': self.network.get_server_height(),
'spv_nodes': len(self.network.get_interfaces()),
'connected': self.network.is_connected(),
'auto_connect': p[4],
'version': PACKAGE_VERSION,
'wallets': {k: w.is_up_to_date()
for k, w in self.wallets.items()},
'fee_per_kb': self.config.fee_per_kb(),
}
else:
response = "Daemon offline"
elif sub == 'stop':
self.stop()
response = "Daemon stopped"
elif plugin_cmd is not None:
# note that daemon's own commands take precedence, i.e., a plugin CANNOT override 'load_wallet'.
response = plugin_cmd(self, config)
else:
return "Unrecognized subcommand {!r}".format(sub)
return response
def run_gui(self, config_options):
config = SimpleConfig(config_options)
if self.gui:
if hasattr(self.gui, 'new_window'):
# This tells the gui to open the current wallet if any,
# or the last wallet if no wallets are currently open.
self.gui.new_window(None, config.get('url'))
response = "ok"
else:
response = "error: current GUI does not support multiple windows"
else:
response = "Error: Electron Cash is running in daemon mode. Please stop the daemon first."
return response
def load_wallet(self, path, password):
path = standardize_path(path)
# wizard will be launched if we return
if path in self.wallets:
wallet = self.wallets[path]
return wallet
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
return
if storage.is_encrypted():
if not password:
return
storage.decrypt(password)
if storage.requires_split():
return
if storage.requires_upgrade():
return
if storage.get_action():
return
wallet = Wallet(storage)
wallet.start_threads(self.network)
self.wallets[path] = wallet
return wallet
def add_wallet(self, wallet):
path = wallet.storage.path
self.wallets[path] = wallet
def get_wallet(self, path):
return self.wallets.get(path)
def delete_wallet(self, path):
self.stop_wallet(path)
if os.path.exists(path):
os.unlink(path)
return True
return False
def stop_wallet(self, path):
# Issue #659 wallet may already be stopped.
if path in self.wallets:
wallet = self.wallets.pop(path)
wallet.stop_threads()
def run_cmdline(self, config_options):
password = config_options.get('password')
new_password = config_options.get('new_password')
config = SimpleConfig(config_options)
config.fee_estimates = self.network.config.fee_estimates.copy()
cmdname = config.get('cmd')
cmd = known_commands[cmdname]
if cmd.requires_wallet:
path = config.get_wallet_path()
wallet = self.wallets.get(path)
if wallet is None:
return {'error': 'Wallet "%s" is not loaded. Use "electron-cash daemon load_wallet"'%os.path.basename(path) }
else:
wallet = None
# arguments passed to function
args = map(lambda x: config.get(x), cmd.params)
# decode json arguments
args = [json_decode(i) for i in args]
# options
kwargs = {}
for x in cmd.options:
kwargs[x] = (config_options.get(x) if x in ['password', 'new_password'] else config.get(x))
cmd_runner = Commands(config, wallet, self.network)
func = getattr(cmd_runner, cmd.name)
try:
result = func(*args, **kwargs)
except TypeError as e:
raise Exception("Wrapping TypeError to prevent JSONRPC-Pelix from hiding traceback") from e
return result
def run(self):
while self.is_running():
self.server.handle_request() if self.server else time.sleep(0.1)
for k, wallet in self.wallets.items():
wallet.stop_threads()
if self.network:
self.print_error("shutting down network")
self.network.stop()
self.network.join()
self.on_stop()
def stop(self):
self.print_error("stopping, removing lockfile")
remove_lockfile(get_lockfile(self.config))
super().stop()
def init_gui(self):
config = self.config
plugins = self.plugins
gui_name = config.get('gui', 'qt')
if gui_name in ['lite', 'classic']:
gui_name = 'qt'
if (sys.platform in ('windows', 'win32')
and config.get('qt_opengl') and gui_name == 'qt'):
# Hack to force QT_OPENGL env var. See #1255
#
# Note if the user provides a bad override here.. the app may crash
# or not run properly on windows. We don't do anything about that
# since this command line option is ultimately intended to just
# be used for an installer-generated shortcut.
#
os.environ['QT_OPENGL'] = str(config.get('qt_opengl'))
gui = __import__('electroncash_gui.' + gui_name, fromlist=['electroncash_gui'])
self.gui = gui.ElectrumGui(config, self, plugins)
self.gui.main()
| fyookball/electrum | lib/daemon.py | Python | mit | 14,615 | 0.001779 |
# -*- coding: utf-8 -*-
from io import BytesIO
import pytest
from phi.request.form import FormRequest
class TestFormRequest(object):
@pytest.fixture
def form_req(self):
fr = FormRequest()
fr.charset = "utf-8"
return fr
@pytest.mark.parametrize("body, content", [
(
"name=test&blah=asdfdasf+&check=on",
{"blah": "asdfdasf ", "name": "test", "check": "on"}
),
(
"name=test&blah=asdfdasf+&check=on",
{"blah": "asdfdasf ", "name": "test", "check": "on"}
),
])
def test_body(self, body, content, form_req):
stream = BytesIO(body.encode("utf-8"))
stream.seek(0)
form_req._content_stream = stream
form_req.content_length = len(body)
assert form_req._get_body() == content
| RafaelSzefler/phi | tests/unit/request/test_form.py | Python | mit | 834 | 0 |
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999, 2001, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# A dialog for specifying line properties
#
import operator
from X import LineDoubleDash
from Sketch.const import JoinMiter, JoinBevel, JoinRound,\
CapButt, CapProjecting, CapRound
from Sketch.Lib import util
from Sketch import _, Trafo, SimpleGC, SolidPattern, EmptyPattern, \
StandardDashes, StandardArrows, StandardColors
from Tkinter import Frame, Label, IntVar, LEFT, X, E, W, GROOVE
from tkext import ColorButton, UpdatedCheckbutton, MyOptionMenu2
from sketchdlg import StylePropertyPanel
from lengthvar import create_length_entry
import skpixmaps
pixmaps = skpixmaps.PixmapTk
def create_bitmap_image(tk, name, bitmap):
data = util.xbm_string(bitmap)
tk.call(('image', 'create', 'bitmap', name, '-foreground', 'black',
'-data', data, '-maskdata', data))
return name
_thickness = 3
_width = 90
def draw_dash_bitmap(gc, dashes):
scale = float(_thickness)
if dashes:
dashes = map(operator.mul, dashes, [scale] * len(dashes))
dashes = map(int, map(round, dashes))
for idx in range(len(dashes)):
length = dashes[idx]
if length <= 0:
dashes[idx] = 1
elif length > 255:
dashes[idx] = 255
else:
dashes = [_width + 10, 1]
gc.SetDashes(dashes)
gc.DrawLine(0, _thickness / 2, _width, _thickness / 2)
def create_dash_images(tk, tkwin, dashes):
bitmap = tkwin.CreatePixmap(_width, _thickness, 1)
gc = bitmap.CreateGC(foreground = 1, background = 0,
line_style = LineDoubleDash, line_width = _thickness)
images = []
for dash in dashes:
draw_dash_bitmap(gc, dash)
image = create_bitmap_image(tk, 'dash_' + `len(images)`, bitmap)
images.append((image, dash))
return gc, bitmap, images
_arrow_width = 31
_arrow_height = 25
_mirror = Trafo(-1, 0, 0, 1, 0, 0)
def draw_arrow_bitmap(gc, arrow, which = 2):
gc.gc.foreground = 0
gc.gc.FillRectangle(0, 0, _arrow_width + 1, _arrow_height + 1)
gc.gc.foreground = 1
y = _arrow_height / 2
if which == 1:
gc.PushTrafo()
gc.Concat(_mirror)
gc.DrawLineXY(0, 0, -1000, 0)
if arrow is not None:
arrow.Draw(gc)
if which == 1:
gc.PopTrafo()
def create_arrow_images(tk, tkwin, arrows):
arrows = [None] + arrows
bitmap = tkwin.CreatePixmap(_arrow_width, _arrow_height, 1)
gc = SimpleGC()
gc.init_gc(bitmap, foreground = 1, background = 0, line_width = 3)
gc.Translate(_arrow_width / 2, _arrow_height / 2)
gc.Scale(2)
images1 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 1)
image = create_bitmap_image(tk, 'arrow1_' + `len(images1)`, bitmap)
images1.append((image, arrow))
images2 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 2)
image = create_bitmap_image(tk, 'arrow2_' + `len(images2)`, bitmap)
images2.append((image, arrow))
return gc, bitmap, images1, images2
class LinePanel(StylePropertyPanel):
title = _("Line Style")
def __init__(self, master, main_window, doc):
StylePropertyPanel.__init__(self, master, main_window, doc,
name = 'linedlg')
def build_dlg(self):
top = self.top
button_frame = self.create_std_buttons(top)
button_frame.grid(row = 5, columnspan = 2, sticky = 'ew')
color_frame = Frame(top, relief = GROOVE, bd = 2)
color_frame.grid(row = 0, columnspan = 2, sticky = 'ew')
label = Label(color_frame, text = _("Color"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.color_but = ColorButton(color_frame, width = 3, height = 1,
command = self.set_line_color)
self.color_but.SetColor(StandardColors.black)
self.color_but.pack(side = LEFT, expand = 1, anchor = W)
self.var_color_none = IntVar(top)
check = UpdatedCheckbutton(color_frame, text = _("None"),
variable = self.var_color_none,
command = self.do_apply)
check.pack(side = LEFT, expand = 1)
width_frame = Frame(top, relief = GROOVE, bd = 2)
width_frame.grid(row = 1, columnspan = 2, sticky = 'ew')
label = Label(width_frame, text = _("Width"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.var_width = create_length_entry(top, width_frame,
self.set_line_width,
scroll_pad = 0)
tkwin = self.main_window.canvas.tkwin
gc, bitmap, dashlist = create_dash_images(self.top.tk, tkwin,
StandardDashes())
self.opt_dash = MyOptionMenu2(top, dashlist, command = self.set_dash,
entry_type = 'image',
highlightthickness = 0)
self.opt_dash.grid(row = 2, columnspan = 2, sticky = 'ew', ipady = 2)
self.dash_gc = gc
self.dash_bitmap = bitmap
gc, bitmap, arrow1, arrow2 = create_arrow_images(self.top.tk, tkwin,
StandardArrows())
self.opt_arrow1 = MyOptionMenu2(top, arrow1, command = self.set_arrow,
args = 1, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow1.grid(row = 3, column = 0, sticky = 'ew', ipady = 2)
self.opt_arrow2 = MyOptionMenu2(top, arrow2, command = self.set_arrow,
args = 2, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow2.grid(row = 3, column = 1, sticky = 'ew', ipady = 2)
self.arrow_gc = gc
self.arrow_bitmap = bitmap
self.opt_join = MyOptionMenu2(top, [(pixmaps.JoinMiter, JoinMiter),
(pixmaps.JoinRound, JoinRound),
(pixmaps.JoinBevel, JoinBevel)],
command = self.set_line_join,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_join.grid(row = 4, column = 0, sticky = 'ew')
self.opt_cap = MyOptionMenu2(top,
[(pixmaps.CapButt, CapButt),
(pixmaps.CapRound, CapRound),
(pixmaps.CapProjecting, CapProjecting)],
command = self.set_line_cap,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_cap.grid(row = 4, column = 1, sticky = 'ew')
self.opt_cap.SetValue(None)
def close_dlg(self):
StylePropertyPanel.close_dlg(self)
self.var_width = None
def init_from_style(self, style):
if style.HasLine():
self.var_color_none.set(0)
self.opt_join.SetValue(style.line_join)
self.opt_cap.SetValue(style.line_cap)
self.color_but.SetColor(style.line_pattern.Color())
self.var_width.set(style.line_width)
self.init_dash(style)
self.init_arrow(style)
else:
self.var_color_none.set(1)
def init_from_doc(self):
self.Update()
def Update(self):
if self.document.HasSelection():
properties = self.document.CurrentProperties()
self.init_from_style(properties)
def do_apply(self):
kw = {}
if not self.var_color_none.get():
color = self.color_but.Color()
kw["line_pattern"] = SolidPattern(color)
kw["line_width"] = self.var_width.get()
kw["line_join"] = self.opt_join.GetValue()
kw["line_cap"] = self.opt_cap.GetValue()
kw["line_dashes"] = self.opt_dash.GetValue()
kw["line_arrow1"] = self.opt_arrow1.GetValue()
kw["line_arrow2"] = self.opt_arrow2.GetValue()
else:
kw["line_pattern"] = EmptyPattern
self.set_properties(_("Set Outline"), 'line', kw)
def set_line_join(self, *args):
self.document.SetProperties(line_join = self.opt_join.GetValue(),
if_type_present = 1)
def set_line_cap(self, *args):
self.document.SetProperties(line_cap = self.opt_cap.GetValue(),
if_type_present = 1)
def set_line_color(self):
self.document.SetLineColor(self.color_but.Color())
def set_line_width(self, *rest):
self.document.SetProperties(line_width = self.var_width.get(),
if_type_present = 1)
def set_dash(self, *args):
self.document.SetProperties(line_dashes = self.opt_dash.GetValue(),
if_type_present = 1)
def init_dash(self, style):
dashes = style.line_dashes
draw_dash_bitmap(self.dash_gc, dashes)
dash_image = create_bitmap_image(self.top.tk, 'dash_image',
self.dash_bitmap)
self.opt_dash.SetValue(dashes, dash_image)
def set_arrow(self, arrow, which):
if which == 1:
self.document.SetProperties(line_arrow1 = arrow,
if_type_present = 1)
else:
self.document.SetProperties(line_arrow2 = arrow,
if_type_present = 1)
def init_arrow(self, style):
arrow = style.line_arrow1
draw_arrow_bitmap(self.arrow_gc, arrow, 1)
arrow_image = create_bitmap_image(self.top.tk, 'arrow1_image',
self.arrow_bitmap)
self.opt_arrow1.SetValue(arrow, arrow_image)
arrow = style.line_arrow2
draw_arrow_bitmap(self.arrow_gc, arrow, 2)
arrow_image = create_bitmap_image(self.top.tk, 'arrow2_image',
self.arrow_bitmap)
self.opt_arrow2.SetValue(arrow, arrow_image)
def update_from_object_cb(self, obj):
if obj is not None:
self.init_from_style(obj.Properties())
|
shumik/skencil-c
|
Sketch/UI/linedlg.py
|
Python
|
gpl-2.0
| 11,272 | 0.016856 |
# We try to improve the previous 'math_operation.py' by reducing the code.
# Here we introduce a concept called the list comprehension.
# Knowledge points:
# 1. list comprehension, the [x for x in a_list] form
# 2. dir() function to get the current environment variable names in scope.
# 3. "in" check: rather than a Java-style search method, we use "in" to check string existence.
# 4. "eval()" function to run the expression
# Still, we expect the user to input something
user_input = raw_input("Please input a sequence of numbers, like: 1 2 3.1 2.1 -3 9: \n")
# Split user_inputed numbers into individual numbers, store it in a list.
possible_numbers = user_input.strip().split(" ")
# We use a list comprehension to get rid of the "for" loop and reduce the amount of code.
float_numbers = [float(x) for x in possible_numbers]
# absolute numbers
absolute_numbers = [abs(x) for x in float_numbers]
# rounded numbers, in "int" style
int_numbers = [int(round(x)) for x in float_numbers]
import math
# floored numbers
floored_numbers = [math.floor(x) for x in float_numbers]
# ceiled numbers
ceil_numbers = [math.ceil(x) for x in float_numbers]
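# Sample run: the input "1 -2.5" yields float_numbers [1.0, -2.5],
# absolute_numbers [1.0, 2.5], int_numbers [1, -3] (Python 2's round() rounds
# halves away from zero), floored_numbers [1.0, -3.0], ceil_numbers [1.0, -2.0].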
# Alright, let's try to print all the numbers we have in a smart way:
# use the function "dir()"
env_variables = dir()
for var in env_variables:
if "_numbers" in var:
print var, ":", eval(var)
|
laalaguer/pythonlearn
|
01-basic/math_improved.py
|
Python
|
mit
| 1,306 | 0.004594 |
from django.conf.urls import url
from django.conf.urls import patterns
from pyday_alarms import views
app_name = 'pyday_alarms'
urlpatterns = [
url(r'^alarms/$', views.AlarmView.as_view(), name='alarms'),
]
'''urlpatterns += patterns('pyday_social_network.views',
url(r'^list/$', 'list', name='list'))
'''
|
6desislava6/PyDay
|
pyday_alarms/urls.py
|
Python
|
mit
| 336 | 0 |
#-----------------------------------------------------------------------------
# Copyright (c) 2017-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import copy_metadata
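# The google-cloud client libraries look up their own distribution metadata
# through pkg_resources at import time; copy_metadata() collects it into
# `datas` so that lookup still succeeds inside the frozen application.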
datas = copy_metadata('google-cloud-speech')
|
etherkit/OpenBeacon2
|
client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-google.cloud.speech.py
|
Python
|
gpl-3.0
| 603 | 0.003317 |
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
from kafkatest.version import LATEST_0_9, LATEST_0_10, TRUNK, KafkaVersion
class MessageFormatChangeTest(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(MessageFormatChangeTest, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
# Producer and consumer
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
self.messages_per_producer = 100
def produce_and_consume(self, producer_version, consumer_version, group):
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic,
throughput=self.producer_throughput,
message_validator=is_int,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, new_consumer=False, consumer_timeout_ms=30000,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.consumer.group_id = group
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
@parametrize(producer_version=str(TRUNK), consumer_version=str(TRUNK))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
def test_compatibility(self, producer_version, consumer_version):
""" This tests performs the following checks:
The workload is a mix of 0.9.x and 0.10.x producers and consumers
that produce to and consume from a 0.10.x cluster
1. initially the topic is using message format 0.9.0
2. change the message format version for topic to 0.10.0 on the fly.
3. change the message format version for topic back to 0.9.0 on the fly.
- The producers and consumers should not have any issue.
- Note that for 0.9.x consumers/producers we only do steps 1 and 2
"""
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=TRUNK, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
self.kafka.start()
self.logger.info("First format change to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group1")
self.logger.info("Second format change to 0.10.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, "group2")
if producer_version == str(TRUNK) and consumer_version == str(TRUNK):
self.logger.info("Third format change back to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group3")
|
geeag/kafka
|
tests/kafkatest/tests/client/message_format_change_test.py
|
Python
|
apache-2.0
| 4,644 | 0.004522 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the RetinaNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from absl import logging
import tensorflow.compat.v2 as tf
from tensorflow.python.keras import backend
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
class RetinanetModel(base_model.Model):
"""RetinaNet model function."""
def __init__(self, params):
super(RetinanetModel, self).__init__(params)
# For eval metrics.
self._params = params
# Architecture generators.
self._backbone_fn = factory.backbone_generator(params)
self._fpn_fn = factory.multilevel_features_generator(params)
self._head_fn = factory.retinanet_head_generator(params.retinanet_head)
# Loss function.
self._cls_loss_fn = losses.RetinanetClassLoss(params.retinanet_loss)
self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)
self._box_loss_weight = params.retinanet_loss.box_loss_weight
self._keras_model = None
# Predict function.
self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(
params.postprocess)
self._transpose_input = params.train.transpose_input
    assert not self._transpose_input, 'Transpose input is not supported.'
# Input layer.
input_shape = (
params.retinanet_parser.output_size +
[params.retinanet_parser.num_channels])
self._input_layer = tf.keras.layers.Input(
shape=input_shape, name='',
dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32)
def build_outputs(self, inputs, mode):
# If the input image is transposed (from NHWC to HWCN), we need to revert it
# back to the original shape before it's used in the computation.
if self._transpose_input:
inputs = tf.transpose(inputs, [3, 0, 1, 2])
backbone_features = self._backbone_fn(
inputs, is_training=(mode == mode_keys.TRAIN))
fpn_features = self._fpn_fn(
backbone_features, is_training=(mode == mode_keys.TRAIN))
cls_outputs, box_outputs = self._head_fn(
fpn_features, is_training=(mode == mode_keys.TRAIN))
if self._use_bfloat16:
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
model_outputs = {
'cls_outputs': cls_outputs,
'box_outputs': box_outputs,
}
return model_outputs
def build_loss_fn(self):
if self._keras_model is None:
raise ValueError('build_loss_fn() must be called after build_model().')
filter_fn = self.make_filter_trainable_variables_fn()
trainable_variables = filter_fn(self._keras_model.trainable_variables)
def _total_loss_fn(labels, outputs):
cls_loss = self._cls_loss_fn(outputs['cls_outputs'],
labels['cls_targets'],
labels['num_positives'])
box_loss = self._box_loss_fn(outputs['box_outputs'],
labels['box_targets'],
labels['num_positives'])
model_loss = cls_loss + self._box_loss_weight * box_loss
l2_regularization_loss = self.weight_decay_loss(self._l2_weight_decay,
trainable_variables)
total_loss = model_loss + l2_regularization_loss
return {
'total_loss': total_loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
'l2_regularization_loss': l2_regularization_loss,
}
return _total_loss_fn
def build_model(self, params, mode=None):
if self._keras_model is None:
with backend.get_graph().as_default():
outputs = self.model_outputs(self._input_layer, mode)
model = tf.keras.models.Model(
inputs=self._input_layer, outputs=outputs, name='retinanet')
assert model is not None, 'Fail to build tf.keras.Model.'
model.optimizer = self.build_optimizer()
self._keras_model = model
return self._keras_model
def post_processing(self, labels, outputs):
    # TODO(yeqing): Move the output-related part into build_outputs.
required_output_fields = ['cls_outputs', 'box_outputs']
for field in required_output_fields:
if field not in outputs:
        raise ValueError('"%s" is missing in outputs, required %s, found %s',
                         field, required_output_fields, outputs.keys())
required_label_fields = ['image_info', 'groundtruths']
for field in required_label_fields:
if field not in labels:
        raise ValueError('"%s" is missing in labels, required %s, found %s',
                         field, required_label_fields, labels.keys())
boxes, scores, classes, valid_detections = self._generate_detections_fn(
outputs['box_outputs'], outputs['cls_outputs'],
labels['anchor_boxes'], labels['image_info'][:, 1:2, :])
    # Discards the old output tensors to save memory. The `cls_outputs` and
    # `box_outputs` are pretty big and could potentially lead to memory issues.
outputs = {
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info'],
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
}
if 'groundtruths' in labels:
labels['source_id'] = labels['groundtruths']['source_id']
labels['boxes'] = labels['groundtruths']['boxes']
labels['classes'] = labels['groundtruths']['classes']
labels['areas'] = labels['groundtruths']['areas']
labels['is_crowds'] = labels['groundtruths']['is_crowds']
return labels, outputs
def eval_metrics(self):
return eval_factory.evaluator_generator(self._params.eval)
|
alexgorban/models
|
official/vision/detection/modeling/retinanet_model.py
|
Python
|
apache-2.0
| 6,957 | 0.003881 |
import glob
import os
import json
import sys
import argparse
from collections import defaultdict
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--screen-name", required=True, help="Screen name of twitter user")
args = vars(ap.parse_args())
SEED = args['screen_name']
users = defaultdict(lambda: { 'followers': 0 })
for f in glob.glob('twitter-users/*.json'):
print "loading " + str(f)
data = json.load(file(f))
screen_name = data['screen_name']
users[screen_name] = { 'followers': data['followers_count'], 'id':data['id'] }
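# Each following/<screen_name>.csv is assumed (given the parsing below) to
# hold one account per line as "<user_id>\t<screen_name>", tab-separated.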
def process_follower_list(screen_name, edges=None, depth=0, max_depth=5):
    f = os.path.join('following', screen_name + '.csv')
    print "processing " + str(f)
    if edges is None:
        # fresh list per top-level call; a mutable default would be shared across calls
        edges = []
    if not os.path.exists(f):
        return edges
followers = [line.strip().split('\t') for line in file(f)]
for follower_data in followers:
if len(follower_data) < 2:
continue
screen_name_2 = follower_data[1]
# use the number of followers for screen_name as the weight
weight = users[screen_name]['followers']
edges.append([users[screen_name]['id'], follower_data[0], weight])
if depth+1 < max_depth:
process_follower_list(screen_name_2, edges, depth+1, max_depth)
return edges
edges = process_follower_list(SEED, max_depth=5)
with open('twitter_network.csv', 'w') as outf:
edge_exists = {}
for edge in edges:
key = ','.join([str(x) for x in edge])
if not(key in edge_exists):
outf.write('%s,%s,%d\n' % (edge[0], edge[1], edge[2]))
edge_exists[key] = True
|
edent/Twitter-Networks
|
GenerateNetwork.py
|
Python
|
mit
| 1,614 | 0.005576 |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.common import progressbar
from glanceclient.common import utils
from glanceclient import exc
import json
import os
from os.path import expanduser
IMAGE_SCHEMA = None
def get_image_schema():
global IMAGE_SCHEMA
if IMAGE_SCHEMA is None:
schema_path = expanduser("~/.glanceclient/image_schema.json")
if os.path.exists(schema_path) and os.path.isfile(schema_path):
with file(schema_path, "r") as f:
schema_raw = f.read()
IMAGE_SCHEMA = json.loads(schema_raw)
return IMAGE_SCHEMA
@utils.schema_args(get_image_schema)
@utils.arg('--property', metavar="<key=value>", action='append',
default=[], help=('Arbitrary property to associate with image.'
' May be used multiple times.'))
def do_image_create(gc, args):
"""Create a new image."""
schema = gc.schemas.get("image")
_args = [(x[0].replace('-', '_'), x[1]) for x in vars(args).items()]
fields = dict(filter(lambda x: x[1] is not None and
(x[0] == 'property' or
schema.is_core_property(x[0])),
_args))
raw_properties = fields.pop('property', [])
for datum in raw_properties:
key, value = datum.split('=', 1)
fields[key] = value
image = gc.images.create(**fields)
ignore = ['self', 'access', 'file', 'schema']
image = dict([item for item in image.iteritems()
if item[0] not in ignore])
utils.print_dict(image)
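# CLI sketch (hypothetical values): `image-create --name foo --property
# kernel_id=123` reaches gc.images.create() as name='foo', kernel_id='123';
# each --property value is split on its first '=' by the loop above.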
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to update.')
@utils.schema_args(get_image_schema, omit=['id'])
@utils.arg('--property', metavar="<key=value>", action='append',
default=[], help=('Arbitrary property to associate with image.'
' May be used multiple times.'))
@utils.arg('--remove-property', metavar="key", action='append', default=[],
help="Name of arbitrary property to remove from the image")
def do_image_update(gc, args):
"""Update an existing image."""
schema = gc.schemas.get("image")
_args = [(x[0].replace('-', '_'), x[1]) for x in vars(args).items()]
fields = dict(filter(lambda x: x[1] is not None and
(x[0] in ['property', 'remove_property'] or
schema.is_core_property(x[0])),
_args))
raw_properties = fields.pop('property', [])
for datum in raw_properties:
key, value = datum.split('=', 1)
fields[key] = value
remove_properties = fields.pop('remove_property', None)
image_id = fields.pop('id')
image = gc.images.update(image_id, remove_properties, **fields)
ignore = ['self', 'access', 'file', 'schema']
image = dict([item for item in image.iteritems()
if item[0] not in ignore])
utils.print_dict(image)
@utils.arg('--page-size', metavar='<SIZE>', default=None, type=int,
help='Number of images to request in each paginated request.')
@utils.arg('--visibility', metavar='<VISIBILITY>',
help='The visibility of the images to display.')
@utils.arg('--member-status', metavar='<MEMBER_STATUS>',
help='The status of images to display.')
@utils.arg('--owner', metavar='<OWNER>',
help='Display images owned by <OWNER>.')
@utils.arg('--checksum', metavar='<CHECKSUM>',
help='Display images matching the checksum')
@utils.arg('--tag', metavar='<TAG>', action='append',
help="Filter images by an user-defined tag.")
def do_image_list(gc, args):
"""List images you can access."""
filter_keys = ['visibility', 'member_status', 'owner', 'checksum', 'tag']
filter_items = [(key, getattr(args, key)) for key in filter_keys]
filters = dict([item for item in filter_items if item[1] is not None])
kwargs = {'filters': filters}
if args.page_size is not None:
kwargs['page_size'] = args.page_size
images = gc.images.list(**kwargs)
columns = ['ID', 'Name']
utils.print_list(images, columns)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to describe.')
def do_image_show(gc, args):
"""Describe a specific image."""
image = gc.images.get(args.id)
ignore = ['self', 'access', 'file', 'schema']
image = dict([item for item in image.iteritems() if item[0] not in ignore])
utils.print_dict(image)
@utils.arg('--image-id', metavar='<IMAGE_ID>', required=True,
help='Image to display members of.')
def do_member_list(gc, args):
"""Describe sharing permissions by image."""
members = gc.image_members.list(args.image_id)
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list(members, columns)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image from which to remove member')
@utils.arg('member_id', metavar='<MEMBER_ID>',
help='Tenant to remove as member')
def do_member_delete(gc, args):
"""Delete image member"""
if not (args.image_id and args.member_id):
utils.exit('Unable to delete member. Specify image_id and member_id')
else:
gc.image_members.delete(args.image_id, args.member_id)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image from which to update member')
@utils.arg('member_id', metavar='<MEMBER_ID>',
help='Tenant to update')
@utils.arg('member_status', metavar='<MEMBER_STATUS>',
help='Updated status of member')
def do_member_update(gc, args):
"""Update the status of a member for a given image."""
if not (args.image_id and args.member_id and args.member_status):
utils.exit('Unable to update member. Specify image_id, member_id and'
' member_status')
else:
member = gc.image_members.update(args.image_id, args.member_id,
args.member_status)
member = [member]
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list(member, columns)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image on which to create member')
@utils.arg('member_id', metavar='<MEMBER_ID>',
help='Tenant to add as member')
def do_member_create(gc, args):
"""Create member for a given image."""
if not (args.image_id and args.member_id):
utils.exit('Unable to create member. Specify image_id and member_id')
else:
member = gc.image_members.create(args.image_id, args.member_id)
member = [member]
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list(member, columns)
@utils.arg('model', metavar='<MODEL>', help='Name of model to describe.')
def do_explain(gc, args):
"""Describe a specific model."""
try:
schema = gc.schemas.get(args.model)
except exc.HTTPNotFound:
utils.exit('Unable to find requested model \'%s\'' % args.model)
else:
formatters = {'Attribute': lambda m: m.name}
columns = ['Attribute', 'Description']
utils.print_list(schema.properties, columns, formatters)
@utils.arg('--file', metavar='<FILE>',
help='Local file to save downloaded image data to. '
'If this is not specified the image data will be '
'written to stdout.')
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to download.')
@utils.arg('--progress', action='store_true', default=False,
help='Show download progress bar.')
def do_image_download(gc, args):
"""Download a specific image."""
body = gc.images.data(args.id)
if args.progress:
body = progressbar.VerboseIteratorWrapper(body, len(body))
utils.save_image(body, args.file)
@utils.arg('--file', metavar='<FILE>',
help=('Local file that contains disk image to be uploaded'
' during creation. Alternatively, images can be passed'
' to the client via stdin.'))
@utils.arg('id', metavar='<IMAGE_ID>',
help='ID of image to upload data to.')
def do_image_upload(gc, args):
"""Upload data for a specific image."""
image_data = utils.get_data_file(args)
gc.images.upload(args.id, image_data)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to delete.')
def do_image_delete(gc, args):
"""Delete specified image."""
gc.images.delete(args.id)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image to be updated with the given tag')
@utils.arg('tag_value', metavar='<TAG_VALUE>',
help='Value of the tag')
def do_image_tag_update(gc, args):
"""Update an image with the given tag."""
if not (args.image_id and args.tag_value):
utils.exit('Unable to update tag. Specify image_id and tag_value')
else:
gc.image_tags.update(args.image_id, args.tag_value)
image = gc.images.get(args.image_id)
image = [image]
columns = ['ID', 'Tags']
utils.print_list(image, columns)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image whose tag to be deleted')
@utils.arg('tag_value', metavar='<TAG_VALUE>',
help='Value of the tag')
def do_image_tag_delete(gc, args):
"""Delete the tag associated with the given image."""
if not (args.image_id and args.tag_value):
utils.exit('Unable to delete tag. Specify image_id and tag_value')
else:
gc.image_tags.delete(args.image_id, args.tag_value)
|
ntt-sic/python-glanceclient
|
glanceclient/v2/shell.py
|
Python
|
apache-2.0
| 10,082 | 0.000099 |
class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
nums = [0 for _ in xrange(n + 1)]
for i in xrange(1, n + 1):
if i == 1:
nums[1] = 1
elif i == 2:
nums[2] = 2
else:
nums[i] = nums[i - 1] + nums[i - 2]
return nums[n]
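# A rolling-variable variant (a sketch, not part of the accepted solution):
# the recurrence nums[i] = nums[i - 1] + nums[i - 2] is the Fibonacci
# recurrence, so O(1) space suffices:
#
#     if n == 1:
#         return 1
#     a, b = 1, 2
#     for _ in xrange(3, n + 1):
#         a, b = b, a + b
#     return b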
|
Jacy-Wang/MyLeetCode
|
ClimbStairs70.py
|
Python
|
gpl-2.0
| 393 | 0 |
import rb
import rhythmdb
import dbus
import gconf
from xchat_music_channel.conf import gconf_keys, ConfDialog
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
class XChatMusicChannelPlugin(rb.Plugin):
def activate(self, shell):
gc = gconf.client_get_default()
self.server = gc.get_string(gconf_keys['server'])
self.channel = gc.get_string(gconf_keys['channel'])
self.shell = shell
self.player = shell.get_player()
self.event_id = self.player.connect('playing-song-changed', self.song_changed)
self.bus = dbus.SessionBus()
self.signal = None
self.xchat_object = None
self.xchat_hook = None
self.xchat_context = None
def deactivate(self, shell):
del self.xchat_context
if self.xchat_hook:
self.signal.remove()
self.get_xchat().Unhook(self.xchat_hook)
del self.xchat_hook
self.player.disconnect(self.event_id)
del self.event_id
del self.player
del self.shell
del self.channel
del self.server
del self.signal
del self.bus
def get_xchat(self):
xchat_object = self.bus.get_object('org.xchat.service', '/org/xchat/Remote')
return dbus.Interface(xchat_object, 'org.xchat.plugin')
def song_changed(self, player, entry):
xchat = self.get_xchat()
self.xchat_context = xchat.FindContext(self.server, self.channel)
if self.xchat_context:
try:
artist = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ARTIST)
title = self.shell.props.db.entry_get(entry, rhythmdb.PROP_TITLE)
album = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ALBUM)
except TypeError:
return
xchat.SetContext(self.xchat_context)
xchat.Command('say Playing: %s - %s (%s)' % (artist, title, album))
if not self.xchat_hook:
self.xchat_hook = xchat.HookPrint('Channel Message', 0, 0)
self.signal = self.bus.add_signal_receiver(
self.got_message,
'PrintSignal',
'org.xchat.plugin',
'org.xchat.service',
'/org/xchat/Remote'
)
elif self.xchat_hook:
self.signal.remove()
xchat.Unhook(self.xchat_hook)
self.xchat_hook = None
def got_message(self, data, priority, context):
if context == self.xchat_context:
msg = str(data[1])
if msg == 'next':
self.player.do_next()
def create_configure_dialog(self, dialog=None):
if not dialog:
builder_file = self.find_file('conf.ui')
dialog = ConfDialog(builder_file).dialog
dialog.present()
return dialog
|
jlecker/rhythmbox-xchat-music-channel
|
xchat_music_channel/__init__.py
|
Python
|
mit
| 2,960 | 0.004392 |
#! /usr/bin/env python
'''lint for sets of items
There are many things that really shouldn't exist with a set of items. One
of the biggest is that the dependencies should form a directed acyclic graph,
with absolutely no cycles.
e.g. given the set of items {a.b,c}, if a depends on b, b depends on c,
and c depends on a, there is a cycle in the dependency graph, which means
there is no valid order to satisfy the dependencies in:
a -> b -> c
^ |
`-------------'
Fig 1: Badness
This set of lint tools helps check for this and more. The bigger the set of
items and the more complex the dependencies in them, the more likely it
is that humans are going to miss something. This is no substitute for
humans putting in the correct data in the first place (like all lint tools,
this isn't going to pick up most errors, just some of them), but it should
help pick up some dire ones
'''
import core.bits
class LintError(Exception): pass
def check_dependencies_are_instances(item):
    '''check that the item, its contents, and its dependencies are instances, not classes
This is very important for task instances. You should never mix references between
Item instances in a task and the classes that they are built from. Obvious as this
seems, with the way that marshalling and unmarshalling happens, it is possible if there
are bugs in the loader or marshaller
We could check that they were instances of BaseItem, but that would be pretty un-pythonic
raises LintError if the item is of type 'type'
raises LintError unless all members of item.depends are not of type 'type'
    raises LintError if any member of item.depends is of type 'type'
    raises LintError if any member of item.contains is of type 'type'
if type(item) == type:
raise LintError("item is not an instance type",item)
for dep in item.depends:
if type(dep) == type:
raise LintError("item dependency is not an instance type",item,dep)
contains = getattr(item, 'contains', None)
if contains is not None:
for dep in item.contains:
if type(dep) == type:
raise LintError("group content is not an instance type",item,dep)
def check_predicate_returns_boolean(item):
    '''check that an item's predicate returns True or False, raising LintError otherwise
TODO: Figure out if this is a good idea. It's not very pythonic
'''
ret = item.predicate([])
if ret is not True and ret is not False:
raise LintError('item predicate does not return True or False', item, item.predicate)
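# The module docstring motivates a cycle check, but none is implemented above.
# Below is a minimal sketch of one (an addition, not part of the original lint
# set), assuming only that items expose a .depends iterable as the other checks
# do: a depth-first walk tracks the items on the current recursion stack, and
# revisiting a stacked item means the dependency graph contains a cycle.
def check_no_dependency_cycles(items):
    '''raise LintError if the dependency graph over items contains a cycle'''
    done = set()
    def visit(item, stack):
        if id(item) in done:
            return
        if id(item) in stack:
            raise LintError("dependency cycle detected", item)
        stack.add(id(item))
        for dep in item.depends:
            visit(dep, stack)
        stack.remove(id(item))
        done.add(id(item))
    for item in items:
        visit(item, set())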
if __name__ == "__main__":
class TestItem(object):
predicate = lambda x: True
depends = ()
try: # should fail
check_dependencies_are_instances(TestItem)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
# Should be fine
check_dependencies_are_instances(TestItem())
try: # should fail
testinst = TestItem()
testinstb = TestItem()
testinst.depends = (testinstb, TestItem)
check_dependencies_are_instances(testinst)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
# Should be fine
testinst = TestItem()
testinstb = TestItem()
testinst.depends = (testinstb,)
check_dependencies_are_instances(testinst)
try: # should fail
testinst = TestItem()
testinstb = TestItem()
testinst.contains = (testinstb, TestItem)
check_dependencies_are_instances(testinst)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
# Should be fine
testinst = TestItem()
testinstb = TestItem()
testinst.contains = (testinstb,)
check_dependencies_are_instances(testinst)
try: # should fail
testinst = TestItem()
testinst.predicate = lambda x: "Oh joy"
check_predicate_returns_boolean(testinst)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
    # Should be fine
testinst = TestItem()
testinst.predicate = lambda x: True
check_predicate_returns_boolean(testinst)
testinst.predicate = lambda x: False
check_predicate_returns_boolean(testinst)
|
anchor/make-magic
|
tools/lint.py
|
Python
|
bsd-3-clause
| 3,918 | 0.026289 |
# -*- coding: utf-8 -*-
"""
Unit tests covering the program listing and detail pages.
"""
import json
import re
from urlparse import urljoin
from uuid import uuid4
import mock
from bs4 import BeautifulSoup
from django.conf import settings
from django.urls import reverse, reverse_lazy
from django.test import override_settings
from lms.envs.test import CREDENTIALS_PUBLIC_SERVICE_URL
from openedx.core.djangoapps.catalog.tests.factories import CourseFactory, CourseRunFactory, ProgramFactory
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.credentials import STUDENT_RECORDS_FLAG
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory as ModuleStoreCourseFactory
PROGRAMS_UTILS_MODULE = 'openedx.core.djangoapps.programs.utils'
@skip_unless_lms
@override_settings(MKTG_URLS={'ROOT': 'https://www.example.com'})
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
class TestProgramListing(ProgramsApiConfigMixin, SharedModuleStoreTestCase):
"""Unit tests for the program listing page."""
shard = 4
maxDiff = None
password = 'test'
url = reverse_lazy('program_listing_view')
@classmethod
def setUpClass(cls):
super(TestProgramListing, cls).setUpClass()
cls.course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(cls.course.id)) # pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.first_program = ProgramFactory(courses=[course])
cls.second_program = ProgramFactory(courses=[course])
cls.data = sorted([cls.first_program, cls.second_program], key=cls.program_sort_key)
def setUp(self):
super(TestProgramListing, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
@classmethod
def program_sort_key(cls, program):
"""
Helper function used to sort dictionaries representing programs.
"""
return program['title']
def load_serialized_data(self, response, key):
"""
Extract and deserialize serialized data from the response.
"""
pattern = re.compile(r'{key}: (?P<data>\[.*\])'.format(key=key))
match = pattern.search(response.content)
serialized = match.group('data')
return json.loads(serialized)
def assert_dict_contains_subset(self, superset, subset):
"""
Verify that the dict superset contains the dict subset.
Works like assertDictContainsSubset, deprecated since Python 3.2.
See: https://docs.python.org/2.7/library/unittest.html#unittest.TestCase.assertDictContainsSubset.
"""
superset_keys = set(superset.keys())
subset_keys = set(subset.keys())
intersection = {key: superset[key] for key in superset_keys & subset_keys}
self.assertEqual(subset, intersection)
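        # e.g. superset {'a': 1, 'b': 2} contains subset {'a': 1}: only the
        # keys present in both dicts are compared, mirroring the old API.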
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_empty_state(self, mock_get_programs):
"""
        Verify that the response contains no programs data when the user is not engaged with any programs.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
response = self.client.get(self.url)
self.assertContains(response, 'programsData: []')
def test_programs_listed(self, mock_get_programs):
"""
Verify that the response contains accurate programs data when programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
self.assert_dict_contains_subset(actual_program, expected_program)
def test_program_discovery(self, mock_get_programs):
"""
Verify that a link to a programs marketing page appears in the response.
"""
self.create_programs_config(marketing_path='bar')
mock_get_programs.return_value = self.data
marketing_root = urljoin(settings.MKTG_URLS.get('ROOT'), 'bar').rstrip('/')
response = self.client.get(self.url)
self.assertContains(response, marketing_root)
def test_links_to_detail_pages(self, mock_get_programs):
"""
Verify that links to detail pages are present.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
expected_url = reverse('program_details_view', kwargs={'program_uuid': expected_program['uuid']})
self.assertEqual(actual_program['detail_url'], expected_url)
@skip_unless_lms
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
@override_waffle_flag(STUDENT_RECORDS_FLAG, active=True)
class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, SharedModuleStoreTestCase):
"""Unit tests for the program details page."""
shard = 4
program_uuid = str(uuid4())
password = 'test'
url = reverse_lazy('program_details_view', kwargs={'program_uuid': program_uuid})
@classmethod
def setUpClass(cls):
super(TestProgramDetails, cls).setUpClass()
modulestore_course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(modulestore_course.id))
course = CourseFactory(course_runs=[course_run])
cls.data = ProgramFactory(uuid=cls.program_uuid, courses=[course])
def setUp(self):
super(TestProgramDetails, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
def assert_program_data_present(self, response):
"""Verify that program data is present."""
self.assertContains(response, 'programData')
self.assertContains(response, 'urls')
self.assertContains(response,
'"program_record_url": "{}/records/programs/'.format(CREDENTIALS_PUBLIC_SERVICE_URL))
self.assertContains(response, 'program_listing_url')
self.assertContains(response, self.data['title'])
self.assert_programs_tab_present(response)
def assert_programs_tab_present(self, response):
"""Verify that the programs tab is present in the nav."""
soup = BeautifulSoup(response.content, 'html.parser')
self.assertTrue(
any(soup.find_all('a', class_='tab-nav-link', href=reverse('program_listing_view')))
)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
catalog_integration = self.create_catalog_integration()
UserFactory(username=catalog_integration.service_username)
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
with mock.patch('lms.djangoapps.learner_dashboard.programs.get_certificates') as certs:
certs.return_value = [{'type': 'program', 'url': '/'}]
response = self.client.get(self.url)
self.assert_program_data_present(response)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_404_if_no_data(self, mock_get_programs):
"""Verify that the page 404s if no program data is found."""
self.create_programs_config()
mock_get_programs.return_value = None
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
|
gsehub/edx-platform
|
lms/djangoapps/learner_dashboard/tests/test_programs.py
|
Python
|
agpl-3.0
| 9,854 | 0.00203 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._vmware_cloud_simple import VMwareCloudSimple
from ._version import VERSION
__version__ = VERSION
__all__ = ['VMwareCloudSimple']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-vmwarecloudsimple/azure/mgmt/vmwarecloudsimple/__init__.py
|
Python
|
mit
| 706 | 0.002833 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^(?P<quiz_slug>[-A-Za-z0-9_]+)/$', views.quiz, name='quiz'),
url(r'^(?P<quiz_slug>[-A-Za-z0-9_]+)/(?P<question_slug>[-A-Za-z0-9_]+)/$', views.question, name='question')
]
|
super1337/Super1337-CTF
|
questionnaire/urls.py
|
Python
|
mit
| 299 | 0.003344 |
import warnings
from collections import namedtuple, defaultdict
#Node = namedtuple('Node', ('id', 'data', 'edges', 'in_edges'))
#Edge = namedtuple('Edge', ('start', 'end', 'label', 'data', 'directed'))
class MiniGraphError(Exception): pass
class MiniGraphWarning(Warning): pass
# todo: consider functools.lru_cache for the retrieval methods
class MiniGraph(object):
__slots__ = ('_graph',)
def __init__(self, nodes=None, edges=None):
self._graph = {}
# nodes
if nodes is None:
nodes = {}
self.add_nodes(nodes)
# edges
if edges is None:
edges = {}
self.add_edges(edges)
@classmethod
def fast_init(cls, nodes=None, edges=None):
"""
Initializes the graph without argument checking of edges, which
means that all edges must be 5-tuples of:
(start, end, label, data, directed)
"""
mg = cls(nodes)
if edges is not None:
mg._fast_add_edges1(edges)
return mg
@classmethod
def fast_init2(cls, nodes, edges=None):
"""
Initializes the graph without argument checking of edges, which
means that all edges must be 5-tuples of:
(start, end, label, data, directed)
Furthermore, all edges must only uses nodes specified in the
nodes argument.
"""
mg = cls(nodes)
if edges is not None:
mg._fast_add_edges2(edges)
return mg
def __getitem__(self, idx):
"""
Fancy graph queries:
        - if idx is a node id, return the (nodeid, data) pair for that node
- if idx is a slice, return the edges matching
start:end:label. Note that not specifying the label uses
the label of None, which is a valid label. If you want to
consider all labels, use Ellipsis: (g[0:1:...]). All edges
can be retrieved with g[::...].
"""
try:
start, end, label = idx.start, idx.stop, idx.step
if label is Ellipsis:
return self.find_edges(start, end)
else:
return self.find_edges(start, end, label=label)
except AttributeError:
            return (idx, self._graph[idx][1])
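    # Usage sketch (hypothetical data): for g = MiniGraph(edges=[(0, 1, 'x')]),
    #   g[0]        -> (0, {})                  a node id and its data dict
    #   g[0:1:'x']  -> [(0, 1, 'x', {}, True)]  edges 0 -> 1 with label 'x'
    #   g[0:1:...]  -> the same edges, with any label
    #   g[::...]    -> every edge in the graph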
def add_node(self, nodeid, data=None):
# if nodeid in self.nodes:
# raise MiniGraphError('Node already exists: {}'.format(nodeid))
#self.nodes[nodeid] = dict(data or [])
if data is None:
data = {}
if nodeid in self._graph:
self._graph[nodeid][1].update(data)
else:
self._graph[nodeid] = (nodeid, data, {}, {})
def add_nodes(self, nodes):
for node in nodes:
try:
node, data = node
except TypeError:
data = {}
self.add_node(node, data=data)
def remove_node(self, nodeid):
g = self._graph
if nodeid not in g:
raise KeyError(nodeid)
_prune_edges(g, nodeid)
del g[nodeid]
def node(self, nodeid):
return self._graph[nodeid]
def nodes(self):
return [(nid, n[1]) for nid, n in self._graph.items()]
def add_edge(self, start, end, label=None, data=None, directed=True):
self.add_edges([(start, end, label, data, directed)])
#@profile
def add_edges(self, edges):
g = self._graph
add_edge = _add_edge
for edge in edges:
edgelen = len(edge)
if edgelen == 5:
start, end, label, data, directed = edge
elif edgelen == 2:
start, end = edge; label = data = None; directed = True
elif edgelen == 4:
start, end, label, data = edge; directed = True
elif edgelen == 3:
start, end, label = edge; data = None; directed = True
else:
raise MiniGraphError('Invalid edge: {}'.format(edge))
if data is None: data = {}
if start not in g: g[start] = (start, {}, {}, {})
if end not in g: g[end] = (end, {}, {}, {})
e = (start, end, label, data, directed)
#add_edge(g[start][2], label, end, e)
d = g[start][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
#add_edge(g[end][3], label, start, e)
d = g[end][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
if directed is False:
#add_edge(g[end][2], label, start, e)
d = g[end][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
#add_edge(g[start][3], label, end, e)
d = g[start][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
def _fast_add_edges1(self, edges):
g = self._graph
add_edge = _add_edge
for e in edges:
start = e[0]
end = e[1]
label = e[2]
directed = e[4]
if start not in g:
g[start] = (start, {}, {}, {})
if end not in g:
g[end] = (end, {}, {}, {})
#add_edge(g[start][2], label, end, e)
d = g[start][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
#add_edge(g[end][3], label, start, e)
d = g[end][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
if directed is False:
#add_edge(g[end][2], label, start, e)
d = g[end][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
#add_edge(g[start][3], label, end, e)
d = g[start][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
def _fast_add_edges2(self, edges):
g = self._graph
add_edge = _add_edge
for e in edges:
start = e[0]
end = e[1]
label = e[2]
directed = e[4]
add_edge(g[start][2], label, end, e)
add_edge(g[end][3], label, start, e)
if directed is False:
add_edge(g[end][2], label, start, e)
add_edge(g[start][3], label, end, e)
def remove_edge(self, start, end, label=None, directed=None):
g = self._graph
if start not in g: raise KeyError(start)
edges = g[start][2]
if label not in edges: raise KeyError(label)
if end not in edges[label]: raise KeyError(end)
_dir = g[start][2][label][end][4]
if directed is not None:
assert _dir == directed
try:
in_edges = g[end][3]
del edges[label][end]
if len(edges[label]) == 0:
del edges[label]
del in_edges[label][start]
if len(in_edges[label]) == 0:
del in_edges[label]
# undirected links are listed twice (except simple loops)
if not _dir and start != end:
edges = g[end][2]
in_edges = g[start][3]
del edges[label][start]
if len(edges[label]) == 0:
del edges[label]
del in_edges[label][end]
if len(in_edges[label]) == 0:
del in_edges[label]
except KeyError:
raise
warnings.warn(
'Unexpected KeyError while removing {} edge ({}, {}, {})'
.format('directed' if directed else 'undirected',
start, end, label),
MiniGraphWarning
)
def edge(self, start, end, label=None, directed=None):
e = self._graph[start][2][label][end]
if directed is not None:
assert e[4] == directed
return e
def edges(self):
return [e
for nid, n in self._graph.items()
for ed in n[2].values()
for e in ed.values()
# only include undirected links from the source node (whatever
# the source node was when it was instantiated)
if e[4] or e[0] == nid
]
    def find_edges(self, start=None, end=None, **kwargs):
        if start is Ellipsis: start = None
        if end is Ellipsis: end = None
        g = self._graph
        # candidate (nodeid, node) pairs, filtered by start if specified
        if start is None:
            xs = list(g.items())
        else:
            xs = [(start, g[start])] if start in g else []
        # filter by label, if specified (note that None is a valid label)
        if 'label' in kwargs:
            lbl = kwargs['label']
            lds = [(nid, n[2][lbl]) for nid, n in xs if lbl in n[2]]
        else:
            lds = [(nid, ld) for nid, n in xs for ld in n[2].values()]
        # filter by end, if specified
        if end is None:
            es = [(nid, e) for nid, ld in lds for e in ld.values()]
        else:
            es = [(nid, ld[end]) for nid, ld in lds if end in ld]
        # when scanning every node, report each undirected edge only from its
        # original source so it does not appear twice
        es = [e for nid, e in es if e[4] or e[0] == nid or start is not None]
        # filter by directedness, if specified
        if kwargs.get('directed') is not None:
            es = [e for e in es if e[4] == kwargs['directed']]
        # filter by data, if specified
        if 'data' in kwargs:
            data = kwargs['data']
            es = [e for e in es
                  if all(e[3].get(k) == v for k, v in data.items())]
        return es
def order(self):
return len(self._graph)
def size(self):
return len(self.edges())
def degree(self, nodeid):
n = self._graph[nodeid]
return (
sum(len(ed) for ed in n[2].values()) +
len([
e for ed in n[3].values() for e in ed.values()
# only count undirected edges here if they are simple loops
if e[4] or e[0] == e[1]
])
)
def out_degree(self, nodeid):
n = self._graph[nodeid]
return sum(len(ed) for ed in n[2].values())
# return (
# sum(len(ed) for ed in n[2].values()) +
# len([e for ed in n[3].values()
# for e in ed.values()
# if e[4] == False and e[0] != e[1]])
# )
def in_degree(self, nodeid):
n = self._graph[nodeid]
return sum(len(ed) for ed in n[3].values())
# return (
# sum(len(ed) for ed in n[3].values()) +
# len([e for ed in n[2].values()
# for e in ed.values()
# if e[4] == False and e[0] != e[1]])
# )
def subgraph(self, nodeids):
g = self._graph
nidset = set(nodeids)
return MiniGraph(
nodes=[(nid, g[nid][1]) for nid in nodeids],
edges=[e for start in nodeids
for label, ed in g[start][2].items()
for end, e in ed.items() if end in nidset]
)
# def connected(self):
# nodeset = set()
# remaining = set(self.nodes.keys())
# for start in self.nodes:
# if node not in nodeset:
# nodeset.add(node)
# def _degree(nodeid, edgedicts):
# ds = []
# for d in edgedicts:
# if nodeid in d:
# ds.append(d[nodeid])
# return sum(len(ld) for d in ds for ld in d.values())
def _prune_edges(graph, nodeid):
g = graph[nodeid]
# forward links; remove reverse links on ends
edict = defaultdict(list)
for ed in g[2].values():
for e in ed.values():
if e[1] != nodeid: # this will get removed anyway
edict[e[1]].append(e)
for end, es in edict.items():
ld = graph[end][3]
for e in es:
del ld[e[2]][e[0]]
if len(ld[e[2]]) == 0:
del ld[e[2]]
# backward links; remove forward links on starts
edict = defaultdict(list)
for ed in g[3].values():
for e in ed.values():
if e[0] != nodeid: # this will get removed anyway
edict[e[0]].append(e)
for start, es in edict.items():
ld = graph[start][2]
for e in es:
del ld[e[2]][e[1]]
if len(ld[e[2]]) == 0:
del ld[e[2]]
# for a bit more speed, this can be inlined directly
def _add_edge(d, label, idx, e):
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if idx not in innerdict:
innerdict[idx] = e
else:
if innerdict[idx][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[idx][3].update(e[3])
|
goodmami/minigraph
|
minigraph.py
|
Python
|
mit
| 16,121 | 0.003102 |
#
# Solution to Project Euler problem 52
# Philippe Legault
#
# https://github.com/Bathlamos/Project-Euler-Solutions
import itertools
def compute():
c = 1
while True:
lists = [digits(c * n) for n in range(1, 7)]
if len(set(lists)) == 1: # Check that all elements are equal
return c
c += 1
def digits(n):
res = []
while n != 0:
res.append(n % 10)
n /= 10
return tuple(sorted(res))
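# Worked example from the problem statement: 125874 doubled is 251748, and
# digits() maps both to (1, 2, 4, 5, 7, 8), i.e. one is a permutation of the
# other; compute() searches for the smallest c where this holds for c..6c.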
if __name__ == "__main__":
print(compute())
|
Bathlamos/Project-Euler-Solutions
|
solutions/p052.py
|
Python
|
mit
| 451 | 0.035477 |
# Test for creating custom render targets.
from SRPScripting import *
import utils
rt = ri.CreateRenderTarget()
testTexCallback = utils.GetTestTextureCallback(ri, rt, "FullscreenTexture_PS", "tex")
def RenderFrame(context):
context.Clear((1, 0.5, 0, 1), [rt])
testTexCallback(context)
ri.SetFrameCallback(RenderFrame)
|
simontaylor81/Syrup
|
SRPTests/TestScripts/Python/RenderTarget.py
|
Python
|
mit
| 326 | 0.021472 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Delaunay.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from sextante.core.QGisLayers import QGisLayers
from sextante.parameters.ParameterVector import ParameterVector
from sextante.outputs.OutputVector import OutputVector
from sextante.algs.ftools import voronoi
class Delaunay(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
#===========================================================================
# def getIcon(self):
# return QtGui.QIcon(os.path.dirname(__file__) + "/icons/delaunay.png")
#===========================================================================
def defineCharacteristics(self):
self.name = "Delaunay triangulation"
self.group = "Vector geometry tools"
self.addParameter(ParameterVector(self.INPUT, "Input layer", ParameterVector.VECTOR_TYPE_POINT))
self.addOutput(OutputVector(self.OUTPUT, "Delaunay triangulation"))
def processAlgorithm(self, progress):
layer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT))
fields = [QgsField("POINTA", QVariant.Double, "", 24, 15),
QgsField("POINTB", QVariant.Double, "", 24, 15),
QgsField("POINTC", QVariant.Double, "", 24, 15)
]
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
QGis.WKBPolygon, layer.crs())
pts = []
ptDict = {}
ptNdx = -1
c = voronoi.Context()
features = QGisLayers.features(layer)
for inFeat in features:
geom = QgsGeometry(inFeat.geometry())
point = geom.asPoint()
x = point.x()
y = point.y()
pts.append((x, y))
ptNdx += 1
ptDict[ptNdx] = inFeat.id()
if len(pts) < 3:
raise GeoAlgorithmExecutionException("Input file should contain at least 3 points. Choose another file and try again.")
        uniqueSet = set(pts)
ids = [pts.index(item) for item in uniqueSet]
sl = voronoi.SiteList([voronoi.Site(*i) for i in uniqueSet])
c.triangulate = True
voronoi.voronoi(sl, c)
triangles = c.triangles
feat = QgsFeature()
current = 0
total = 100.0 / float(len(triangles))
for triangle in triangles:
indicies = list(triangle)
indicies.append(indicies[0])
polygon = []
attrs = []
step = 0
for index in indicies:
request = QgsFeatureRequest().setFilterFid(ptDict[ids[index]])
inFeat = layer.getFeatures(request).next()
geom = QgsGeometry(inFeat.geometry())
point = QgsPoint(geom.asPoint())
polygon.append(point)
if step <= 3:
attrs.append(ids[index])
step += 1
feat.setAttributes(attrs)
geometry = QgsGeometry().fromPolygon([polygon])
feat.setGeometry(geometry)
writer.addFeature(feat)
current += 1
progress.setPercentage(int(current * total))
del writer
|
alexgleith/Quantum-GIS
|
python/plugins/sextante/algs/ftools/Delaunay.py
|
Python
|
gpl-2.0
| 4,431 | 0.002257 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-19 19:08
from __future__ import unicode_literals
import channels.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("channels", "0001_add_tokens"),
]
operations = [
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("post_id", channels.models.Base36IntegerField()),
("comment_id", channels.models.Base36IntegerField(null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AlterUniqueTogether(
name="subscription",
unique_together=set([("user", "post_id", "comment_id")]),
),
migrations.AlterIndexTogether(
name="subscription", index_together=set([("post_id", "comment_id")])
),
]
|
mitodl/open-discussions
|
channels/migrations/0002_add_subscription.py
|
Python
|
bsd-3-clause
| 1,675 | 0.000597 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import fixture as config_fixture
from keystone.identity.backends import ldap
from keystone.tests.unit import core
from keystone.tests.unit.identity.backends import test_base
from keystone.tests.unit.ksfixtures import ldapdb
class TestIdentityDriver(core.BaseTestCase,
test_base.IdentityDriverTests):
allows_name_update = False
allows_self_service_change_password = False
expected_is_domain_aware = False
expected_default_assignment_driver = 'sql'
expected_is_sql = False
expected_generates_uuids = False
def setUp(self):
super(TestIdentityDriver, self).setUp()
config_fixture_ = self.useFixture(config_fixture.Config())
config_fixture_.config(
group='ldap',
url='fake://memory',
user='cn=Admin',
password='password',
suffix='cn=example,cn=com')
self.useFixture(ldapdb.LDAPDatabase())
self.driver = ldap.Identity()
|
ilay09/keystone
|
keystone/tests/unit/identity/backends/test_ldap.py
|
Python
|
apache-2.0
| 1,538 | 0 |
#!/usr/bin/python
import sys
sys.path.append("/Users/niels/git/nanokong/tools/python")
import wkpf
from wkpf import WuObject
numericInputWuObject = WuObject(nodeId=1, portNumber=1, wuclassId=3)
lightSensorWuObject = WuObject(nodeId=1, portNumber=2, wuclassId=5)
thresholdWuObjectScenario1 = WuObject(nodeId=1, portNumber=3, wuclassId=1)
thresholdWuObjectScenario2 = WuObject(nodeId=3, portNumber=3, wuclassId=1)
occupancyWuObject = WuObject(nodeId=1, portNumber=5, wuclassId=0x1005)
andGateWuObject = WuObject(nodeId=3, portNumber=6, wuclassId=0x1006)
lightWuObject = WuObject(nodeId=3, portNumber=4, wuclassId=4)
wuobjectsNode1 = wkpf.getWuObjectList(1)
wuobjectsNode3 = wkpf.getWuObjectList(3)
wuclasses = wkpf.getWuClassList(3)
if 0x1006 in wuclasses: # Scenario 2
light_sensor_value = wkpf.getProperty(lightSensorWuObject, propertyNumber=0)
input_value = wkpf.getProperty(numericInputWuObject, propertyNumber=0)
threshold_operator = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=0)
threshold_threshold = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=1)
threshold_value = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=2)
threshold_output = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=3)
occupancy_value = wkpf.getProperty(occupancyWuObject, propertyNumber=0)
andgate_in1 = wkpf.getProperty(andGateWuObject, propertyNumber=0)
andgate_in2 = wkpf.getProperty(andGateWuObject, propertyNumber=1)
andgate_out = wkpf.getProperty(andGateWuObject, propertyNumber=2)
light_value = wkpf.getProperty(lightWuObject, propertyNumber=0)
print ""
print ""
print "=== Light sensor"
print "value:", light_sensor_value
print "=== Input"
print "value:", input_value
print "=== Threshold"
print "operator:", threshold_operator
print "threshold:", threshold_threshold
print "value:", threshold_value
print "output:", threshold_output
print "=== Occupacy"
print "value:", occupancy_value
print "=== And Gate"
print "in1 (threshold):", andgate_in1
print "in2 (occupancy):", andgate_in2
print "value:", andgate_out
print "=== Light"
print "value:", light_value
print "=== WuObjects on node 1"
print wuobjectsNode1
print "=== WuObjects on node 3"
print wuobjectsNode3
else: # Scenario 1
light_sensor_value = wkpf.getProperty(lightSensorWuObject, propertyNumber=0)
input_value = wkpf.getProperty(numericInputWuObject, propertyNumber=0)
threshold_operator = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=0)
threshold_threshold = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=1)
threshold_value = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=2)
threshold_output = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=3)
light_value = wkpf.getProperty(lightWuObject, propertyNumber=0)
print ""
print ""
print "=== Light sensor"
print "value:", light_sensor_value
print "=== Input"
print "value:", input_value
print "=== Threshold"
print "operator:", threshold_operator
print "threshold:", threshold_threshold
print "value:", threshold_value
print "output:", threshold_output
print "=== Light"
print "value:", light_value
print "=== WuObjects on node 1"
print wuobjectsNode1
print "=== WuObjects on node 3"
print wuobjectsNode3
|
wukong-m2m/NanoKong
|
tools/demo20120423/showNodeStatus.py
|
Python
|
gpl-2.0
| 3,336 | 0.021882 |
# Copyright (c) 2016, Kevin Rodgers
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
import redis
from uuid import uuid4
UUID = 'uuid'
class SimpleRedisDb(object):
def __init__(self, host, key, port=6379):
"""
:param host: database host
        :param port: database port, default 6379
        :param key: name of the redis hash that stores the records
"""
self.host = host
self.port = port
self.key = key
self.redis = None
def __str__(self):
return 'SimpleRedisDb [host=%s, port=%d, key=%s, redis=%s]' % \
(self.host, self.port, self.key, str(self.redis))
def __del__(self):
"""
Close database on delete of object
:return:
"""
self._close_db()
def _open_db(self):
"""
Opens/reopens database if necessary
:return:
"""
if self.redis is None:
self.redis = redis.Redis(connection_pool=redis.ConnectionPool(host=self.host, port=self.port, db=0))
def _close_db(self):
"""
Closes database
:return:
"""
if self.redis is not None:
del self.redis
self.redis = None
def _clear_all(self):
"""
Removes all keys from hash set
:return:
"""
self._open_db()
record_keys = self.redis.hkeys(self.key)
for u in record_keys:
self.redis.hdel(self.key, u)
def get_record(self, record_key):
"""
Return record dictionary for specified UUID
:param record_key:
:return:
record dictionary or None if not found
"""
self._open_db()
record_str = self.redis.hget(self.key, record_key)
if record_str is None:
raise LookupError('%s key not found' % record_key)
return eval(record_str)
def get_all_records(self):
"""
Return a list of all records
:return:
list of all record dictionaries
"""
self._open_db()
all_records = []
record_keys = self.redis.hkeys(self.key)
for u in record_keys:
record_str = self.redis.hget(self.key, u)
if record_str is not None:
all_records.append(eval(record_str))
return all_records
def add_record(self, record_dict):
"""
Add a record to the hash set, auto generate UUID
:param record_dict: record dictionary
:return:
hash set key or UUID generated for volume dictionary
"""
self._open_db()
record_dict[UUID] = unicode(uuid4())
self.redis.hset(self.key, record_dict[UUID], unicode(record_dict))
return record_dict[UUID]
def delete_record(self, record_key):
"""
Delete record from hash set by UUID
:param record_key:
:return:
"""
self._open_db()
self.redis.hdel(self.key, record_key)
def update_record(self, record_key, record_dict):
"""
Update/replace record dictionary by UUID
:param record_key: UUID
:param record_dict: volume dictionary
:return:
"""
self._open_db()
record_dict[UUID] = record_key
self.redis.hset(self.key, record_dict[UUID], unicode(record_dict))
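# Usage sketch (assumes a reachable Redis server; the key name mirrors the
# repository's name and is illustrative):
#   db = SimpleRedisDb('localhost', 'active_mail_filter')
#   record_key = db.add_record({'user': 'kevin'})
#   record = db.get_record(record_key)
#   db.update_record(record_key, {'user': 'kevin', 'enabled': True})
#   db.delete_record(record_key)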
|
kfrodgers/active-mail-filter
|
active_mail_filter/simple_db.py
|
Python
|
bsd-2-clause
| 3,390 | 0.000295 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import werkzeug
from odoo import http, _
from odoo.addons.auth_signup.models.res_users import SignupError
from odoo.addons.web.controllers.main import ensure_db, Home
from odoo.exceptions import UserError
from odoo.http import request
_logger = logging.getLogger(__name__)
class AuthSignupHome(Home):
@http.route()
def web_login(self, *args, **kw):
ensure_db()
response = super(AuthSignupHome, self).web_login(*args, **kw)
response.qcontext.update(self.get_auth_signup_config())
if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
# Redirect if already logged in and redirect param is present
return http.redirect_with_hash(request.params.get('redirect'))
return response
@http.route('/web/signup', type='http', auth='public', website=True, sitemap=False)
def web_auth_signup(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('signup_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
self.do_signup(qcontext)
# Send an account creation confirmation email
if qcontext.get('token'):
user_sudo = request.env['res.users'].sudo().search([('login', '=', qcontext.get('login'))])
template = request.env.ref('auth_signup.mail_template_user_signup_account_created', raise_if_not_found=False)
if user_sudo and template:
template.sudo().with_context(
lang=user_sudo.lang,
auth_login=werkzeug.url_encode({'auth_login': user_sudo.email}),
password=request.params.get('password')
).send_mail(user_sudo.id, force_send=True)
return super(AuthSignupHome, self).web_login(*args, **kw)
except UserError as e:
qcontext['error'] = str(e)
except (SignupError, AssertionError) as e:
if request.env["res.users"].sudo().search([("login", "=", qcontext.get("login"))]):
qcontext["error"] = _("Another user is already registered using this email address.")
else:
_logger.error("%s", e)
qcontext['error'] = _("Could not create a new account.")
return request.render('auth_signup.signup', qcontext)
@http.route('/web/reset_password', type='http', auth='public', website=True, sitemap=False)
def web_auth_reset_password(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
if qcontext.get('token'):
self.do_signup(qcontext)
return super(AuthSignupHome, self).web_login(*args, **kw)
else:
login = qcontext.get('login')
assert login, _("No login provided.")
_logger.info(
"Password reset attempt for <%s> by user <%s> from %s",
login, request.env.user.login, request.httprequest.remote_addr)
request.env['res.users'].sudo().reset_password(login)
qcontext['message'] = _("An email has been sent with credentials to reset your password")
except SignupError:
qcontext['error'] = _("Could not reset your password")
_logger.exception('error when resetting password')
except Exception as e:
qcontext['error'] = str(e)
response = request.render('auth_signup.reset_password', qcontext)
response.headers['X-Frame-Options'] = 'DENY'
return response
def get_auth_signup_config(self):
"""retrieve the module config (which features are enabled) for the login page"""
get_param = request.env['ir.config_parameter'].sudo().get_param
return {
'signup_enabled': get_param('auth_signup.allow_uninvited') == 'True',
'reset_password_enabled': get_param('auth_signup.reset_password') == 'True',
}
def get_auth_signup_qcontext(self):
""" Shared helper returning the rendering context for signup and reset password """
qcontext = request.params.copy()
qcontext.update(self.get_auth_signup_config())
if not qcontext.get('token') and request.session.get('auth_signup_token'):
qcontext['token'] = request.session.get('auth_signup_token')
if qcontext.get('token'):
try:
# retrieve the user info (name, login or email) corresponding to a signup token
token_infos = request.env['res.partner'].sudo().signup_retrieve_info(qcontext.get('token'))
for k, v in token_infos.items():
qcontext.setdefault(k, v)
except:
qcontext['error'] = _("Invalid signup token")
qcontext['invalid_token'] = True
return qcontext
def do_signup(self, qcontext):
""" Shared helper that creates a res.partner out of a token """
values = { key: qcontext.get(key) for key in ('login', 'name', 'password') }
if not values:
raise UserError(_("The form was not properly filled in."))
if values.get('password') != qcontext.get('confirm_password'):
raise UserError(_("Passwords do not match; please retype them."))
supported_langs = [lang['code'] for lang in request.env['res.lang'].sudo().search_read([], ['code'])]
if request.lang in supported_langs:
values['lang'] = request.lang
self._signup_with_values(qcontext.get('token'), values)
request.env.cr.commit()
def _signup_with_values(self, token, values):
db, login, password = request.env['res.users'].sudo().signup(values, token)
request.env.cr.commit() # as authenticate will use its own cursor we need to commit the current transaction
uid = request.session.authenticate(db, login, password)
if not uid:
raise SignupError(_('Authentication Failed.'))
|
Aravinthu/odoo
|
addons/auth_signup/controllers/main.py
|
Python
|
agpl-3.0
| 6,625 | 0.003774 |
#!/usr/bin/python
#-*-coding:utf8-*-
from bs4 import BeautifulSoup as Soup
#import pandas as pd
import glob
import sys
import re
"""
XML version of cfdi 3.3
"""
class CFDI(object):
def __init__(self, f):
"""
        Constructor that takes as its parameter a string with the name of
        the cfdi file.
"""
fxml = open(f,'r').read()
soup = Soup(fxml,'lxml')
        #============cfdi components=================
emisor = soup.find('cfdi:emisor')
receptor = soup.find('cfdi:receptor')
comprobante = soup.find('cfdi:comprobante')
tfd = soup.find('tfd:timbrefiscaldigital')
self.__version = comprobante['version']
self.__folio = comprobante['folio']
self.__uuid = tfd['uuid']
self.__fechatimbrado = tfd['fechatimbrado']
self.__traslados = soup.find_all(lambda e: e.name=='cfdi:traslado' and
sorted(e.attrs.keys())==['importe','impuesto','tasaocuota','tipofactor'])
self.__retenciones = soup.find_all(lambda e: e.name=='cfdi:retencion' and
sorted(e.attrs.keys())==['importe','impuesto'])
#============emisor==========================
self.__emisorrfc = emisor['rfc']
try:
self.__emisornombre = emisor['nombre']
except:
self.__emisornombre = emisor['rfc']
#============receptor========================
self.__receptorrfc = receptor['rfc']
try:
self.__receptornombre = receptor['nombre']
except:
self.__receptornombre = receptor['rfc']
#============comprobante=====================
self.__certificado = comprobante['certificado']
self.__sello = comprobante['sello']
self.__total = round(float(comprobante['total']),2)
self.__subtotal = round(float(comprobante['subtotal']),2)
self.__fecha_cfdi = comprobante['fecha']
self.__conceptos = soup.find_all(lambda e: e.name=='cfdi:concepto')
self.__n_conceptos = len(self.__conceptos)
try:
self.__moneda = comprobante['moneda']
except KeyError as k:
self.__moneda = 'MXN'
try:
self.__lugar = comprobante['lugarexpedicion']
except KeyError as k:
self.__lugar = u'México'
tipo = comprobante['tipodecomprobante']
if(float(self.__version)==3.2):
self.__tipo = tipo
else:
tcomprobantes = {'I':'Ingreso', 'E':'Egreso', 'N':'Nomina', 'P':'Pagado'}
self.__tipo = tcomprobantes[tipo]
try:
self.__tcambio = float(comprobante['tipocambio'])
except:
self.__tcambio = 1.
triva, trieps, trisr = self.__calcula_traslados()
self.__triva = round(triva,2)
self.__trieps = round(trieps,2)
self.__trisr = round(trisr,2)
retiva, retisr = self.__calcula_retenciones()
self.__retiva = round(retiva,2)
self.__retisr = round(retisr,2)
def __str__(self):
"""
        Prints the cfdi fields in the following order:
        emitter, stamp date, voucher type, emitter rfc, uuid,
        receiver, receiver rfc, subtotal, ieps, iva, retiva, retisr, tc, total
"""
respuesta = '\t'.join( map(str, self.lista_valores))
return respuesta
def __calcula_traslados(self):
triva, trieps, trisr = 0., 0., 0
for t in self.__traslados:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if impuesto=='IVA':
triva += importe
elif impuesto=='ISR':
trisr += importe
elif impuesto=='IEPS':
trieps += importe
elif(self.__version=='3.3'):
if impuesto=='002':
triva += importe
elif impuesto=='001':
trisr += importe
elif impuesto=='003':
trieps += importe
return triva, trieps, trisr
def __calcula_retenciones(self):
retiva, retisr = 0., 0.
for t in self.__retenciones:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if(impuesto=='ISR'):
retisr += importe
elif(impuesto=='IVA'):
retiva += importe
elif(self.__version=='3.3'):
if(impuesto=='002'):
retiva += importe
elif(impuesto=='001'):
retisr += importe
return retiva, retisr
@property
def lista_valores(self):
v = [self.__emisornombre,self.__fechatimbrado, self.__tipo, self.__emisorrfc ]
v += [self.__uuid, self.__folio, self.__receptornombre, self.__receptorrfc ]
v += [self.__subtotal, self.__trieps, self.__triva]
v += [self.__retiva, self.__retisr, self.__tcambio, self.__total]
return v
@property
def dic_cfdi(self):
d = {}
d["Emisor"] = self.__emisornombre
d["Fecha_CFDI"] = self.__fechatimbrado
d["Tipo"] = self.__tipo
d["RFC_Emisor"] = self.__emisorrfc
d["Folio_fiscal"] = self.__uuid
d["Folio"] = self.__folio
d["Receptor"] = self.__receptornombre
d["RFC_Receptor"] = self.__receptorrfc
d["Subtotal"] = self.__subtotal
d["IEPS"] = self.__trieps
d["IVA"] = self.__triva
d["Ret IVA"] = self.__retiva
d["Ret ISR"] = self.__retisr
d["TC"] = self.__tcambio
d["Total"] = self.__total
return d
@property
def certificado(self):
return self.__certificado
@property
def sello(self):
return self.__sello
@property
def total(self):
return self.__total
@property
def subtotal(self):
return self.__subtotal
@property
def fechatimbrado(self):
return self.__fechatimbrado
@property
def tipodecambio(self):
return self.__tcambio
@property
def lugar(self):
return self.__lugar
@property
def moneda(self):
return self.__moneda
@property
def traslado_iva(self):
return self.__triva
@property
def traslado_isr(self):
return self.__trisr
@property
def traslado_ieps(self):
return self.__trieps
@property
def n_conceptos(self):
return self.__n_conceptos
@property
def conceptos(self):
return self.__conceptos
@property
def folio(self):
return self.__folio
@staticmethod
def columnas():
return ["Emisor","Fecha_CFDI","Tipo","RFC_Emisor","Folio_fiscal","Folio","Receptor",
"RFC_Receptor", "Subtotal","IEPS","IVA","Ret IVA","Ret ISR","TC","Total"]
@staticmethod
def imprime_reporte(nf, nr):
reporte = "Número de archivos procesados:\t {}\n".format(nf)
reporte += "Número de filas en tsv:\t {}\n".format(nr)
if(nf!=nr):
reporte += "\n\n**** Atención ****\n"
return reporte
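# Usage sketch (hypothetical file name):
#   cfdi = CFDI('invoice.xml')
#   print(cfdi)             # tab-separated row, see __str__/lista_valores
#   row = cfdi.dic_cfdi     # dict keyed by the names in CFDI.columnas()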
L = glob.glob('./*.xml')
#R = [ patt[1:].strip().lower() for patt in re.findall('(<cfdi:[A-z]*\s|<tfd:[A-z]*\s)',fxml)]
if __name__=='__main__':
salida = sys.argv[1]
fout = open(salida,'w')
columnas = CFDI.columnas()
titulo = '\t'.join(columnas)+'\n'
fout.write(titulo)
nl = 0
for f in L:
try:
#print("abriendo {0}".format(f))
rcfdi = CFDI(f)
dic = rcfdi.dic_cfdi
vals = [dic[c] for c in columnas]
strvals = ' \t '.join(map(str, vals))+'\n'
fout.write(strvals)
nl += 1
    except Exception as e:
        # the original bare `assert "..."` was a no-op on a non-empty string
        # and silently swallowed every failure; report the error instead
        print("Error in file {0}: {1}".format(f, e))
fout.close()
nr = len(L)
rep = CFDI.imprime_reporte(nr, nl)
print(rep)
|
sergiohzlz/lectorcfdi
|
extrainfo.py
|
Python
|
apache-2.0
| 8,345 | 0.016906 |
from collections.abc import MutableMapping
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from typing import Any
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Type
from typing import Union
# avoid actually importing these, they're only used by type comments
from . import item
from . import manifest
if MYPY:
TypeDataType = MutableMapping[Tuple[Text, ...], Set[item.ManifestItem]]
PathHashType = MutableMapping[Tuple[Text, ...], Text]
else:
TypeDataType = MutableMapping
PathHashType = MutableMapping
class TypeData(TypeDataType):
def __init__(self, m, type_cls):
# type: (manifest.Manifest, Type[item.ManifestItem]) -> None
"""Dict-like object containing the TestItems for each test type.
Loading an actual Item class for each test is unnecessarily
slow, so this class allows lazy-loading of the test
items. When the manifest is loaded we store the raw json
corresponding to the test type, and only create an Item
subclass when the test is accessed. In order to remain
API-compatible with consumers that depend on getting an Item
        from iteration, we eagerly load all items when iterating
over the class."""
self._manifest = m
self._type_cls = type_cls # type: Type[item.ManifestItem]
self._json_data = {} # type: Dict[Text, Any]
self._data = {} # type: Dict[Text, Any]
self._hashes = {} # type: Dict[Tuple[Text, ...], Text]
self.hashes = PathHash(self)
def _delete_node(self, data, key):
# type: (Dict[Text, Any], Tuple[Text, ...]) -> None
"""delete a path from a Dict data with a given key"""
path = []
node = data
for pathseg in key[:-1]:
path.append((node, pathseg))
node = node[pathseg]
if not isinstance(node, dict):
raise KeyError(key)
del node[key[-1]]
while path:
node, pathseg = path.pop()
if len(node[pathseg]) == 0:
del node[pathseg]
else:
break
def __getitem__(self, key):
# type: (Tuple[Text, ...]) -> Set[item.ManifestItem]
node = self._data # type: Union[Dict[Text, Any], Set[item.ManifestItem], List[Any]]
for pathseg in key:
if isinstance(node, dict) and pathseg in node:
node = node[pathseg]
else:
break
else:
if isinstance(node, set):
return node
else:
raise KeyError(key)
node = self._json_data
found = False
for pathseg in key:
if isinstance(node, dict) and pathseg in node:
node = node[pathseg]
else:
break
else:
found = True
if not found:
raise KeyError(key)
if not isinstance(node, list):
raise KeyError(key)
self._hashes[key] = node[0]
data = set()
path = "/".join(key)
for test in node[1:]:
manifest_item = self._type_cls.from_json(self._manifest, path, test)
data.add(manifest_item)
node = self._data
assert isinstance(node, dict)
for pathseg in key[:-1]:
node = node.setdefault(pathseg, {})
assert isinstance(node, dict)
assert key[-1] not in node
node[key[-1]] = data
self._delete_node(self._json_data, key)
return data
def __setitem__(self, key, value):
# type: (Tuple[Text, ...], Set[item.ManifestItem]) -> None
try:
self._delete_node(self._json_data, key)
except KeyError:
pass
node = self._data
for i, pathseg in enumerate(key[:-1]):
node = node.setdefault(pathseg, {})
if not isinstance(node, dict):
raise KeyError("%r is a child of a test (%r)" % (key, key[:i+1]))
node[key[-1]] = value
def __delitem__(self, key):
# type: (Tuple[Text, ...]) -> None
try:
self._delete_node(self._data, key)
except KeyError:
self._delete_node(self._json_data, key)
else:
try:
del self._hashes[key]
except KeyError:
pass
def __iter__(self):
# type: () -> Iterator[Tuple[Text, ...]]
"""Iterator over keys in the TypeData in codepoint order"""
data_node = self._data # type: Optional[Dict[Text, Any]]
json_node = self._json_data # type: Optional[Dict[Text, Any]]
path = tuple() # type: Tuple[Text, ...]
stack = [(data_node, json_node, path)]
while stack:
data_node, json_node, path = stack.pop()
if isinstance(data_node, set) or isinstance(json_node, list):
assert data_node is None or json_node is None
yield path
else:
assert data_node is None or isinstance(data_node, dict)
assert json_node is None or isinstance(json_node, dict)
keys = set() # type: Set[Text]
if data_node is not None:
keys |= set(iter(data_node))
if json_node is not None:
keys |= set(iter(json_node))
for key in sorted(keys, reverse=True):
stack.append((data_node.get(key) if data_node is not None else None,
json_node.get(key) if json_node is not None else None,
path + (key,)))
def __len__(self):
# type: () -> int
count = 0
stack = [self._data]
while stack:
v = stack.pop()
if isinstance(v, set):
count += 1
else:
stack.extend(v.values())
stack = [self._json_data]
while stack:
v = stack.pop()
if isinstance(v, list):
count += 1
else:
stack.extend(v.values())
return count
def __nonzero__(self):
# type: () -> bool
return bool(self._data) or bool(self._json_data)
__bool__ = __nonzero__
def __contains__(self, key):
# type: (Any) -> bool
# we provide our own impl of this to avoid calling __getitem__ and generating items for
# those in self._json_data
node = self._data
for pathseg in key:
if pathseg in node:
node = node[pathseg]
else:
break
else:
return bool(isinstance(node, set))
node = self._json_data
for pathseg in key:
if pathseg in node:
node = node[pathseg]
else:
break
else:
return bool(isinstance(node, list))
return False
def clear(self):
# type: () -> None
# much, much simpler/quicker than that defined in MutableMapping
self._json_data.clear()
self._data.clear()
self._hashes.clear()
def set_json(self, json_data):
# type: (Dict[Text, Any]) -> None
"""Provide the object with a raw JSON blob
Note that this object graph is assumed to be owned by the TypeData
object after the call, so the caller must not mutate any part of the
graph.
"""
if self._json_data:
raise ValueError("set_json call when JSON data is not empty")
self._json_data = json_data
def to_json(self):
# type: () -> Dict[Text, Any]
"""Convert the current data to JSON
Note that the returned object may contain references to the internal
data structures, and is only guaranteed to be valid until the next
__getitem__, __setitem__, __delitem__ call, so the caller must not
mutate any part of the returned object graph.
"""
json_rv = self._json_data.copy()
def safe_sorter(element):
# type: (Tuple[str,str]) -> Tuple[str,str]
""" key function to sort lists with None values.
Python3 is more strict typewise. Comparing None and str for example is valid
in python2 but throws an exception in python3.
"""
if element and not element[0]:
return ("", element[1])
else:
return element
stack = [(self._data, json_rv, tuple())] # type: List[Tuple[Dict[Text, Any], Dict[Text, Any], Tuple[Text, ...]]]
while stack:
data_node, json_node, par_full_key = stack.pop()
for k, v in data_node.items():
full_key = par_full_key + (k,)
if isinstance(v, set):
assert k not in json_node
json_node[k] = [self._hashes.get(
full_key)] + [t for t in sorted((test.to_json() for test in v), key=safe_sorter)]
else:
json_node[k] = json_node.get(k, {}).copy()
stack.append((v, json_node[k], full_key))
return json_rv
class PathHash(PathHashType):
def __init__(self, data):
# type: (TypeData) -> None
self._data = data
def __getitem__(self, k):
# type: (Tuple[Text, ...]) -> Text
if k not in self._data:
raise KeyError
if k in self._data._hashes:
return self._data._hashes[k]
node = self._data._json_data
for pathseg in k:
if pathseg in node:
node = node[pathseg]
else:
break
else:
return node[0] # type: ignore
assert False, "unreachable"
raise KeyError
def __setitem__(self, k, v):
# type: (Tuple[Text, ...], Text) -> None
if k not in self._data:
raise KeyError
if k in self._data._hashes:
self._data._hashes[k] = v
node = self._data._json_data
for pathseg in k:
if pathseg in node:
node = node[pathseg]
else:
break
else:
node[0] = v # type: ignore
return
self._data._hashes[k] = v
def __delitem__(self, k):
# type: (Tuple[Text, ...]) -> None
raise ValueError("keys here must match underlying data")
def __iter__(self):
# type: () -> Iterator[Tuple[Text, ...]]
return iter(self._data)
def __len__(self):
# type: () -> int
return len(self._data)
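# Usage sketch (hypothetical: `m` is a manifest.Manifest, and the raw JSON
# is assumed to have the shape produced by to_json() above):
#   data = TypeData(m, item.TestharnessTest)
#   data.set_json(raw_json)     # store raw JSON; items stay unparsed
#   for key in data:            # keys are path tuples
#       tests = data[key]       # materializes ManifestItems lazily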
|
KiChjang/servo
|
tests/wpt/web-platform-tests/tools/manifest/typedata.py
|
Python
|
mpl-2.0
| 10,877 | 0.000827 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
__author__ = 'Nick'
def index(request):
return redirect('/search')
|
nh0815/PySearch
|
pysearch/views.py
|
Python
|
mit
| 154 | 0.012987 |
#!/usr/bin/env python3
#
# SuperTuxKart - a fun racing game with go-kart
# Copyright (C) 2006-2015 SuperTuxKart-Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# This script uses create_kart_properties.py to create code and then replaces
# the code in the source files. The parts in the source are marked with tags that
# contain the argument that has to be passed to create_kart_properties.py.
# The script has to be run from the root directory of this project.
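# The tags in the source files look like this (reconstructed from the
# regular expression below, where `operation` is a key of `functions`):
#   /* <characteristics-start operation> */
#   ...generated code...
#   /* <characteristics-end operation> */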
import os
import re
import subprocess
from create_kart_properties import functions
def main():
# Check, if it runs in the root directory
if not os.path.isfile("tools/update_characteristics.py"):
print("Please run this script in the root directory of the project.")
exit(1)
for operation, function in functions.items():
result = subprocess.Popen("tools/create_kart_properties.py " +
operation, shell = True,
stdout = subprocess.PIPE).stdout.read().decode('UTF-8')
with open("src/" + function[2], "r") as f:
text = f.read()
# Replace the text by using look behinds and look forwards
text = re.sub("(?<=/\* \<characteristics-start " + operation +
"\> \*/\\n)(.|\n)*(?=\\n\s*/\* <characteristics-end " + operation + "> \*/)", result, text)
with open("src/" + function[2], "w") as f:
f.write(text)
if __name__ == '__main__':
main()
|
SuicSoft/stk-code
|
tools/update_characteristics.py
|
Python
|
gpl-3.0
| 2,103 | 0.009035 |
import unittest
from stomp import backward3
class TestBackward3(unittest.TestCase):
def test_pack_mixed_string_and_bytes(self):
lines = ['SEND', '\n', 'header1:test', '\u6771']
self.assertEqual(backward3.encode(backward3.pack(lines)),
b'SEND\nheader1:test\xe6\x9d\xb1')
lines = ['SEND', '\n', 'header1:test', b'\xe6\x9d\xb1']
self.assertEqual(backward3.encode(backward3.pack(lines)),
b'SEND\nheader1:test\xe6\x9d\xb1')
def test_decode(self):
self.assertTrue(backward3.decode(None) is None)
self.assertEqual('test', backward3.decode(b'test'))
def test_encode(self):
self.assertEqual(b'test', backward3.encode('test'))
self.assertEqual(b'test', backward3.encode(b'test'))
self.assertRaises(TypeError, backward3.encode, None)
|
GeneralizedLearningUtilities/SuperGLU
|
python_module/stomp/test/p3_backward_test.py
|
Python
|
mit
| 884 | 0 |
#============================================================================
# Name : circ-pic.py
# Author : Luke Mondy
# ============================================================================
#
# Copyright (C) 2012 Mondy Luke
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ============================================================================
from __future__ import division
import sys
import Image, ImageDraw
import argparse
import numpy as np
from math import sqrt
HAVE_PYPRIND = True
try:
import pyprind
except ImportError:
HAVE_PYPRIND = False
LOGGING = False
def log(message):
global LOGGING
if LOGGING:
print message
sys.stdout.flush()
def getImage(image, scale=1.0, grey=True):
try:
log("Opening image: %s" % image)
im = Image.open(image)
except Exception as e:
error_msg = ("Image file you provided:\n{image}\ndoes not exist! Here's what the computer"
"says:\n{exception}".format(image=image, exception=e))
sys.exit(error_msg)
if scale != 1.0:
im = im.resize(tuple(int(i * scale) for i in im.size))
if grey:
im = im.convert('L')
return im
def overlapping(c1, c2):
# circle data type:
# (x, y, rad)
dist = sqrt( (c2[0] - c1[0])**2 + (c2[1] - c1[1])**2 ) # This sqrt is killin' me...
if c1[2] + c2[2] > dist:
return True
return False
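# A sqrt-free equivalent (sketch): comparing squared distances avoids the
# costly sqrt lamented above.
# def overlapping_sq(c1, c2):
#     dx, dy = c2[0] - c1[0], c2[1] - c1[1]
#     return (c1[2] + c2[2]) ** 2 > dx * dx + dy * dy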
def render(circles, path, params, imsize):
log("Rendering...")
if params['bgimg']:
bg = getImage(params['bgimg'], grey=False)
bgim = bg.resize(imsize)
bgpix = bgim.load()
col = params['bgcolour']
col = 255 if col > 255 else col
col = 0 if col < 0 else col
bgcolour = (col, col, col)
outline = (0, 0, 0)
if params['nooutline']:
outline = None
final = Image.new('RGB', imsize, bgcolour)
draw = ImageDraw.Draw(final)
im_x, im_y = imsize
for y in range(im_y):
for x in range(im_x):
circle_radius = circles[x,y]
if circle_radius != 0:
bb = (x - circle_radius, y - circle_radius,
x + circle_radius, y + circle_radius)
fill = bgpix[x, y] if params['bgimg'] else (255, 255, 255)
draw.ellipse(bb, fill=fill, outline=outline)
del draw
final.save(params['outimg'])
def circlerise(params):
global LOGGING
global HAVE_PYPRIND
interval = params['interval']
maxrad = params['maxrad']
scale = params['scale']
im = getImage(params['circimg'], scale)
pixels = im.load()
circles = np.zeros(im.size, int)
"""
=== Algorithm ===
For each pixel in the original image, determine its
"grey" brightness, and determine an appropriate radius
for that.
Now look in the local region for other circles (local
is determined by the max_radius of other circles + the
radius of the current potential circle).
    If there are circles nearby, check whether the new
    circle would overlap with any of them. If none of the
    nearby circles would overlap, record the radius in a 2D
    array that corresponds to the image.
"""
im_x, im_y = im.size
skips = 0
    if LOGGING and HAVE_PYPRIND:
progress = pyprind.ProgBar(im_y, stream=1)
for y in range(0, im_y, interval):
prev_rad = 0
closeness = 0
for x in range(0, im_x, interval):
closeness += 1
# Determine radius
greyval = pixels[x, y]
radius = int(maxrad * (greyval/255))
if radius == 0:
radius = 1
# If we are still going to be inside the last circle
# placed on the same X row, save time and skip.
if prev_rad + radius >= closeness:
skips += 1
continue
bb = [x - radius - maxrad, # Define bounding box.
y - radius - maxrad,
x + radius + maxrad,
y + radius + maxrad]
if bb[0] < 0: # Ensure the bounding box is OK with
bb[0] = 0 # edges. We don't need to check the
if bb[1] < 0: # outer edges because it's OK for the
bb[1] = 0 # centre to be right on the edge.
if bb[2] >= im_x:
bb[2] = im_x - 1
if bb[3] >= im_y:
bb[3] = im_y - 1
c1 = (x, y, radius)
# Use bounding box and numpy to extract the local area around the
# circle. Then use numpy to do a boolean operating to give a
# true/false matrix of whether circles are nearby.
local_area = circles[bb[0]:bb[2], bb[1]:bb[3]]
circle_nearby = local_area != 0
coords_of_local_circles = np.where(circle_nearby)
radii_of_local_cirles = np.expand_dims(local_area[circle_nearby], axis=0) # Need the extra dim for next step
nrby_cirles = np.vstack([coords_of_local_circles, radii_of_local_cirles])
nrby_cirles = nrby_cirles.transpose()
any_overlaps_here = False
if nrby_cirles.shape[0] == 0:
circles[x,y] = radius
prev_rad = radius
closeness = 0
else:
for n in nrby_cirles:
c2 = (n[0]+bb[0], n[1]+bb[1], n[2])
overlap = overlapping(c1, c2)
if overlap:
any_overlaps_here = True
break
# Look if any nearby circles overlap. If any do, don't make
# a circle here.
if not any_overlaps_here:
circles[x, y] = radius
prev_rad = radius
closeness = 0
if LOGGING is True and HAVE_PYPRIND is True:
progress.update()
log("Avoided {skips} calculations".format(skips=skips))
render(circles, "", params, im.size)
def main(argv=None):
parser = argparse.ArgumentParser(description="Using imgcirc!")
addarg = parser.add_argument # just for cleaner code
addarg("--circimg", type=str, required=True,
help="The image that will make up the circles.", )
addarg("--interval", type=int, default=1,
help="Interval between pixels to look at in the circimg. 1 means all pixels.")
addarg("--bgimg", type=str,
help="An image to colour the circles with. Will be resized as needed.")
addarg("--outimg", type=str, required=True,
help="Filename for the outputted image.")
addarg("--maxrad", type=int, default=10,
help="Max radius of a circle (corresponds to a white pixel)")
addarg("--scale", type=float, default=1,
help="Percent to scale up the circimg (sometimes makes it look better).")
addarg("--bgcolour", type=int, default=255,
help="Grey-scale val from 0 to 255")
addarg("--nooutline", action='store_true', default=False,
help="When specified, no outline will be drawn on circles.")
addarg("--log", action='store_true', default=False,
help="Write progress to stdout.")
parsed_args = parser.parse_args()
params = dict(parsed_args.__dict__)
global LOGGING
if params["log"] is True:
LOGGING = True
log("Begin circlerising...")
circlerise(params)
if __name__ == "__main__":
sys.exit(main())
|
OlympusMonds/PyCircleriser
|
PyCircleriser.py
|
Python
|
gpl-3.0
| 8,228 | 0.007049 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-09 22:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('judge', '0038_profile_problem_count'),
]
operations = [
migrations.RemoveField(
model_name='contest',
name='is_external',
),
]
|
monouno/site
|
judge/migrations/0039_remove_contest_is_external.py
|
Python
|
agpl-3.0
| 399 | 0 |
# -*- coding:utf-8 -*-
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
from core.enigma import Enigma
from model.email import Email
from util.config import Config
class EmailSender:
@classmethod
def format_addr(cls, s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
@classmethod
def build_msg(cls, email):
msg = MIMEText(email.body, 'plain', 'utf-8')
msg['From'] = cls.format_addr(u'自己 <%s>' % email.from_addr)
msg['To'] = cls.format_addr(u'自己 <%s>' % email.to_addr)
msg['Subject'] = Header(email.title, 'utf-8').encode()
return msg
@classmethod
def generate_email(cls, title, content):
email = Email()
email.from_addr = Config.get("email_from_addr")
email.to_addr = Config.get("email_to_addr")
email.password = Enigma.decrypt(Config.get("email_password"))
email.stmp_server = Config.get("email_stmp_server")
email.stmp_port = Config.get("email_stmp_port")
email.is_ssl = Config.get("email_is_ssl")
email.title = title
email.body = content
return email
@classmethod
def send(cls, title, content):
email = cls.generate_email(title, content)
msg = cls.build_msg(email)
if email.is_ssl:
server = smtplib.SMTP_SSL(email.stmp_server, email.stmp_port)
else:
server = smtplib.SMTP(email.stmp_server, email.stmp_port)
# server.set_debuglevel(1)
server.login(email.from_addr, email.password)
server.sendmail(email.from_addr, email.to_addr, msg.as_string())
server.quit()
if __name__ == "__main__":
EmailSender.send("test", "test")
|
AaronGeist/Llama
|
core/emailsender.py
|
Python
|
gpl-3.0
| 1,822 | 0.000551 |
from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
|
redline-forensics/auto-dm
|
controllers/main_ctrl.py
|
Python
|
gpl-3.0
| 2,757 | 0.001814 |
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_volume
short_description: Create/Delete Cinder Volumes
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove cinder block storage volumes
options:
size:
description:
- Size of volume in GB. This parameter is required when the
I(state) parameter is 'present'.
display_name:
description:
- Name of volume
required: true
display_description:
description:
- String describing the volume
volume_type:
description:
- Volume type for volume
image:
description:
- Image name or id for boot from volume
snapshot_id:
description:
- Volume snapshot id to create from
volume:
description:
- Volume name or id to create from
version_added: "2.3"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
scheduler_hints:
description:
- Scheduler hints passed to volume API in form of dict
version_added: "2.4"
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Creates a new volume
- name: create a volume
hosts: localhost
tasks:
- name: create 40g test volume
os_volume:
state: present
cloud: mordred
availability_zone: az2
size: 40
display_name: test_volume
scheduler_hints:
same_host: 243e8d3c-8f47-4a61-93d6-7215c344b0c0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _present_volume(module, cloud):
if cloud.volume_exists(module.params['display_name']):
v = cloud.get_volume(module.params['display_name'])
module.exit_json(changed=False, id=v['id'], volume=v)
volume_args = dict(
size=module.params['size'],
volume_type=module.params['volume_type'],
display_name=module.params['display_name'],
display_description=module.params['display_description'],
snapshot_id=module.params['snapshot_id'],
availability_zone=module.params['availability_zone'],
)
if module.params['image']:
image_id = cloud.get_image_id(module.params['image'])
volume_args['imageRef'] = image_id
if module.params['volume']:
volume_id = cloud.get_volume_id(module.params['volume'])
if not volume_id:
module.fail_json(msg="Failed to find volume '%s'" % module.params['volume'])
volume_args['source_volid'] = volume_id
if module.params['scheduler_hints']:
volume_args['scheduler_hints'] = module.params['scheduler_hints']
volume = cloud.create_volume(
wait=module.params['wait'], timeout=module.params['timeout'],
**volume_args)
module.exit_json(changed=True, id=volume['id'], volume=volume)
def _absent_volume(module, cloud, sdk):
changed = False
if cloud.volume_exists(module.params['display_name']):
try:
changed = cloud.delete_volume(name_or_id=module.params['display_name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
except sdk.exceptions.ResourceTimeout:
module.exit_json(changed=changed)
module.exit_json(changed=changed)
def main():
argument_spec = openstack_full_argument_spec(
size=dict(default=None),
volume_type=dict(default=None),
display_name=dict(required=True, aliases=['name']),
display_description=dict(default=None, aliases=['description']),
image=dict(default=None),
snapshot_id=dict(default=None),
volume=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
scheduler_hints=dict(default=None, type='dict')
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['image', 'snapshot_id', 'volume'],
],
)
module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)
state = module.params['state']
if state == 'present' and not module.params['size']:
module.fail_json(msg="Size is required when state is 'present'")
sdk, cloud = openstack_cloud_from_module(module)
try:
if state == 'present':
_present_volume(module, cloud)
if state == 'absent':
_absent_volume(module, cloud, sdk)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
shepdelacreme/ansible
|
lib/ansible/modules/cloud/openstack/os_volume.py
|
Python
|
gpl-3.0
| 5,265 | 0.00133 |
import rppy
import numpy as np
import matplotlib.pyplot as plt
vp1 = 3000
vs1 = 1500
p1 = 2000
e1_1 = 0.0
d1_1 = 0.0
y1_1 = 0.0
e2_1 = 0.0
d2_1 = 0.0
y2_1 = 0.0
d3_1 = 0.0
chi1 = 0.0
C1 = rppy.reflectivity.Cij(vp1, vs1, p1, e1_1, d1_1, y1_1, e2_1, d2_1, y2_1, d3_1)
vp2 = 4000
vs2 = 2000
p2 = 2200
e1_2 = 0.0
d1_2 = 0.0
y1_2 = 0.0
e2_2 = 0.0
d2_2 = 0.0
y2_2 = 0.0
d3_2 = 0.0
chi2 = 0.0
C2 = rppy.reflectivity.Cij(vp2, vs2, p2, e1_2, d1_2, y1_2, e2_2, d2_2, y2_2, d3_2)
phi = np.arange(0, 90, 1)
theta = np.arange(0, 90, 1)
loopang = phi
theta = np.array([30])
rphti = np.zeros(np.shape(loopang))
rpzoe = np.zeros(np.shape(loopang))
rprug = np.zeros(np.shape(loopang))
for aid, val in enumerate(loopang):
rphti[aid] = rppy.reflectivity.exact_ortho(C1, p1, C2, p2, chi1, chi2, loopang[aid], theta)
rprug[aid] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e2_1, d2_1, y2_1, vp2, vs2, p2, e2_2, d2_2, y2_2, np.radians(theta), np.radians(loopang[aid]))
rpzoe[aid] = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, np.radians(theta))
plt.figure(1)
plt.plot(loopang, rphti, loopang, rprug, loopang, rpzoe)
plt.legend(['hti', 'ruger', 'zoe'])
plt.show()
|
shear/rppy
|
temp_test_ortho.py
|
Python
|
bsd-2-clause
| 1,170 | 0.005128 |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from textwrap import dedent
import pytest
import pytablewriter
from ...._common import print_test_result
from ....data import (
Data,
headers,
mix_header_list,
mix_value_matrix,
null_test_data_list,
value_matrix,
value_matrix_iter,
value_matrix_with_none,
vut_style_tabledata,
vut_styles,
)
from .._common import regexp_ansi_escape, strip_ansi_escape
normal_test_data_list = [
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=None,
expected=dedent(
"""\
.. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 3, 4, 3
"""
),
),
Data(
table=None,
indent=0,
header=None,
value=value_matrix,
expected=dedent(
"""\
.. csv-table::
:widths: 1, 5, 5, 3, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=1,
header=headers,
value=value_matrix,
expected=""" .. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
""",
),
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix_with_none,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 5, 4, 6
1, , "a", 1.0,
, 2.2, , 2.2, 2.2
3, 3.3, "ccc", , "cccc"
, , , ,
"""
),
),
Data(
table="table name",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "i", "f", "c", "if", "ifc", "bool", "inf", "nan", "mix_num", "time"
:widths: 3, 4, 6, 4, 5, 6, 8, 5, 9, 27
1, 1.10, "aa", 1.0, 1, True, Infinity, NaN, 1, 2017-01-01T00:00:00
2, 2.20, "bbb", 2.2, 2.2, False, Infinity, NaN, Infinity, "2017-01-02 03:04:05+09:00"
3, 3.33, "cccc", -3.0, "ccc", True, Infinity, NaN, NaN, 2017-01-01T00:00:00
"""
),
),
]
table_writer_class = pytablewriter.RstCsvTableWriter
class Test_RstCsvTableWriter_write_new_line:
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_RstCsvTableWriter_write_table:
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in normal_test_data_list
],
)
def test_normal(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_styles(self):
writer = table_writer_class()
writer.from_tabledata(vut_style_tabledata)
writer.column_styles = vut_styles
expected = dedent(
"""\
.. csv-table:: style test
:header: "none", "empty", "tiny", "small", "medium", "large", "null w/ bold", "L bold", "S italic", "L bold italic"
:widths: 6, 7, 6, 7, 8, 7, 14, 8, 10, 15
111, 111, 111, 111, "111", 111, , **111**, *111*, **111**
1234, 1234, 1234, 1234, "1,234", 1 234, , **1234**, *1234*, **1234**
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert regexp_ansi_escape.search(out)
assert strip_ansi_escape(out) == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in null_test_data_list
],
)
def test_normal_empty(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
assert writer.dumps() == ""
assert str(writer) == ""
class Test_RstCsvTableWriter_write_table_iter:
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[
[
"tablename",
["ha", "hb", "hc"],
value_matrix_iter,
dedent(
"""\
.. csv-table:: tablename
:header: "ha", "hb", "hc"
:widths: 5, 5, 5
1, 2, 3
11, 12, 13
1, 2, 3
11, 12, 13
101, 102, 103
1001, 1002, 1003
"""
),
]
],
)
def test_normal(self, capsys, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.iteration_length = len(value)
writer.write_table_iter()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in null_test_data_list],
)
def test_normal_smoke(self, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.write_table_iter()
|
thombashi/pytablewriter
|
test/writer/text/rst/test_rst_csv_writer.py
|
Python
|
mit
| 6,875 | 0.001309 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_static_route
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage static IP routes on Cisco IOS network devices
description:
- This module provides declarative management of static
IP routes on Cisco IOS network devices.
notes:
- Tested against IOS 15.6
options:
prefix:
description:
- Network prefix of the static route.
mask:
description:
- Network prefix mask of the static route.
next_hop:
description:
- Next hop IP of the static route.
admin_distance:
description:
- Admin distance of the static route.
default: 1
aggregate:
description: List of static route definitions.
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure static route
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: remove configuration
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0 255.255.255.0 10.0.0.1
"""
from copy import deepcopy
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network_common import remove_default_spec
from ansible.module_utils.ios import load_config, run_commands
from ansible.module_utils.ios import ios_argument_spec, check_args
from ipaddress import ip_network
import re
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
prefix = w['prefix']
mask = w['mask']
next_hop = w['next_hop']
admin_distance = w['admin_distance']
state = w['state']
del w['state']
if state == 'absent' and w in have:
commands.append('no ip route %s %s %s' % (prefix, mask, next_hop))
elif state == 'present' and w not in have:
commands.append('ip route %s %s %s %s' % (prefix, mask, next_hop,
admin_distance))
return commands
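# Illustrative sketch (hypothetical data, matching the RETURN sample above):
# a single 'present' route in want that is missing from have produces
#     ['ip route 192.168.2.0 255.255.255.0 10.0.0.1 1']
# while the same route with state 'absent', when found in have, produces
#     ['no ip route 192.168.2.0 255.255.255.0 10.0.0.1']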
def map_config_to_obj(module):
obj = []
rc, out, err = exec_command(module, 'show ip static route')
match = re.search(r'.*Static local RIB for default\s*(.*)$', out, re.DOTALL)
if match and match.group(1):
for r in match.group(1).splitlines():
splitted_line = r.split()
code = splitted_line[0]
if code != 'M':
continue
cidr = ip_network(to_text(splitted_line[1]))
prefix = str(cidr.network_address)
mask = str(cidr.netmask)
next_hop = splitted_line[4]
admin_distance = splitted_line[2][1]
obj.append({'prefix': prefix, 'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance})
return obj
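# Note: the parser above assumes 'show ip static route' entries roughly of
# the form 'M  192.168.2.0/24 [1/0] via 10.0.0.1' (illustrative): the route
# code in column 0, the CIDR in column 1, '[<distance>/<metric>]' in
# column 2 and the next hop in column 4.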
def map_params_to_obj(module, required_together=None):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
module._check_required_together(required_together, item)
d = item.copy()
            d['admin_distance'] = str(d['admin_distance'])
obj.append(d)
else:
obj.append({
'prefix': module.params['prefix'].strip(),
'mask': module.params['mask'].strip(),
'next_hop': module.params['next_hop'].strip(),
'admin_distance': str(module.params['admin_distance']),
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
prefix=dict(type='str'),
mask=dict(type='str'),
next_hop=dict(type='str'),
admin_distance=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['prefix'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
required_one_of = [['aggregate', 'prefix']]
required_together = [['prefix', 'mask', 'next_hop']]
mutually_exclusive = [['aggregate', 'prefix']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module, required_together=required_together)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_static_route.py
|
Python
|
bsd-3-clause
| 7,090 | 0.001551 |
from setuptools import setup, find_packages
from dist_job_mgr.version import VERSION
setup(
name='dist_job_mgr',
version=VERSION,
author='genForma Corp',
author_email='code@genforma.com',
url='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
entry_points = {
'console_scripts': [
'djmctl = dist_job_mgr.djmctl:main',
'djm-worker = dist_job_mgr.worker_main:main'
]},
install_requires=['lockfile>=0.9',], # 'python-daemon'],
license='Apache V2.0',
description='Distributed Job Manager',
long_description="description"
)
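# After installation (e.g. `pip install .`) the entry_points above expose two
# console scripts, `djmctl` and `djm-worker`; only the script names come from
# this file, their subcommands live in the dist_job_mgr package itself.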
|
quaddra/dist_job_mgr
|
setup.py
|
Python
|
apache-2.0
| 639 | 0.00626 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains functions to help with caching.
"""
# Django's cache module
from django.core.cache import cache
# Cached models
from core.spaces.models import Space
# Response types
from django.shortcuts import get_object_or_404
# Tries to get the object from cache
# Else queries the database
# Else returns a 404 error
def _get_cache_key_for_model(model, key):
"""
Returns a unique key for the given model.
We prefix the given `key` with the name of the `model` to provide a further
degree of uniqueness of keys across the cache.
"""
if not isinstance(key, basestring):
raise TypeError('key must be str or a unicode string')
return model.__name__ + '_' + key
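# For instance (illustrative key), _get_cache_key_for_model(Space, 'my-space')
# returns 'Space_my-space'.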
def get_or_insert_object_in_cache(model, key, *args, **kwargs):
"""
Returns an instance of the `model` stored in the cache with the given key.
If the object is not found in the cache, it is retrieved from the database
and set in the cache.
"""
actual_key = _get_cache_key_for_model(model, key)
return_object = cache.get(actual_key)
if not return_object:
return_object = get_object_or_404(model, *args, **kwargs)
cache.set(actual_key, return_object)
return return_object
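# Minimal usage sketch (hypothetical view code): fetch a Space by its URL
# field, hitting the database only on a cache miss:
#
#     space = get_or_insert_object_in_cache(Space, space_url, url=space_url)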
|
cidadania/e-cidadania
|
src/helpers/cache.py
|
Python
|
apache-2.0
| 1,903 | 0 |
# coding=utf-8
__author__ = "AstroPrint Product Team <product@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2017 3DaGoGo, Inc - Released under terms of the AGPLv3 License"
# singleton
_instance = None
def printerProfileManager():
global _instance
if _instance is None:
_instance = PrinterProfileManager()
return _instance
import os
import yaml
import logging
import shutil
from octoprint.settings import settings
class PrinterProfileManager(object):
def __init__(self):
self._settings = settings()
configDir = self._settings.getConfigFolder()
self._infoFile = "%s/printer-profile.yaml" % configDir
self._logger = logging.getLogger(__name__)
self.data = {
'driver': "marlin",
'extruder_count': 1,
'max_nozzle_temp': 280,
'max_bed_temp': 140,
'heated_bed': True,
'cancel_gcode': ['G28 X0 Y0'],
'invert_z': False
}
if not os.path.isfile(self._infoFile):
factoryFile = "%s/printer-profile.factory" % configDir
if os.path.isfile(factoryFile):
shutil.copy(factoryFile, self._infoFile)
else:
open(self._infoFile, 'w').close()
if self._infoFile:
config = None
with open(self._infoFile, "r") as f:
config = yaml.safe_load(f)
def merge_dict(a, b):
for key in b:
if isinstance(b[key], dict):
merge_dict(a[key], b[key])
else:
a[key] = b[key]
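            # merge_dict recursively overlays the loaded YAML config onto the
            # defaults in self.data, so a partial profile file only overrides
            # the keys it defines; nested dict keys are assumed to already
            # exist in the defaults above.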
if config:
merge_dict(self.data, config)
def save(self):
with open(self._infoFile, "wb") as infoFile:
yaml.safe_dump(
self.data,
infoFile,
default_flow_style=False,
indent=" ",
allow_unicode=True
)
def set(self, changes):
for k in changes:
if k in self.data:
if self.data[k] != changes[k]:
if k == 'driver':
# change printer object
from astroprint.printer.manager import printerManager
printerManager(changes['driver'])
self.data[k] = self._clean(k, changes[k])
else:
self._logger.error(
"trying to set unkonwn printer profile field %s to %s" % \
(k, str(changes[k])))
def _clean(self, field, value):
if field in ['extruder_count', 'max_nozzle_temp', 'max_bed_temp']:
return int(value)
elif field == 'heated_bed':
return bool(value)
else:
return value
|
abinashk-inf/AstroBox
|
src/astroprint/printerprofile/__init__.py
|
Python
|
agpl-3.0
| 2,918 | 0.002742 |
from __future__ import print_function
from __future__ import division
import os
import sys
import numpy as np
import pandas as pd
from ast import literal_eval
try: # run as a package if installed
from pcntoolkit.model.bayesreg import BLR
from pcntoolkit.normative_model.norm_base import NormBase
from pcntoolkit.dataio import fileio
from pcntoolkit.util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
except ImportError:
pass
path = os.path.abspath(os.path.dirname(__file__))
if path not in sys.path:
sys.path.append(path)
del path
from model.bayesreg import BLR
from norm_base import NormBase
from dataio import fileio
from util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
class NormBLR(NormBase):
""" Normative modelling based on Bayesian Linear Regression
"""
def __init__(self, **kwargs):
X = kwargs.pop('X', None)
y = kwargs.pop('y', None)
theta = kwargs.pop('theta', None)
if isinstance(theta, str):
theta = np.array(literal_eval(theta))
self.optim_alg = kwargs.get('optimizer','powell')
if X is None:
raise(ValueError, "Data matrix must be specified")
if len(X.shape) == 1:
self.D = 1
else:
self.D = X.shape[1]
# Parse model order
if kwargs is None:
model_order = 1
elif 'configparam' in kwargs: # deprecated syntax
model_order = kwargs.pop('configparam')
elif 'model_order' in kwargs:
model_order = kwargs.pop('model_order')
else:
model_order = 1
# Force a default model order and check datatype
if model_order is None:
model_order = 1
if type(model_order) is not int:
model_order = int(model_order)
# configure heteroskedastic noise
if 'varcovfile' in kwargs:
var_cov_file = kwargs.get('varcovfile')
if var_cov_file.endswith('.pkl'):
self.var_covariates = pd.read_pickle(var_cov_file)
else:
self.var_covariates = np.loadtxt(var_cov_file)
if len(self.var_covariates.shape) == 1:
self.var_covariates = self.var_covariates[:, np.newaxis]
n_beta = self.var_covariates.shape[1]
self.var_groups = None
elif 'vargroupfile' in kwargs:
# configure variance groups (e.g. site specific variance)
var_groups_file = kwargs.pop('vargroupfile')
if var_groups_file.endswith('.pkl'):
self.var_groups = pd.read_pickle(var_groups_file)
else:
self.var_groups = np.loadtxt(var_groups_file)
var_ids = set(self.var_groups)
var_ids = sorted(list(var_ids))
n_beta = len(var_ids)
else:
self.var_groups = None
self.var_covariates = None
n_beta = 1
# are we using ARD?
if 'use_ard' in kwargs:
self.use_ard = kwargs.pop('use_ard')
else:
self.use_ard = False
if self.use_ard:
n_alpha = self.D * model_order
else:
n_alpha = 1
# Configure warped likelihood
if 'warp' in kwargs:
warp_str = kwargs.pop('warp')
if warp_str is None:
self.warp = None
n_gamma = 0
else:
# set up warp
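                # warp_str is expected to name one of the warp classes
                # imported above, e.g. 'WarpBoxCox' or 'WarpSinArcsinh'
                # (example names; any class with that interface works)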
exec('self.warp =' + warp_str + '()')
n_gamma = self.warp.get_n_params()
else:
self.warp = None
n_gamma = 0
self._n_params = n_alpha + n_beta + n_gamma
self._model_order = model_order
print("configuring BLR ( order", model_order, ")")
if (theta is None) or (len(theta) != self._n_params):
print("Using default hyperparameters")
self.theta0 = np.zeros(self._n_params)
else:
self.theta0 = theta
self.theta = self.theta0
# initialise the BLR object if the required parameters are present
if (theta is not None) and (y is not None):
Phi = create_poly_basis(X, self._model_order)
self.blr = BLR(theta=theta, X=Phi, y=y,
warp=self.warp, **kwargs)
else:
self.blr = BLR(**kwargs)
@property
def n_params(self):
return self._n_params
@property
def neg_log_lik(self):
return self.blr.nlZ
def estimate(self, X, y, **kwargs):
theta = kwargs.pop('theta', None)
if isinstance(theta, str):
theta = np.array(literal_eval(theta))
# remove warp string to prevent it being passed to the blr object
kwargs.pop('warp',None)
Phi = create_poly_basis(X, self._model_order)
if len(y.shape) > 1:
y = y.ravel()
if theta is None:
theta = self.theta0
# (re-)initialize BLR object because parameters were not specified
self.blr = BLR(theta=theta, X=Phi, y=y,
var_groups=self.var_groups,
warp=self.warp, **kwargs)
self.theta = self.blr.estimate(theta, Phi, y,
var_covariates=self.var_covariates, **kwargs)
return self
def predict(self, Xs, X=None, y=None, **kwargs):
theta = self.theta # always use the estimated coefficients
# remove from kwargs to avoid downstream problems
kwargs.pop('theta', None)
Phis = create_poly_basis(Xs, self._model_order)
if X is None:
            Phi = None
else:
Phi = create_poly_basis(X, self._model_order)
# process variance groups for the test data
if 'testvargroupfile' in kwargs:
var_groups_test_file = kwargs.pop('testvargroupfile')
if var_groups_test_file.endswith('.pkl'):
var_groups_te = pd.read_pickle(var_groups_test_file)
else:
var_groups_te = np.loadtxt(var_groups_test_file)
else:
var_groups_te = None
# process test variance covariates
if 'testvarcovfile' in kwargs:
var_cov_test_file = kwargs.get('testvarcovfile')
if var_cov_test_file.endswith('.pkl'):
var_cov_te = pd.read_pickle(var_cov_test_file)
else:
var_cov_te = np.loadtxt(var_cov_test_file)
else:
var_cov_te = None
# do we want to adjust the responses?
if 'adaptrespfile' in kwargs:
y_adapt = fileio.load(kwargs.pop('adaptrespfile'))
if len(y_adapt.shape) == 1:
y_adapt = y_adapt[:, np.newaxis]
else:
y_adapt = None
if 'adaptcovfile' in kwargs:
X_adapt = fileio.load(kwargs.pop('adaptcovfile'))
Phi_adapt = create_poly_basis(X_adapt, self._model_order)
else:
Phi_adapt = None
if 'adaptvargroupfile' in kwargs:
var_groups_adapt_file = kwargs.pop('adaptvargroupfile')
if var_groups_adapt_file.endswith('.pkl'):
var_groups_ad = pd.read_pickle(var_groups_adapt_file)
else:
var_groups_ad = np.loadtxt(var_groups_adapt_file)
else:
var_groups_ad = None
if y_adapt is None:
yhat, s2 = self.blr.predict(theta, Phi, y, Phis,
var_groups_test=var_groups_te,
var_covariates_test=var_cov_te,
**kwargs)
else:
yhat, s2 = self.blr.predict_and_adjust(theta, Phi_adapt, y_adapt, Phis,
var_groups_test=var_groups_te,
var_groups_adapt=var_groups_ad,
**kwargs)
return yhat, s2
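# Minimal usage sketch (hypothetical arrays; not part of the original module):
#
#     import numpy as np
#     X = np.random.randn(100, 2)                # covariates
#     y = np.random.randn(100)                   # responses
#     nm = NormBLR(X=X, y=y, theta=np.zeros(2))  # 1 alpha + 1 beta by default
#     nm.estimate(X, y)
#     yhat, s2 = nm.predict(X)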
|
amarquand/nispat
|
pcntoolkit/normative_model/norm_blr.py
|
Python
|
gpl-3.0
| 8,464 | 0.006971 |
import os, logging, imp
from autotest_lib.client.bin import test
from autotest_lib.client.common_lib import error
from autotest_lib.client.virt import virt_utils, virt_env_process
class kvm(test.test):
"""
Suite of KVM virtualization functional tests.
Contains tests for testing both KVM kernel code and userspace code.
@copyright: Red Hat 2008-2009
@author: Uri Lublin (uril@redhat.com)
@author: Dror Russo (drusso@redhat.com)
@author: Michael Goldish (mgoldish@redhat.com)
@author: David Huff (dhuff@redhat.com)
@author: Alexey Eromenko (aeromenk@redhat.com)
@author: Mike Burns (mburns@redhat.com)
@see: http://www.linux-kvm.org/page/KVM-Autotest/Client_Install
(Online doc - Getting started with KVM testing)
"""
version = 1
env_version = 1
preserve_srcdir = True
#preserve_srcdir = False
def run_once(self, params):
# Convert params to a Params object
params = virt_utils.Params(params)
# If a dependency test prior to this test has failed, let's fail
# it right away as TestNA.
if params.get("dependency_failed") == 'yes':
raise error.TestNAError("Test dependency failed")
# Report the parameters we've received and write them as keyvals
logging.debug("Test parameters:")
keys = params.keys()
keys.sort()
for key in keys:
logging.debug(" %s = %s", key, params[key])
self.write_test_keyval({key: params[key]})
# Set the log file dir for the logging mechanism used by kvm_subprocess
# (this must be done before unpickling env)
virt_utils.set_log_file_dir(self.debugdir)
# Open the environment file
logging.info("Unpickling env. You may see some harmless error "
"messages.")
env_filename = os.path.join(self.bindir, params.get("env", "env"))
env = virt_utils.Env(env_filename, self.env_version)
test_passed = False
try:
try:
try:
# Get the test routine corresponding to the specified
# test type
t_type = params.get("type")
# Verify if we have the correspondent source file for it
virt_dir = os.path.dirname(virt_utils.__file__)
subtest_dir_virt = os.path.join(virt_dir, "tests")
subtest_dir_kvm = os.path.join(self.bindir, "tests")
subtest_dir = None
for d in [subtest_dir_kvm, subtest_dir_virt]:
module_path = os.path.join(d, "%s.py" % t_type)
if os.path.isfile(module_path):
subtest_dir = d
break
if subtest_dir is None:
raise error.TestError("Could not find test file %s.py "
"on either %s or %s directory" %
subtest_dir_kvm, subtest_dir_virt)
# Load the test module
f, p, d = imp.find_module(t_type, [subtest_dir])
test_module = imp.load_module(t_type, f, p, d)
f.close()
# Preprocess
try:
virt_env_process.preprocess(self, params, env)
finally:
env.save()
# Run the test function
run_func = getattr(test_module, "run_%s" % t_type)
try:
run_func(self, params, env)
finally:
env.save()
test_passed = True
except Exception, e:
logging.error("Test failed: %s: %s",
e.__class__.__name__, e)
try:
virt_env_process.postprocess_on_error(
self, params, env)
finally:
env.save()
raise
finally:
# Postprocess
try:
try:
virt_env_process.postprocess(self, params, env)
except Exception, e:
if test_passed:
raise
logging.error("Exception raised during "
"postprocessing: %s", e)
finally:
env.save()
except Exception, e:
if params.get("abort_on_error") != "yes":
raise
# Abort on error
logging.info("Aborting job (%s)", e)
for vm in env.get_all_vms():
if vm.is_dead():
continue
logging.info("VM '%s' is alive.", vm.name)
for m in vm.monitors:
logging.info("'%s' has a %s monitor unix socket at: %s",
vm.name, m.protocol, m.filename)
logging.info("The command line used to start '%s' was:\n%s",
vm.name, vm.make_qemu_command())
raise error.JobError("Abort requested (%s)" % e)
|
wuzhy/autotest
|
client/tests/kvm/kvm.py
|
Python
|
gpl-2.0
| 5,376 | 0.000558 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import shutil
import tempfile
import time
from pyspark.sql import Row
from pyspark.sql.functions import lit
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, UTCOffsetTimezone
class SerdeTests(ReusedSQLTestCase):
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.testing.sqlutils import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
# The empty bytearray is test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
def test_int_array_serialization(self):
# Note that this test seems dependent on parallelism.
# This issue is because internal object map in Pyrolite is not cleared after op code
# STOP. If we use protocol 4 to pickle Python objects, op code MEMOIZE will store
# objects in the map. We need to clear up it to make sure next unpickling works on
# clear map.
data = self.spark.sparkContext.parallelize([[1, 2, 3, 4]] * 100, numSlices=12)
df = self.spark.createDataFrame(data, "array<integer>")
self.assertEqual(len(list(filter(lambda r: None in r.value, df.collect()))), 0)
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_serde import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
highfei2011/spark
|
python/pyspark/sql/tests/test_serde.py
|
Python
|
apache-2.0
| 6,215 | 0.00177 |
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.fft._fftlog import fht, ifht, fhtoffset
from scipy.special import poch
def test_fht_agrees_with_fftlog():
# check that fht numerically agrees with the output from Fortran FFTLog,
# the results were generated with the provided `fftlogtest` program,
# after fixing how the k array is generated (divide range by n-1, not n)
# test function, analytical Hankel transform is of the same form
def f(r, mu):
return r**(mu+1)*np.exp(-r**2/2)
r = np.logspace(-4, 4, 16)
dln = np.log(r[1]/r[0])
mu = 0.3
offset = 0.0
bias = 0.0
a = f(r, mu)
# test 1: compute as given
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02,
-0.1949518286432330E-02, +0.3789220182554077E-02,
+0.5093959119952945E-03, +0.2785387803618774E-01,
+0.9944952700848897E-01, +0.4599202164586588E+00,
+0.3157462160881342E+00, -0.8201236844404755E-03,
-0.7834031308271878E-03, +0.3931444945110708E-03,
-0.2697710625194777E-03, +0.3568398050238820E-03,
-0.5554454827797206E-03, +0.8286331026468585E-03]
assert_allclose(ours, theirs)
# test 2: change to optimal offset
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05,
+0.3150140927838524E-03, +0.9149121960963704E-03,
+0.5808089753959363E-02, +0.2548065256377240E-01,
+0.1339477692089897E+00, +0.4821530509479356E+00,
+0.2659899781579785E+00, -0.1116475278448113E-01,
+0.1791441617592385E-02, -0.4181810476548056E-03,
+0.1314963536765343E-03, -0.5422057743066297E-04,
+0.3208681804170443E-04, -0.2696849476008234E-04]
assert_allclose(ours, theirs)
# test 3: positive bias
bias = 0.8
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00,
+0.1065374386206564E+00, -0.5121739602708132E-01,
+0.2636649319269470E-01, +0.1697209218849693E-01,
+0.1250215614723183E+00, +0.4739583261486729E+00,
+0.2841149874912028E+00, -0.8312764741645729E-02,
+0.1024233505508988E-02, -0.1644902767389120E-03,
+0.3305775476926270E-04, -0.7786993194882709E-05,
+0.1962258449520547E-05, -0.8977895734909250E-06]
assert_allclose(ours, theirs)
# test 4: negative bias
bias = -0.8
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04,
+0.2123969254700955E-03, +0.1009558244834628E-02,
+0.5131386375222176E-02, +0.2461678673516286E-01,
+0.1235812845384476E+00, +0.4719570096404403E+00,
+0.2893487490631317E+00, -0.1686570611318716E-01,
+0.2231398155172505E-01, -0.1480742256379873E-01,
+0.1692387813500801E+00, +0.3097490354365797E+00,
+2.7593607182401860E+00, 10.5251075070045800E+00]
assert_allclose(ours, theirs)
@pytest.mark.parametrize('optimal', [True, False])
@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0])
@pytest.mark.parametrize('bias', [0, 0.1, -0.1])
@pytest.mark.parametrize('n', [64, 63])
def test_fht_identity(n, bias, offset, optimal):
rng = np.random.RandomState(3491349965)
a = rng.standard_normal(n)
dln = rng.uniform(-1, 1)
mu = rng.uniform(-2, 2)
if optimal:
offset = fhtoffset(dln, mu, initial=offset, bias=bias)
A = fht(a, dln, mu, offset=offset, bias=bias)
a_ = ifht(A, dln, mu, offset=offset, bias=bias)
assert_allclose(a, a_)
def test_fht_special_cases():
rng = np.random.RandomState(3491349965)
a = rng.standard_normal(64)
dln = rng.uniform(-1, 1)
# let xp = (mu+1+q)/2, xm = (mu+1-q)/2, M = {0, -1, -2, ...}
# case 1: xp in M, xm in M => well-defined transform
mu, bias = -4.0, 1.0
with warnings.catch_warnings(record=True) as record:
fht(a, dln, mu, bias=bias)
assert not record, 'fht warned about a well-defined transform'
# case 2: xp not in M, xm in M => well-defined transform
mu, bias = -2.5, 0.5
with warnings.catch_warnings(record=True) as record:
fht(a, dln, mu, bias=bias)
assert not record, 'fht warned about a well-defined transform'
# case 3: xp in M, xm not in M => singular transform
mu, bias = -3.5, 0.5
with pytest.warns(Warning) as record:
fht(a, dln, mu, bias=bias)
assert record, 'fht did not warn about a singular transform'
# case 4: xp not in M, xm in M => singular inverse transform
mu, bias = -2.5, 0.5
with pytest.warns(Warning) as record:
ifht(a, dln, mu, bias=bias)
assert record, 'ifht did not warn about a singular transform'
@pytest.mark.parametrize('n', [64, 63])
def test_fht_exact(n):
rng = np.random.RandomState(3491349965)
# for a(r) a power law r^\gamma, the fast Hankel transform produces the
# exact continuous Hankel transform if biased with q = \gamma
mu = rng.uniform(0, 3)
# convergence of HT: -1-mu < gamma < 1/2
gamma = rng.uniform(-1-mu, 1/2)
r = np.logspace(-2, 2, n)
a = r**gamma
dln = np.log(r[1]/r[0])
offset = fhtoffset(dln, mu, initial=0.0, bias=gamma)
A = fht(a, dln, mu, offset=offset, bias=gamma)
k = np.exp(offset)/r[::-1]
# analytical result
At = (2/k)**gamma * poch((mu+1-gamma)/2, gamma)
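    # (poch((mu+1-gamma)/2, gamma) equals
    # Gamma((mu+1+gamma)/2) / Gamma((mu+1-gamma)/2), the known closed form
    # of the continuous Hankel transform of r**gamma.)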
assert_allclose(A, At)
|
ilayn/scipy
|
scipy/fft/tests/test_fftlog.py
|
Python
|
bsd-3-clause
| 5,819 | 0 |
#
# Parse tree nodes
#
from __future__ import absolute_import
import cython
cython.declare(sys=object, os=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
absolute_path_length=cython.Py_ssize_t, error_type=object, _py_int_types=object)
import sys, os, copy
from itertools import chain
if sys.version_info[0] >= 3:
_py_int_types = int
else:
_py_int_types = (int, long)
from . import Builtin
from .Errors import error, warning, InternalError, CompileError
from . import Naming
from . import PyrexTypes
from . import TypeSlots
from .PyrexTypes import py_object_type, error_type
from .Symtab import (ModuleScope, LocalScope, ClosureScope,
StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from .Code import UtilityCode
from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
from ..Utils import add_metaclass
absolute_path_length = 0
def relative_position(pos):
"""
We embed the relative filename in the generated C file, since we
don't want to have to regenerate and compile all the source code
whenever the Python install directory moves (which could happen,
    e.g., when distributing binaries.)
INPUT:
        a position tuple -- (absolute filename, line number, column position)
OUTPUT:
relative filename
line number
AUTHOR: William Stein
"""
global absolute_path_length
if absolute_path_length==0:
absolute_path_length = len(os.path.abspath(os.getcwd()))
return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
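# For example (illustrative): with the current directory /home/user/project
# and a pos resolving to /home/user/project/src/mod.pyx at line 17, this
# returns ('src/mod.pyx', 17).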
def embed_position(pos, docstring):
if not Options.embed_pos_in_docstring:
return docstring
pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
if docstring is None:
# unicode string
return EncodedString(pos_line)
# make sure we can encode the filename in the docstring encoding
# otherwise make the docstring a unicode string
encoding = docstring.encoding
if encoding is not None:
try:
pos_line.encode(encoding)
except UnicodeEncodeError:
encoding = None
if not docstring:
# reuse the string encoding of the original docstring
doc = EncodedString(pos_line)
else:
doc = EncodedString(pos_line + u'\n' + docstring)
doc.encoding = encoding
return doc
def _analyse_signature_annotation(annotation, env):
base_type = None
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
for name, value in annotation.key_value_pairs:
if not name.is_string_literal:
continue
if name.value in ('type', b'type'):
explicit_pytype = True
if not explicit_ctype:
annotation = value
elif name.value in ('ctype', b'ctype'):
explicit_ctype = True
annotation = value
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
if arg_type is not None:
if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
else:
warning(annotation.pos, "Unknown type declaration found in signature annotation")
return base_type, arg_type
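# Annotations in dict-literal form are recognised here (illustrative
# signatures; a plain annotation such as x: int is analysed as a type
# directly):
#     def f(x: {'type': 'int'}): ...    # declared via a Python-level type
#     def g(x: {'ctype': 'long'}): ...  # declared via a C-level type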
def write_func_call(func, codewriter_class):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], codewriter_class):
# here we annotate the code with this function call
# but only if new code is generated
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
node.pos[1:])
pristine = code.buffer.stream.tell()
code.putln(marker)
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
if start == code.buffer.stream.tell():
# no code written => undo writing marker
code.buffer.stream.truncate(pristine)
else:
marker = marker.replace('->', '<-', 1)
code.putln(marker)
return res
else:
return func(*args, **kwds)
return f
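# With tracing enabled, the generated C file is bracketed by markers roughly
# like (illustrative):
#      /*     -> FuncDefNode.generate_function_definitions (5, 0) */
#      ...generated C code...
#      /*     <- FuncDefNode.generate_function_definitions (5, 0) */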
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
# This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
from types import FunctionType
from .Code import CCodeWriter
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType):
attrs[mname] = write_func_call(m, CCodeWriter)
return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
class CheckAnalysers(type):
"""Metaclass to check that type analysis functions return a node.
"""
methods = set(['analyse_types',
'analyse_expressions',
'analyse_target_types'])
def __new__(cls, name, bases, attrs):
from types import FunctionType
def check(name, func):
def call(*args, **kwargs):
retval = func(*args, **kwargs)
if retval is None:
print('%s %s %s' % (name, args, kwargs))
return retval
return call
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType) and mname in cls.methods:
attrs[mname] = check(mname, m)
return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
def _with_metaclass(cls):
if DebugFlags.debug_trace_code_generation:
return add_metaclass(VerboseCodeWriter)(cls)
#return add_metaclass(CheckAnalysers)(cls)
return cls
@_with_metaclass
class Node(object):
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
is_name = 0
is_none = 0
is_nonecheck = 0
is_literal = 0
is_terminator = 0
temps = None
# All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
cf_state = None
# This may be an additional (or 'actual') type that will be checked when
# this node is coerced to another type. This could be useful to set when
# the actual type to which it can coerce is known, but you want to leave
# the type a py_object_type
coercion_type = None
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
cpp_message = "Operation"
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
def cpp_error(self):
error(self.pos, "%s only allowed in c++" % self.cpp_message)
def clone_node(self):
"""Clone the node. This is defined as a shallow copy, except for member lists
amongst the child attributes (from get_child_accessors) which are also
copied. Lists containing child nodes are thus seen as a way for the node
to hold multiple children directly; the list is not treated as a separate
level in the tree."""
result = copy.copy(self)
for attrname in result.child_attrs:
value = getattr(result, attrname)
if isinstance(value, list):
setattr(result, attrname, [x for x in value])
return result
#
# There are 3 phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
#
# (0) analyse_declarations
# Make symbol table entries for all declarations at the current
# level, both explicit (def, cdef, etc.) and implicit (assignment
# to an otherwise undeclared name).
#
# (1) analyse_expressions
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
# tree where needed to convert to and from Python objects.
# Allocate temporary locals for intermediate results. Fill
# in the 'result_code' attribute of each ExprNode with a C code
# fragment.
#
# (2) generate_code
# Emit C code for all declarations, statements and expressions.
# Recursively applies the 3 processing phases to the bodies of
# functions.
#
def analyse_declarations(self, env):
pass
def analyse_expressions(self, env):
raise InternalError("analyse_expressions not implemented for %s" % \
self.__class__.__name__)
def generate_code(self, code):
raise InternalError("generate_code not implemented for %s" % \
self.__class__.__name__)
def annotate(self, code):
# mro does the wrong thing
if isinstance(self, BlockNode):
self.body.annotate(code)
def end_pos(self):
try:
return self._end_pos
except AttributeError:
pos = self.pos
if not self.child_attrs:
self._end_pos = pos
return pos
for attr in self.child_attrs:
child = getattr(self, attr)
# Sometimes lists, sometimes nodes
if child is None:
pass
elif isinstance(child, list):
for c in child:
pos = max(pos, c.end_pos())
else:
pos = max(pos, child.end_pos())
self._end_pos = pos
return pos
def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
"""Debug helper method that returns a recursive string representation of this node.
"""
if cutoff == 0:
return "<...nesting level cutoff...>"
if encountered is None:
encountered = set()
if id(self) in encountered:
return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level, filter_out, cutoff-1, encountered)
elif isinstance(x, list):
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
return repr(x)
attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
if len(attrs) == 0:
return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
else:
indent = " " * level
res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
for key, value in attrs:
res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
res += "%s>" % indent
return res
def dump_pos(self, mark_column=False, marker='(#)'):
"""Debug helper method that returns the source code context of this node as a string.
"""
if not self.pos:
return u''
source_desc, line, col = self.pos
contents = source_desc.get_lines(encoding='ASCII',
error_handling='ignore')
# line numbers start at 1
lines = contents[max(0,line-3):line]
current = lines[-1]
if mark_column:
current = current[:col] + marker + current[col:]
lines[-1] = current.rstrip() + u' # <<<<<<<<<<<<<<\n'
lines += contents[line:line+2]
return u'"%s":%d:%d\n%s\n' % (
source_desc.get_escaped_description(), line, col, u''.join(lines))
class CompilerDirectivesNode(Node):
"""
Sets compiler directives for the children nodes
"""
# directives {string:value} A dictionary holding the right value for
# *all* possible directives.
# body Node
child_attrs = ["body"]
def analyse_declarations(self, env):
old = env.directives
env.directives = self.directives
self.body.analyse_declarations(env)
env.directives = old
def analyse_expressions(self, env):
old = env.directives
env.directives = self.directives
self.body = self.body.analyse_expressions(env)
env.directives = old
return self
def generate_function_definitions(self, env, code):
env_old = env.directives
code_old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_function_definitions(env, code)
env.directives = env_old
code.globalstate.directives = code_old
def generate_execution_code(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_execution_code(code)
code.globalstate.directives = old
def annotate(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.annotate(code)
code.globalstate.directives = old
class BlockNode(object):
# Mixin class for nodes representing a declaration block.
def generate_cached_builtins_decls(self, env, code):
entries = env.global_scope().undeclared_cached_builtins
for entry in entries:
code.globalstate.add_cached_builtin_decl(entry)
del entries[:]
def generate_lambda_definitions(self, env, code):
for node in env.lambda_defs:
node.generate_function_definitions(env, code)
class StatListNode(Node):
# stats a list of StatNode
child_attrs = ["stats"]
@staticmethod
def create_analysed(pos, env, *args, **kw):
node = StatListNode(pos, *args, **kw)
return node # No node-specific analysis needed
def analyse_declarations(self, env):
#print "StatListNode.analyse_declarations" ###
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
#print "StatListNode.analyse_expressions" ###
self.stats = [ stat.analyse_expressions(env)
for stat in self.stats ]
return self
def generate_function_definitions(self, env, code):
#print "StatListNode.generate_function_definitions" ###
for stat in self.stats:
stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
#print "StatListNode.generate_execution_code" ###
for stat in self.stats:
code.mark_pos(stat.pos)
stat.generate_execution_code(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class StatNode(Node):
#
# Code generation for statements is split into the following subphases:
#
# (1) generate_function_definitions
# Emit C code for the definitions of any structs,
# unions, enums and functions defined in the current
# scope-block.
#
# (2) generate_execution_code
# Emit C code for executable statements.
#
def generate_function_definitions(self, env, code):
pass
def generate_execution_code(self, code):
raise InternalError("generate_execution_code not implemented for %s" % \
self.__class__.__name__)
class CDefExternNode(StatNode):
# include_file string or None
# body StatNode
child_attrs = ["body"]
def analyse_declarations(self, env):
if self.include_file:
env.add_include_file(self.include_file)
old_cinclude_flag = env.in_cinclude
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class CDeclaratorNode(Node):
# Part of a C declaration.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns (name, type) pair where name is the
# CNameDeclaratorNode of the name being declared
# and type is the type it is being declared as.
#
# calling_convention string Calling convention of CFuncDeclaratorNode
# for which this is a base
child_attrs = []
calling_convention = ""
def analyse_templates(self):
# Only C++ functions have templates.
return None
class CNameDeclaratorNode(CDeclaratorNode):
# name string The Cython name being declared
# cname string or None C name, if specified
# default ExprNode or None the value assigned on declaration
child_attrs = ['default']
default = None
def analyse(self, base_type, env, nonempty = 0):
if nonempty and self.name == '':
# May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
error(self.pos, "Missing argument name")
elif base_type.is_void:
error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
else:
self.name = base_type.declaration_code("", for_display=1, pyrex=1)
base_type = py_object_type
if base_type.is_fused and env.fused_to_specific:
base_type = base_type.specialize(env.fused_to_specific)
self.type = base_type
return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_pyobject:
error(self.pos,
"Pointer base type cannot be a Python object")
ptr_type = PyrexTypes.c_ptr_type(base_type)
return self.base.analyse(ptr_type, env, nonempty = nonempty)
class CReferenceDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_pyobject:
error(self.pos,
"Reference base type cannot be a Python object")
ref_type = PyrexTypes.c_ref_type(base_type)
return self.base.analyse(ref_type, env, nonempty = nonempty)
class CArrayDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# dimension ExprNode
child_attrs = ["base", "dimension"]
def analyse(self, base_type, env, nonempty = 0):
if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction:
from .ExprNodes import TupleNode
if isinstance(self.dimension, TupleNode):
args = self.dimension.args
else:
args = self.dimension,
values = [v.analyse_as_type(env) for v in args]
if None in values:
ix = values.index(None)
error(args[ix].pos, "Template parameter not a type")
base_type = error_type
else:
base_type = base_type.specialize_here(self.pos, values)
return self.base.analyse(base_type, env, nonempty = nonempty)
if self.dimension:
self.dimension = self.dimension.analyse_const_expression(env)
if not self.dimension.type.is_int:
error(self.dimension.pos, "Array dimension not integer")
size = self.dimension.get_constant_c_result_code()
if size is not None:
try:
size = int(size)
except ValueError:
# runtime constant?
pass
else:
size = None
if not base_type.is_complete():
error(self.pos,
"Array element type '%s' is incomplete" % base_type)
if base_type.is_pyobject:
error(self.pos,
"Array element cannot be a Python object")
if base_type.is_cfunction:
error(self.pos,
"Array element cannot be a function")
array_type = PyrexTypes.c_array_type(base_type, size)
return self.base.analyse(array_type, env, nonempty = nonempty)
class CFuncDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# args [CArgDeclNode]
# templates [TemplatePlaceholderType]
# has_varargs boolean
# exception_value ConstNode
# exception_check boolean True if PyErr_Occurred check needed
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# is_const_method boolean Whether this is a const method
child_attrs = ["base", "args", "exception_value"]
overridable = 0
optional_arg_count = 0
is_const_method = 0
templates = None
def analyse_templates(self):
if isinstance(self.base, CArrayDeclaratorNode):
from .ExprNodes import TupleNode, NameNode
template_node = self.base.dimension
if isinstance(template_node, TupleNode):
template_nodes = template_node.args
elif isinstance(template_node, NameNode):
template_nodes = [template_node]
else:
error(template_node.pos, "Template arguments must be a list of names")
return None
self.templates = []
for template in template_nodes:
if isinstance(template, NameNode):
self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name))
else:
error(template.pos, "Template arguments must be a list of names")
self.base = self.base.base
return self.templates
else:
return None
def analyse(self, return_type, env, nonempty = 0, directive_locals = {}):
if nonempty:
nonempty -= 1
func_type_args = []
for i, arg_node in enumerate(self.args):
name_declarator, type = arg_node.analyse(
env, nonempty=nonempty, is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives))
name = name_declarator.name
if name in directive_locals:
type_node = directive_locals[name]
other_type = type_node.analyse_as_type(env)
if other_type is None:
error(type_node.pos, "Not a type")
elif (type is not PyrexTypes.py_object_type
and not type.same_as(other_type)):
error(self.base.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
type = other_type
if name_declarator.cname:
error(self.pos,
"Function argument cannot have C name specification")
if i==0 and env.is_c_class_scope and type.is_unspecified:
# fix the type of self
type = env.parent_type
# Turn *[] argument into **
if type.is_array:
type = PyrexTypes.c_ptr_type(type.base_type)
# Catch attempted C-style func(void) decl
if type.is_void:
error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
func_type_args.append(
PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
if arg_node.default:
self.optional_arg_count += 1
elif self.optional_arg_count:
error(self.pos, "Non-default argument follows default argument")
exc_val = None
exc_check = 0
if self.exception_check == '+':
env.add_include_file('ios') # for std::ios_base::failure
env.add_include_file('new') # for std::bad_alloc
env.add_include_file('stdexcept')
env.add_include_file('typeinfo') # for std::bad_cast
if (return_type.is_pyobject
and (self.exception_value or self.exception_check)
and self.exception_check != '+'):
error(self.pos,
"Exception clause not allowed for function returning Python object")
else:
if self.exception_value:
self.exception_value = self.exception_value.analyse_const_expression(env)
if self.exception_check == '+':
exc_val_type = self.exception_value.type
if (not exc_val_type.is_error
and not exc_val_type.is_pyobject
and not (exc_val_type.is_cfunction
and not exc_val_type.return_type.is_pyobject
and not exc_val_type.args)):
error(self.exception_value.pos,
"Exception value must be a Python exception or cdef function with no arguments.")
exc_val = self.exception_value
else:
self.exception_value = self.exception_value.coerce_to(
return_type, env).analyse_const_expression(env)
exc_val = self.exception_value.get_constant_c_result_code()
if exc_val is None:
raise InternalError(
"get_constant_c_result_code not implemented for %s" %
self.exception_value.__class__.__name__)
if not return_type.assignable_from(self.exception_value.type):
error(self.exception_value.pos,
"Exception value incompatible with function return type")
exc_check = self.exception_check
if return_type.is_cfunction:
error(self.pos,
"Function cannot return a function")
func_type = PyrexTypes.CFuncType(
return_type, func_type_args, self.has_varargs,
optional_arg_count = self.optional_arg_count,
exception_value = exc_val, exception_check = exc_check,
calling_convention = self.base.calling_convention,
nogil = self.nogil, with_gil = self.with_gil, is_overridable = self.overridable,
is_const_method = self.is_const_method,
templates = self.templates)
if self.optional_arg_count:
if func_type.is_fused:
# This is a bit of a hack... When we need to create specialized CFuncTypes
# on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg
# struct
def declare_opt_arg_struct(func_type, fused_cname):
self.declare_optional_arg_struct(func_type, env, fused_cname)
func_type.declare_opt_arg_struct = declare_opt_arg_struct
else:
self.declare_optional_arg_struct(func_type, env)
callspec = env.directives['callspec']
if callspec:
current = func_type.calling_convention
if current and current != callspec:
error(self.pos, "cannot have both '%s' and '%s' "
"calling conventions" % (current, callspec))
func_type.calling_convention = callspec
return self.base.analyse(func_type, env)
def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
"""
Declares the optional argument struct (the struct used to hold the
values for optional arguments). For fused cdef functions, this is
deferred as analyse_declarations is called only once (on the fused
cdef function).
"""
scope = StructOrUnionScope()
arg_count_member = '%sn' % Naming.pyrex_prefix
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type.args[len(func_type.args)-self.optional_arg_count:]:
scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject = 1)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
if fused_cname is not None:
struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname)
op_args_struct = env.global_scope().declare_struct_or_union(
name = struct_cname,
kind = 'struct',
scope = scope,
typedef_flag = 0,
pos = self.pos,
cname = struct_cname)
op_args_struct.defined_in_pxd = 1
op_args_struct.used = 1
func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type)
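        # The struct declared above has roughly the shape (illustrative C):
        #     struct __pyx_opt_args_<mangled_name> {
        #         int __pyx_n;       /* number of optional args actually given */
        #         <type> <arg_name>; /* one member per optional argument */
        #     };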
class CConstDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
const = PyrexTypes.c_const_type(base_type)
return self.base.analyse(const, env, nonempty = nonempty)
class CArgDeclNode(Node):
# Item in a function declaration argument list.
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# not_none boolean Tagged with 'not None'
# or_none boolean Tagged with 'or None'
# accept_none boolean Resolved boolean for not_none/or_none
# default ExprNode or None
# default_value PyObjectConst constant for default value
# annotation ExprNode or None Py3 function arg annotation
# is_self_arg boolean Is the "self" arg of an extension type method
# is_type_arg boolean Is the "class" arg of an extension type classmethod
# is_kw_only boolean Is a keyword-only argument
# is_dynamic boolean Non-literal arg stored inside CyFunction
child_attrs = ["base_type", "declarator", "default", "annotation"]
is_self_arg = 0
is_type_arg = 0
is_generic = 1
kw_only = 0
not_none = 0
or_none = 0
type = None
name_declarator = None
default_value = None
annotation = None
is_dynamic = 0
def analyse(self, env, nonempty = 0, is_self_arg = False):
if is_self_arg:
self.base_type.is_self_arg = self.is_self_arg = True
if self.type is None:
# The parser may misinterpret names as types. We fix that here.
if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
if nonempty:
if self.base_type.is_basic_c_type:
# char, short, long called "int"
type = self.base_type.analyse(env, could_be_name=True)
arg_name = type.empty_declaration_code()
else:
arg_name = self.base_type.name
self.declarator.name = EncodedString(arg_name)
self.base_type.name = None
self.base_type.is_basic_c_type = False
could_be_name = True
else:
could_be_name = False
self.base_type.is_arg = True
base_type = self.base_type.analyse(env, could_be_name=could_be_name)
if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
self.declarator.name = self.base_type.arg_name
# The parser is unable to resolve the ambiguity of [] as part of the
# type (e.g. in buffers) or empty declarator (as with arrays).
        # This only arises for empty multi-dimensional arrays.
if (base_type.is_array
and isinstance(self.base_type, TemplatedTypeNode)
and isinstance(self.declarator, CArrayDeclaratorNode)):
declarator = self.declarator
while isinstance(declarator.base, CArrayDeclaratorNode):
declarator = declarator.base
declarator.base = self.base_type.array_declarator
base_type = base_type.base_type
# inject type declaration from annotations
if self.annotation and env.directives['annotation_typing'] and self.base_type.name is None:
arg_type = self.inject_type_from_annotations(env)
if arg_type is not None:
base_type = arg_type
return self.declarator.analyse(base_type, env, nonempty=nonempty)
else:
return self.name_declarator, self.type
def inject_type_from_annotations(self, env):
annotation = self.annotation
if not annotation:
return None
base_type, arg_type = _analyse_signature_annotation(annotation, env)
if base_type is not None:
self.base_type = base_type
return arg_type
def calculate_default_value_code(self, code):
if self.default_value is None:
if self.default:
if self.default.is_literal:
# will not output any code, just assign the result_code
self.default.generate_evaluation_code(code)
return self.type.cast_code(self.default.result())
self.default_value = code.get_argument_default_const(self.type)
return self.default_value
def annotate(self, code):
if self.default:
self.default.annotate(code)
def generate_assignment_code(self, code, target=None, overloaded_assignment=False):
default = self.default
if default is None or default.is_literal:
return
if target is None:
target = self.calculate_default_value_code(code)
default.generate_evaluation_code(code)
default.make_owned_reference(code)
result = default.result() if overloaded_assignment else default.result_as(self.type)
code.putln("%s = %s;" % (target, result))
if self.type.is_pyobject:
code.put_giveref(default.result())
default.generate_post_assignment_code(code)
default.free_temps(code)
class CBaseTypeNode(Node):
# Abstract base class for C base type nodes.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns the type.
def analyse_as_type(self, env):
return self.analyse(env)
class CAnalysedBaseTypeNode(Node):
# type type
child_attrs = []
def analyse(self, env, could_be_name = False):
return self.type
class CSimpleBaseTypeNode(CBaseTypeNode):
# name string
# module_path [string] Qualifying name components
# is_basic_c_type boolean
# signed boolean
# longness integer
# complex boolean
# is_self_arg boolean Is self argument of C method
# ##is_type_arg boolean Is type argument of class method
child_attrs = []
arg_name = None # in case the argument name was interpreted as a type
module_path = []
is_basic_c_type = False
complex = False
def analyse(self, env, could_be_name = False):
# Return type descriptor.
#print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
type = None
if self.is_basic_c_type:
type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
if not type:
error(self.pos, "Unrecognised type modifier combination")
elif self.name == "object" and not self.module_path:
type = py_object_type
elif self.name is None:
if self.is_self_arg and env.is_c_class_scope:
#print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
else:
if self.module_path:
# Maybe it's a nested C++ class.
scope = env
for item in self.module_path:
entry = scope.lookup(item)
if entry is not None and entry.is_cpp_class:
scope = entry.type.scope
else:
scope = None
break
if scope is None:
# Maybe it's a cimport.
scope = env.find_imported_module(self.module_path, self.pos)
if scope:
scope.fused_to_specific = env.fused_to_specific
else:
scope = env
if scope:
if scope.is_c_class_scope:
scope = scope.global_scope()
type = scope.lookup_type(self.name)
if type is not None:
pass
elif could_be_name:
if self.is_self_arg and env.is_c_class_scope:
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
self.arg_name = EncodedString(self.name)
else:
if self.templates:
                    if self.name not in self.templates:
error(self.pos, "'%s' is not a type identifier" % self.name)
type = PyrexTypes.TemplatePlaceholderType(self.name)
else:
error(self.pos, "'%s' is not a type identifier" % self.name)
if self.complex:
if not type.is_numeric or type.is_complex:
error(self.pos, "can only complexify c numeric types")
type = PyrexTypes.CComplexType(type)
type.create_declaration_utility_code(env)
elif type is Builtin.complex_type:
# Special case: optimise builtin complex type into C's
# double complex. The parser cannot do this (as for the
# normal scalar types) as the user may have redeclared the
# 'complex' type. Testing for the exact type here works.
type = PyrexTypes.c_double_complex_type
type.create_declaration_utility_code(env)
self.complex = True
if type:
return type
else:
return PyrexTypes.error_type
class MemoryViewSliceTypeNode(CBaseTypeNode):
name = 'memoryview'
child_attrs = ['base_type_node', 'axes']
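    # Illustrative example (not part of the compiler): this node represents
    # memoryview slice declarations such as
    #     cdef int[:, ::1] m   # 2D, C-contiguous slice of int
    # where 'int' is parsed into base_type_node and the ':' / '::1' axis
    # specifiers into axes.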
def analyse(self, env, could_be_name = False):
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
from . import MemoryView
try:
axes_specs = MemoryView.get_axes_specs(env, self.axes)
except CompileError as e:
error(e.position, e.message_only)
self.type = PyrexTypes.ErrorType()
return self.type
if not MemoryView.validate_axes(self.pos, axes_specs):
self.type = error_type
else:
self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
self.type.validate_memslice_dtype(self.pos)
self.use_memview_utilities(env)
return self.type
def use_memview_utilities(self, env):
from . import MemoryView
env.use_utility_code(MemoryView.view_utility_code)
class CNestedBaseTypeNode(CBaseTypeNode):
# For C++ classes that live inside other C++ classes.
# name string
# base_type CBaseTypeNode
child_attrs = ['base_type']
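    # Illustrative example (not part of the compiler): resolves qualified
    # spellings like 'Outer.Inner' for
    #     cdef extern from "outer.h":
    #         cppclass Outer:
    #             cppclass Inner:
    #                 pass
    #     cdef Outer.Inner x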
def analyse(self, env, could_be_name = None):
base_type = self.base_type.analyse(env)
if base_type is PyrexTypes.error_type:
return PyrexTypes.error_type
if not base_type.is_cpp_class:
error(self.pos, "'%s' is not a valid type scope" % base_type)
return PyrexTypes.error_type
type_entry = base_type.scope.lookup_here(self.name)
if not type_entry or not type_entry.is_type:
error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name))
return PyrexTypes.error_type
return type_entry.type
class TemplatedTypeNode(CBaseTypeNode):
# After parsing:
# positional_args [ExprNode] List of positional arguments
# keyword_args DictNode Keyword arguments
# base_type_node CBaseTypeNode
# After analysis:
# type PyrexTypes.BufferType or PyrexTypes.CppClassType ...containing the right options
child_attrs = ["base_type_node", "positional_args",
"keyword_args", "dtype_node"]
dtype_node = None
name = None
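    # Illustrative examples (not part of the compiler) of the three cases
    # distinguished in analyse() below:
    #     vector[int] v                    # templated C++ class
    #     np.ndarray[double, ndim=2] arr   # buffer type (Python object base)
    #     int[4] block                     # array with a dimension argument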
def analyse(self, env, could_be_name = False, base_type = None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
if base_type.is_cpp_class and base_type.is_template_type():
# Templated class
if self.keyword_args and self.keyword_args.key_value_pairs:
error(self.pos, "c++ templates cannot take keyword arguments")
self.type = PyrexTypes.error_type
else:
template_types = []
for template_node in self.positional_args:
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
return error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
elif base_type.is_pyobject:
# Buffer
from . import Buffer
options = Buffer.analyse_buffer_options(
self.pos,
env,
self.positional_args,
self.keyword_args,
base_type.buffer_defaults)
if sys.version_info[0] < 3:
# Py 2.x enforces byte strings as keyword arguments ...
options = dict([ (name.encode('ASCII'), value)
for name, value in options.items() ])
self.type = PyrexTypes.BufferType(base_type, **options)
else:
# Array
empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
error(self.pos, "invalid array declaration")
self.type = PyrexTypes.error_type
else:
# It would be nice to merge this class with CArrayDeclaratorNode,
# but arrays are part of the declaration, not the type...
if not self.positional_args:
dimension = None
else:
dimension = self.positional_args[0]
self.array_declarator = CArrayDeclaratorNode(self.pos,
base = empty_declarator,
dimension = dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
if self.type.is_fused and env.fused_to_specific:
self.type = self.type.specialize(env.fused_to_specific)
return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
child_attrs = ["base_type", "declarator"]
def analyse(self, env, could_be_name = False):
base = self.base_type.analyse(env, could_be_name)
_, type = self.declarator.analyse(base, env)
return type
class CTupleBaseTypeNode(CBaseTypeNode):
# components [CBaseTypeNode]
child_attrs = ["components"]
def analyse(self, env, could_be_name=False):
component_types = []
for c in self.components:
type = c.analyse(env)
if type.is_pyobject:
error(c.pos, "Tuple types can't (yet) contain Python objects.")
return error_type
component_types.append(type)
entry = env.declare_tuple_type(self.pos, component_types)
entry.used = True
return entry.type
class FusedTypeNode(CBaseTypeNode):
"""
Represents a fused type in a ctypedef statement:
ctypedef cython.fused_type(int, long, long long) integral
name str name of this fused type
types [CSimpleBaseTypeNode] is the list of types to be fused
"""
child_attrs = []
def analyse_declarations(self, env):
type = self.analyse(env)
entry = env.declare_typedef(self.name, type, self.pos)
# Omit the typedef declaration that self.declarator would produce
entry.in_cinclude = True
def analyse(self, env, could_be_name = False):
types = []
for type_node in self.types:
type = type_node.analyse_as_type(env)
if not type:
error(type_node.pos, "Not a type")
continue
if type in types:
error(type_node.pos, "Type specified multiple times")
else:
types.append(type)
# if len(self.types) == 1:
# return types[0]
return PyrexTypes.FusedType(types, name=self.name)
class CConstTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
child_attrs = ["base_type"]
def analyse(self, env, could_be_name = False):
base = self.base_type.analyse(env, could_be_name)
if base.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
return PyrexTypes.c_const_type(base)
class CVarDefNode(StatNode):
# C variable definition or forward/extern function declaration.
#
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarators [CDeclaratorNode]
# in_pxd boolean
# api boolean
# overridable boolean whether it is a cpdef
# modifiers ['inline']
# decorators [cython.locals(...)] or None
# directive_locals { string : NameNode } locals defined by cython.locals(...)
child_attrs = ["base_type", "declarators"]
decorators = None
directive_locals = None
def analyse_declarations(self, env, dest_scope = None):
if self.directive_locals is None:
self.directive_locals = {}
if not dest_scope:
dest_scope = env
self.dest_scope = dest_scope
if self.declarators:
templates = self.declarators[0].analyse_templates()
else:
templates = None
if templates is not None:
if self.visibility != 'extern':
error(self.pos, "Only extern functions allowed")
if len(self.declarators) > 1:
error(self.declarators[1].pos, "Can't multiply declare template types")
env = TemplateScope('func_template', env)
env.directives = env.outer_scope.directives
for template_param in templates:
env.declare_type(template_param.name, template_param, self.pos)
base_type = self.base_type.analyse(env)
if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
env.is_module_scope):
error(self.pos, "Fused types not allowed here")
return error_type
self.entry = None
visibility = self.visibility
for declarator in self.declarators:
if (len(self.declarators) > 1
and not isinstance(declarator, CNameDeclaratorNode)
and env.directives['warn.multiple_declarators']):
warning(declarator.pos,
"Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). " +
"Each pointer declaration should be on its own line.", 1)
create_extern_wrapper = (self.overridable
and self.visibility == 'extern'
and env.is_module_scope)
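            # (Illustrative) this covers module-level declarations like
            #     cdef extern from "foo.h":
            #         cpdef int f(int x)
            # where a Python-callable wrapper is generated around the extern
            # C function rather than compiling a cpdef body.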
if create_extern_wrapper:
declarator.overridable = False
if isinstance(declarator, CFuncDeclaratorNode):
name_declarator, type = declarator.analyse(base_type, env, directive_locals=self.directive_locals)
else:
name_declarator, type = declarator.analyse(base_type, env)
if not type.is_complete():
if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
error(declarator.pos,
"Variable type '%s' is incomplete" % type)
if self.visibility == 'extern' and type.is_pyobject:
error(declarator.pos,
"Python object cannot be declared extern")
name = name_declarator.name
cname = name_declarator.cname
if name == '':
error(declarator.pos, "Missing name in declaration.")
return
if type.is_cfunction:
if 'staticmethod' in env.directives:
type.is_static_method = True
self.entry = dest_scope.declare_cfunction(name, type, declarator.pos,
cname=cname, visibility=self.visibility, in_pxd=self.in_pxd,
api=self.api, modifiers=self.modifiers, overridable=self.overridable)
if self.entry is not None:
self.entry.directive_locals = copy.copy(self.directive_locals)
if create_extern_wrapper:
self.entry.type.create_to_py_utility_code(env)
self.entry.create_wrapper = True
else:
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
self.entry = dest_scope.declare_var(name, type, declarator.pos,
cname=cname, visibility=visibility, in_pxd=self.in_pxd,
api=self.api, is_cdef=1)
if Options.docstrings:
self.entry.doc = embed_position(self.pos, self.doc)
class CStructOrUnionDefNode(StatNode):
# name string
# cname string or None
# kind "struct" or "union"
# typedef_flag boolean
# visibility "public" or "private"
# api boolean
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# packed boolean
child_attrs = ["attributes"]
def declare(self, env, scope=None):
self.entry = env.declare_struct_or_union(
self.name, self.kind, scope, self.typedef_flag, self.pos,
self.cname, visibility = self.visibility, api = self.api,
packed = self.packed)
def analyse_declarations(self, env):
scope = None
if self.attributes is not None:
scope = StructOrUnionScope(self.name)
self.declare(env, scope)
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(env, scope)
if self.visibility != 'extern':
for attr in scope.var_entries:
type = attr.type
while type.is_array:
type = type.base_type
if type == self.entry.type:
error(attr.pos, "Struct cannot contain itself as a member.")
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class CppClassNode(CStructOrUnionDefNode, BlockNode):
# name string
# cname string or None
# visibility "extern"
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# base_classes [CBaseTypeNode]
# templates [string] or None
# decorators [DecoratorNode] or None
decorators = None
def declare(self, env):
if self.templates is None:
template_types = None
else:
template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
self.entry = env.declare_cpp_class(
self.name, None, self.pos,
self.cname, base_classes = [], visibility = self.visibility, templates = template_types)
def analyse_declarations(self, env):
scope = None
if self.attributes is not None:
scope = CppClassScope(self.name, env, templates = self.templates)
def base_ok(base_class):
if base_class.is_cpp_class or base_class.is_struct:
return True
else:
error(self.pos, "Base class '%s' not a struct or class." % base_class)
base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes])
if self.templates is None:
template_types = None
else:
template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
self.entry = env.declare_cpp_class(
self.name, scope, self.pos,
self.cname, base_class_types, visibility = self.visibility, templates = template_types)
if self.entry is None:
return
self.entry.is_cpp_class = 1
if scope is not None:
scope.type = self.entry.type
defined_funcs = []
def func_attributes(attributes):
for attr in attributes:
if isinstance(attr, CFuncDefNode):
yield attr
elif isinstance(attr, CompilerDirectivesNode):
for sub_attr in func_attributes(attr.body.stats):
yield sub_attr
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(scope)
for func in func_attributes(self.attributes):
defined_funcs.append(func)
if self.templates is not None:
func.template_declaration = "template <typename %s>" % ", typename ".join(self.templates)
self.body = StatListNode(self.pos, stats=defined_funcs)
self.scope = scope
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(self.entry.type.scope)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(self.entry.type.scope, code)
def generate_execution_code(self, code):
self.body.generate_execution_code(code)
def annotate(self, code):
self.body.annotate(code)
class CEnumDefNode(StatNode):
# name string or None
# cname string or None
# items [CEnumDefItemNode]
# typedef_flag boolean
# visibility "public" or "private" or "extern"
# api boolean
# in_pxd boolean
# create_wrapper boolean
# entry Entry
child_attrs = ["items"]
def declare(self, env):
self.entry = env.declare_enum(self.name, self.pos,
cname = self.cname, typedef_flag = self.typedef_flag,
visibility = self.visibility, api = self.api,
create_wrapper = self.create_wrapper)
def analyse_declarations(self, env):
if self.items is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for item in self.items:
item.analyse_declarations(env, self.entry)
def analyse_expressions(self, env):
return self
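    # For public/api enums, the loop below emits module-init code that
    # exposes every enum value as a Python int. Illustrative C sketch
    # (cnames approximate):
    #     tmp = PyInt_FromLong(RED); if (!tmp) goto error;
    #     if (PyDict_SetItemString(moddict, "RED", tmp) < 0) goto error;
    #     Py_DECREF(tmp);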
def generate_execution_code(self, code):
if self.visibility == 'public' or self.api:
code.mark_pos(self.pos)
temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
for item in self.entry.enum_values:
code.putln("%s = PyInt_FromLong(%s); %s" % (
temp,
item.cname,
code.error_goto_if_null(temp, item.pos)))
code.put_gotref(temp)
code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
Naming.moddict_cname,
item.name,
temp,
code.error_goto(item.pos)))
code.put_decref_clear(temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(temp)
class CEnumDefItemNode(StatNode):
# name string
# cname string or None
# value ExprNode or None
child_attrs = ["value"]
def analyse_declarations(self, env, enum_entry):
if self.value:
self.value = self.value.analyse_const_expression(env)
if not self.value.type.is_int:
self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
self.value = self.value.analyse_const_expression(env)
entry = env.declare_const(self.name, enum_entry.type,
self.value, self.pos, cname = self.cname,
visibility = enum_entry.visibility, api = enum_entry.api,
create_wrapper = enum_entry.create_wrapper)
enum_entry.enum_values.append(entry)
if enum_entry.name:
enum_entry.type.values.append(entry.cname)
class CTypeDefNode(StatNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# visibility "public" or "private"
# api boolean
# in_pxd boolean
child_attrs = ["base_type", "declarator"]
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
name_declarator, type = self.declarator.analyse(base, env)
name = name_declarator.name
cname = name_declarator.cname
entry = env.declare_typedef(name, type, self.pos,
cname = cname, visibility = self.visibility, api = self.api)
if type.is_fused:
entry.in_cinclude = True
if self.in_pxd and not env.in_cinclude:
entry.defined_in_pxd = 1
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FuncDefNode(StatNode, BlockNode):
# Base class for function definition nodes.
#
# return_type PyrexType
# #filename string C name of filename string const
# entry Symtab.Entry
# needs_closure boolean Whether or not this function has inner functions/classes/yield
# needs_outer_scope boolean Whether or not this function requires outer scope
# pymethdef_required boolean Force Python method struct generation
# directive_locals { string : ExprNode } locals defined by cython.locals(...)
# directive_returns [ExprNode] type defined by cython.returns(...)
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
#
# is_async_def boolean is a Coroutine function
#
# has_fused_arguments boolean
# Whether this cdef function has fused parameters. This is needed
# by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes
# with fused argument types with a FusedCFuncDefNode
py_func = None
needs_closure = False
needs_outer_scope = False
pymethdef_required = False
is_generator = False
is_generator_body = False
is_async_def = False
modifiers = []
has_fused_arguments = False
star_arg = None
starstar_arg = None
is_cyfunction = False
code_object = None
def analyse_default_values(self, env):
default_seen = 0
for arg in self.args:
if arg.default:
default_seen = 1
if arg.is_generic:
arg.default = arg.default.analyse_types(env)
arg.default = arg.default.coerce_to(arg.type, env)
else:
error(arg.pos,
"This argument cannot have a default value")
arg.default = None
elif arg.kw_only:
default_seen = 1
elif default_seen:
error(arg.pos, "Non-default argument following default argument")
def analyse_annotations(self, env):
for arg in self.args:
if arg.annotation:
arg.annotation = arg.annotation.analyse_types(env)
def align_argument_type(self, env, arg):
# @cython.locals()
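        # (Illustrative) e.g. for
        #     @cython.locals(n=cython.int)
        #     def f(n): ...
        # the argument 'n' is re-typed to C int here; a conflicting earlier
        # declaration triggers the signature errors below.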
directive_locals = self.directive_locals
orig_type = arg.type
if arg.name in directive_locals:
type_node = directive_locals[arg.name]
other_type = type_node.analyse_as_type(env)
elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']:
type_node = arg.annotation
other_type = arg.inject_type_from_annotations(env)
if other_type is None:
return arg
else:
return arg
if other_type is None:
error(type_node.pos, "Not a type")
elif (orig_type is not PyrexTypes.py_object_type
and not orig_type.same_as(other_type)):
error(arg.base_type.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
arg.type = other_type
return arg
def need_gil_acquisition(self, lenv):
return 0
def create_local_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if self.needs_closure:
lenv = ClosureScope(name=self.entry.name,
outer_scope = genv,
parent_scope = env,
scope_name=self.entry.cname)
else:
lenv = LocalScope(name=self.entry.name,
outer_scope=genv,
parent_scope=env)
lenv.return_type = self.return_type
type = self.entry.type
if type.is_cfunction:
lenv.nogil = type.nogil and not type.with_gil
self.local_scope = lenv
lenv.directives = env.directives
return lenv
def generate_function_body(self, env, code):
self.body.generate_execution_code(code)
def generate_function_definitions(self, env, code):
from . import Buffer
if self.return_type.is_memoryviewslice:
from . import MemoryView
lenv = self.local_scope
if lenv.is_closure_scope and not lenv.is_passthrough:
outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
Naming.outer_scope_cname)
else:
outer_scope_cname = Naming.outer_scope_cname
lenv.mangle_closure_cnames(outer_scope_cname)
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# generate lambda function definitions
self.generate_lambda_definitions(lenv, code)
is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
self.entry.scope.is_c_class_scope)
is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
self.entry.scope.is_c_class_scope)
is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
if is_buffer_slot:
if 'cython_unused' not in self.modifiers:
self.modifiers = self.modifiers + ['cython_unused']
preprocessor_guard = self.get_preprocessor_guard()
profile = code.globalstate.directives['profile']
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Profile", "Profile.c"))
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
code.funcstate.gil_owned = not lenv.nogil
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
if preprocessor_guard:
code.putln(preprocessor_guard)
with_pymethdef = (self.needs_assignment_synthesis(env, code) or
self.pymethdef_required)
if self.py_func:
self.py_func.generate_function_header(code,
with_pymethdef = with_pymethdef,
proto_only=True)
self.generate_function_header(code,
with_pymethdef = with_pymethdef)
# ----- Local variable declarations
# Find function scope
cenv = env
while cenv.is_py_class_scope or cenv.is_c_class_scope:
cenv = cenv.outer_scope
if self.needs_closure:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
elif self.needs_outer_scope:
if lenv.is_passthrough:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
code.putln(";")
self.generate_argument_declarations(lenv, code)
for entry in lenv.var_entries:
if not (entry.in_closure or entry.is_arg):
code.put_var_declaration(entry)
# Initialize the return variable __pyx_r
init = ""
if not self.return_type.is_void:
if self.return_type.is_pyobject:
init = " = NULL"
elif self.return_type.is_memoryviewslice:
init = ' = ' + MemoryView.memslice_entry_init
code.putln(
"%s%s;" %
(self.return_type.declaration_code(Naming.retval_cname),
init))
tempvardecl_code = code.insertion_point()
self.generate_keyword_list(code)
# ----- Extern library function declarations
lenv.generate_library_function_declarations(code)
# ----- GIL acquisition
acquire_gil = self.acquire_gil
# See if we need to acquire the GIL for variable declarations, or for
# refnanny only
# Closures are not currently possible for cdef nogil functions,
# but check them anyway
have_object_args = self.needs_closure or self.needs_outer_scope
for arg in lenv.arg_entries:
if arg.type.is_pyobject:
have_object_args = True
break
used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used]
acquire_gil_for_var_decls_only = (
lenv.nogil and lenv.has_with_gil_block and
(have_object_args or used_buffer_entries))
acquire_gil_for_refnanny_only = (
lenv.nogil and lenv.has_with_gil_block and not
acquire_gil_for_var_decls_only)
use_refnanny = not lenv.nogil or lenv.has_with_gil_block
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_ensure_gil()
code.funcstate.gil_owned = True
elif lenv.nogil and lenv.has_with_gil_block:
code.declare_gilstate()
if profile or linetrace:
tempvardecl_code.put_trace_declarations()
code_object = self.code_object.calculate_result_code(code) if self.code_object else None
code.put_trace_frame_init(code_object)
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
code.put_setup_refcount_context(
self.entry.name, acquire_gil=acquire_gil_for_refnanny_only)
# ----- Automatic lead-ins for certain special functions
if is_getbuffer_slot:
self.getbuffer_init(code)
# ----- Create closure scope object
if self.needs_closure:
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot)
if not slot_func_cname:
slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname
code.putln("%s = (%s)%s(%s, %s, NULL);" % (
Naming.cur_scope_cname,
lenv.scope_class.type.empty_declaration_code(),
slot_func_cname,
lenv.scope_class.type.typeptr_cname,
Naming.empty_tuple))
code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
if use_refnanny:
code.put_finish_refcount_context()
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
# FIXME: what if the error return value is a Python value?
err_val = self.error_value()
if err_val is None:
if not self.caller_will_check_exceptions():
warning(self.entry.pos,
"Unraisable exception in function '%s'." %
self.entry.qualified_name, 0)
code.put_unraisable(self.entry.qualified_name, lenv.nogil)
#if self.return_type.is_void:
code.putln("return;")
else:
code.putln("return %s;" % err_val)
code.putln("}")
code.put_gotref(Naming.cur_scope_cname)
# Note that it is unsafe to decref the scope at this point.
if self.needs_outer_scope:
if self.is_cyfunction:
code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % (
outer_scope_cname,
cenv.scope_class.type.empty_declaration_code(),
Naming.self_cname))
else:
code.putln("%s = (%s) %s;" % (
outer_scope_cname,
cenv.scope_class.type.empty_declaration_code(),
Naming.self_cname))
if lenv.is_passthrough:
code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname))
elif self.needs_closure:
# inner closures own a reference to their outer parent
code.put_incref(outer_scope_cname, cenv.scope_class.type)
code.put_giveref(outer_scope_cname)
# ----- Trace function call
if profile or linetrace:
            # this looks a bit late, but if we don't get here due to a
            # fatal error beforehand, it's not really worth tracing
code.put_trace_call(self.entry.name, self.pos, nogil=not code.funcstate.gil_owned)
code.funcstate.can_trace = True
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
# If an argument is assigned to in the body, we must
# incref it to properly keep track of refcounts.
is_cdef = isinstance(self, CFuncDefNode)
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
code.put_var_incref(entry)
            # Note: defaults are always incref-ed. For def functions, we
            # acquire arguments from object conversion, so we have
            # new references. If we are a cdef function, we need to
            # incref our arguments.
elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned)
for entry in lenv.var_entries:
if entry.is_arg and len(entry.cf_assignments) > 1:
if entry.xdecref_cleanup:
code.put_var_xincref(entry)
else:
code.put_var_incref(entry)
# ----- Initialise local buffer auxiliary variables
for entry in lenv.var_entries + lenv.arg_entries:
if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
Buffer.put_init_vars(entry, code)
# ----- Check and convert arguments
self.generate_argument_type_tests(code)
# ----- Acquire buffer arguments
for entry in lenv.arg_entries:
if entry.type.is_buffer:
Buffer.put_acquire_arg_buffer(entry, code, self.pos)
if acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
code.funcstate.gil_owned = False
# -------------------------
# ----- Function body -----
# -------------------------
self.generate_function_body(env, code)
code.mark_pos(self.pos, trace=False)
code.putln("")
code.putln("/* function exit code */")
# ----- Default return value
if not self.body.is_terminator:
if self.return_type.is_pyobject:
#if self.return_type.is_extension_type:
# lhs = "(PyObject *)%s" % Naming.retval_cname
#else:
lhs = Naming.retval_cname
code.put_init_to_py_none(lhs, self.return_type)
else:
val = self.return_type.default_value
if val:
code.putln("%s = %s;" % (Naming.retval_cname, val))
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type, have_gil=not lenv.nogil)
# Clean up buffers -- this calls a Python function
# so need to save and restore error state
buffers_present = len(used_buffer_entries) > 0
#memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice]
if buffers_present:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
for entry in used_buffer_entries:
Buffer.put_release_buffer_code(code, entry)
#code.putln("%s = 0;" % entry.cname)
code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
if self.return_type.is_memoryviewslice:
MemoryView.put_init_entry(Naming.retval_cname, code)
err_val = Naming.retval_cname
else:
err_val = self.error_value()
exc_check = self.caller_will_check_exceptions()
if err_val is not None or exc_check:
# TODO: Fix exception tracing (though currently unused by cProfile).
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
if lenv.nogil and not lenv.has_with_gil_block:
code.putln("{")
code.put_ensure_gil()
code.put_add_traceback(self.entry.qualified_name)
if lenv.nogil and not lenv.has_with_gil_block:
code.put_release_ensured_gil()
code.putln("}")
else:
warning(self.entry.pos,
"Unraisable exception in function '%s'." %
self.entry.qualified_name, 0)
code.put_unraisable(self.entry.qualified_name, lenv.nogil)
default_retval = self.return_type.default_value
if err_val is None and default_retval:
err_val = default_retval
if err_val is not None:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
# If we are using the non-error cleanup section we should
        # jump past it if we have an error. The if-test below determines
        # whether this section is used.
if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice:
code.put_goto(code.return_from_error_cleanup_label)
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in used_buffer_entries:
Buffer.put_release_buffer_code(code, entry)
if is_getbuffer_slot:
self.getbuffer_normal_cleanup(code)
if self.return_type.is_memoryviewslice:
# See if our return value is uninitialized on non-error return
# from . import MemoryView
# MemoryView.err_if_nogil_initialized_check(self.pos, env)
cond = code.unlikely(self.return_type.error_condition(
Naming.retval_cname))
code.putln(
'if (%s) {' % cond)
if env.nogil:
code.put_ensure_gil()
code.putln(
'PyErr_SetString('
'PyExc_TypeError,'
'"Memoryview return value is not initialized");')
if env.nogil:
code.put_release_ensured_gil()
code.putln(
'}')
# ----- Return cleanup for both error and no-error return
code.put_label(code.return_from_error_cleanup_label)
for entry in lenv.var_entries:
if not entry.used or entry.in_closure:
continue
if entry.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
elif entry.type.is_pyobject:
if not entry.is_arg or len(entry.cf_assignments) > 1:
if entry.xdecref_cleanup:
code.put_var_xdecref(entry)
else:
code.put_var_decref(entry)
# Decref any increfed args
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if ((acquire_gil or len(entry.cf_assignments) > 1) and
not entry.in_closure):
code.put_var_decref(entry)
elif (entry.type.is_memoryviewslice and
(not is_cdef or len(entry.cf_assignments) > 1)):
# decref slices of def functions and acquired slices from cdef
# functions, but not borrowed slices from cdef functions.
code.put_xdecref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
# ----- Return
# This code is duplicated in ModuleNode.generate_module_init_func
if not lenv.nogil:
default_retval = self.return_type.default_value
err_val = self.error_value()
if err_val is None and default_retval:
err_val = default_retval # FIXME: why is err_val not used?
if self.return_type.is_pyobject:
code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
if self.entry.is_special and self.entry.name == "__hash__":
# Returning -1 for __hash__ is supposed to signal an error
                # We do as Python instances do and coerce -1 into -2.
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
Naming.retval_cname, Naming.retval_cname))
if profile or linetrace:
code.funcstate.can_trace = False
if self.return_type.is_pyobject:
code.put_trace_return(Naming.retval_cname, nogil=not code.funcstate.gil_owned)
else:
code.put_trace_return("Py_None", nogil=not code.funcstate.gil_owned)
if not lenv.nogil:
# GIL holding function
code.put_finish_refcount_context()
if acquire_gil or (lenv.nogil and lenv.has_with_gil_block):
# release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode)
code.put_release_ensured_gil()
code.funcstate.gil_owned = False
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
if preprocessor_guard:
code.putln("#endif /*!(%s)*/" % preprocessor_guard)
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Python version
code.exit_cfunc_scope()
if self.py_func:
self.py_func.generate_function_definitions(env, code)
self.generate_wrapper_functions(code)
def declare_argument(self, env, arg):
if arg.type.is_void:
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos,
"Argument type '%s' is incomplete" % arg.type)
return env.declare_arg(arg.name, arg.type, arg.pos)
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
if arg.type.typeobj_is_available():
code.globalstate.use_utility_code(
UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
typeptr_cname = arg.type.typeptr_cname
arg_code = "((PyObject *)%s)" % arg.entry.cname
code.putln(
'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
arg_code,
typeptr_cname,
arg.accept_none,
arg.name,
arg.type.is_builtin_type,
code.error_goto(arg.pos)))
else:
error(arg.pos, "Cannot test type of extern C class "
"without type object name specification")
def generate_arg_none_check(self, arg, code):
# Generate None check for one argument.
if arg.type.is_memoryviewslice:
cname = "%s.memview" % arg.entry.cname
else:
cname = arg.entry.cname
code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % (
max(200, len(arg.name)), arg.name,
code.error_goto(arg.pos)))
code.putln('}')
def generate_wrapper_functions(self, code):
pass
def generate_execution_code(self, code):
code.mark_pos(self.pos)
# Evaluate and store argument default values
for arg in self.args:
if not arg.is_dynamic:
arg.generate_assignment_code(code)
#
# Special code for the __getbuffer__ function
#
def getbuffer_init(self, code):
info = self.local_scope.arg_entries[1].cname
# Python 3.0 betas have a bug in memoryview which makes it call
# getbuffer with a NULL parameter. For now we work around this;
# the following block should be removed when this bug is fixed.
code.putln("if (%s != NULL) {" % info)
code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
code.putln("}")
def getbuffer_error_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj != NULL) {"
% (info, info))
code.put_gotref("%s->obj" % info)
code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
% (info, info))
code.putln("}")
def getbuffer_normal_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
code.put_gotref("Py_None")
code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
code.putln("}")
def get_preprocessor_guard(self):
if not self.entry.is_special:
return None
name = self.entry.name
slot = TypeSlots.method_name_to_slot.get(name)
if not slot:
return None
if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
return None
if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope:
return None
return slot.preprocessor_guard_code()
class CFuncDefNode(FuncDefNode):
# C function definition.
#
# modifiers ['inline']
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# cfunc_declarator the CFuncDeclarator of this function
# (this is also available through declarator or a
# base thereof)
# body StatListNode
# api boolean
# decorators [DecoratorNode] list of decorators
#
# with_gil boolean Acquire GIL around body
# type CFuncType
# py_func wrapper for calling from Python
# overridable whether or not this is a cpdef function
# inline_in_pxd whether this is an inline function in a pxd file
# template_declaration String or None Used for c++ class methods
# is_const_method whether this is a const method
# is_static_method whether this is a static method
# is_c_class_method whether this is a cclass method
child_attrs = ["base_type", "declarator", "body", "py_func_stat"]
inline_in_pxd = False
decorators = None
directive_locals = None
directive_returns = None
override = None
template_declaration = None
is_const_method = False
py_func_stat = None
def unqualified_name(self):
return self.entry.name
def analyse_declarations(self, env):
self.is_c_class_method = env.is_c_class_scope
if self.directive_locals is None:
self.directive_locals = {}
self.directive_locals.update(env.directives['locals'])
if self.directive_returns is not None:
base_type = self.directive_returns.analyse_as_type(env)
if base_type is None:
error(self.directive_returns.pos, "Not a type")
base_type = PyrexTypes.error_type
else:
base_type = self.base_type.analyse(env)
self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod')
# The 2 here is because we need both function and argument names.
if isinstance(self.declarator, CFuncDeclaratorNode):
name_declarator, type = self.declarator.analyse(base_type, env,
nonempty = 2 * (self.body is not None),
directive_locals = self.directive_locals)
else:
name_declarator, type = self.declarator.analyse(base_type, env, nonempty = 2 * (self.body is not None))
if not type.is_cfunction:
error(self.pos,
"Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
# may be different if we're overriding a C method inherited
# from the base type of an extension type.
self.type = type
type.is_overridable = self.overridable
declarator = self.declarator
while not hasattr(declarator, 'args'):
declarator = declarator.base
self.cfunc_declarator = declarator
self.args = declarator.args
opt_arg_count = self.cfunc_declarator.optional_arg_count
if (self.visibility == 'public' or self.api) and opt_arg_count:
error(self.cfunc_declarator.pos,
"Function with optional arguments may not be declared "
"public or api")
if (type.exception_check == '+' and self.visibility != 'extern'):
warning(self.cfunc_declarator.pos,
"Only extern functions can throw C++ exceptions.")
for formal_arg, type_arg in zip(self.args, type.args):
self.align_argument_type(env, type_arg)
formal_arg.type = type_arg.type
formal_arg.name = type_arg.name
formal_arg.cname = type_arg.cname
self._validate_type_visibility(type_arg.type, type_arg.pos, env)
if type_arg.type.is_fused:
self.has_fused_arguments = True
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
if type_arg.type.is_buffer:
if self.type.nogil:
error(formal_arg.pos,
"Buffer may not be acquired without the GIL. "
"Consider using memoryview slices instead.")
elif 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
self._validate_type_visibility(type.return_type, self.pos, env)
name = name_declarator.name
cname = name_declarator.cname
type.is_const_method = self.is_const_method
type.is_static_method = self.is_static_method
self.entry = env.declare_cfunction(
name, type, self.pos,
cname=cname, visibility=self.visibility, api=self.api,
defining=self.body is not None, modifiers=self.modifiers,
overridable=self.overridable)
self.entry.inline_func_in_pxd = self.inline_in_pxd
self.return_type = type.return_type
if self.return_type.is_array and self.visibility != 'extern':
error(self.pos,
"Function cannot return an array")
if self.return_type.is_cpp_class:
self.return_type.check_nullary_constructor(self.pos, "used as a return value")
if self.overridable and not env.is_module_scope and not self.is_static_method:
if len(self.args) < 1 or not self.args[0].type.is_pyobject:
# An error will be produced in the cdef function
self.overridable = False
self.declare_cpdef_wrapper(env)
self.create_local_scope(env)
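    # Illustrative example (not part of the compiler): for
    #     cpdef int f(self, int x): ...
    # declare_cpdef_wrapper() synthesizes roughly
    #     def f(self, x): return self.f(x)
    # i.e. a Python 'def' wrapper whose body calls straight back into the C
    # implementation (built by call_self_node()), with method overriding
    # handled via OverrideCheckNode where required.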
def declare_cpdef_wrapper(self, env):
if self.overridable:
if self.is_static_method:
# TODO(robertwb): Finish this up, perhaps via more function refactoring.
error(self.pos, "static cpdef methods not yet supported")
name = self.entry.name
py_func_body = self.call_self_node(is_module_scope = env.is_module_scope)
if self.is_static_method:
from .ExprNodes import NameNode
decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name='staticmethod'))]
decorators[0].decorator.analyse_types(env)
else:
decorators = []
self.py_func = DefNode(pos = self.pos,
name = self.entry.name,
args = self.args,
star_arg = None,
starstar_arg = None,
doc = self.doc,
body = py_func_body,
decorators = decorators,
is_wrapper = 1)
self.py_func.is_module_scope = env.is_module_scope
self.py_func.analyse_declarations(env)
self.py_func_stat = StatListNode(pos = self.pos, stats = [self.py_func])
self.py_func.type = PyrexTypes.py_object_type
self.entry.as_variable = self.py_func.entry
self.entry.used = self.entry.as_variable.used = True
            # Reset the scope entry to the above cfunction.
env.entries[name] = self.entry
if (not self.entry.is_final_cmethod and
(not env.is_module_scope or Options.lookup_module_cpdef)):
self.override = OverrideCheckNode(self.pos, py_func = self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
def _validate_type_visibility(self, type, pos, env):
"""
Ensure that types used in cdef functions are public or api, or
defined in a C header.
"""
public_or_api = (self.visibility == 'public' or self.api)
entry = getattr(type, 'entry', None)
if public_or_api and entry and env.is_module_scope:
if not (entry.visibility in ('public', 'extern') or
entry.api or entry.in_cinclude):
error(pos, "Function declared public or api may not have "
"private types")
def call_self_node(self, omit_optional_args=0, is_module_scope=0):
from . import ExprNodes
args = self.type.args
if omit_optional_args:
args = args[:len(args) - self.type.optional_arg_count]
arg_names = [arg.name for arg in args]
if is_module_scope:
cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
call_arg_names = arg_names
skip_dispatch = Options.lookup_module_cpdef
elif self.type.is_static_method:
class_entry = self.entry.scope.parent_type.entry
class_node = ExprNodes.NameNode(self.pos, name=class_entry.name)
class_node.entry = class_entry
cfunc = ExprNodes.AttributeNode(self.pos, obj=class_node, attribute=self.entry.name)
            # Calling static c(p)def methods on an instance is disallowed.
# TODO(robertwb): Support by passing self to check for override?
skip_dispatch = True
else:
type_entry = self.type.args[0].type.entry
type_arg = ExprNodes.NameNode(self.pos, name=type_entry.name)
type_arg.entry = type_entry
cfunc = ExprNodes.AttributeNode(self.pos, obj=type_arg, attribute=self.entry.name)
skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
c_call = ExprNodes.SimpleCallNode(
self.pos,
function=cfunc,
args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names],
wrapper_call=skip_dispatch)
return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
def declare_arguments(self, env):
for arg in self.type.args:
if not arg.name:
error(arg.pos, "Missing argument name")
self.declare_argument(env, arg)
def need_gil_acquisition(self, lenv):
return self.type.with_gil
def nogil_check(self, env):
type = self.type
with_gil = type.with_gil
if type.nogil and not with_gil:
if type.return_type.is_pyobject:
error(self.pos,
"Function with Python return type cannot be declared nogil")
for entry in self.local_scope.var_entries:
if entry.type.is_pyobject and not entry.in_with_gil_block:
error(self.pos, "Function declared nogil has Python locals or temporaries")
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
if self.py_func is not None:
# this will also analyse the default values
self.py_func = self.py_func.analyse_expressions(env)
else:
self.analyse_default_values(env)
self.analyse_annotations(env)
self.acquire_gil = self.need_gil_acquisition(self.local_scope)
return self
def needs_assignment_synthesis(self, env, code=None):
return False
def generate_function_header(self, code, with_pymethdef, with_opt_args = 1, with_dispatch = 1, cname = None):
scope = self.local_scope
arg_decls = []
type = self.type
for arg in type.args[:len(type.args)-type.optional_arg_count]:
arg_decl = arg.declaration_code()
entry = scope.lookup(arg.name)
if not entry.cf_used:
arg_decl = 'CYTHON_UNUSED %s' % arg_decl
arg_decls.append(arg_decl)
if with_dispatch and self.overridable:
dispatch_arg = PyrexTypes.c_int_type.declaration_code(
Naming.skip_dispatch_cname)
if self.override:
arg_decls.append(dispatch_arg)
else:
arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg)
if type.optional_arg_count and with_opt_args:
arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname))
if type.has_varargs:
arg_decls.append("...")
if not arg_decls:
arg_decls = ["void"]
if cname is None:
cname = self.entry.func_cname
entity = type.function_header_code(cname, ', '.join(arg_decls))
if self.entry.visibility == 'private' and '::' not in cname:
storage_class = "static "
else:
storage_class = ""
dll_linkage = None
modifiers = code.build_function_modifiers(self.entry.func_modifiers)
header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
#print (storage_class, modifiers, header)
needs_proto = self.is_c_class_method
if self.template_declaration:
if needs_proto:
code.globalstate.parts['module_declarations'].putln(self.template_declaration)
code.putln(self.template_declaration)
if needs_proto:
code.globalstate.parts['module_declarations'].putln("%s%s%s; /* proto*/" % (storage_class, modifiers, header))
code.putln("%s%s%s {" % (storage_class, modifiers, header))
def generate_argument_declarations(self, env, code):
scope = self.local_scope
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
result = arg.calculate_default_value_code(code)
code.putln('%s = %s;' % (
arg.type.declaration_code(arg.cname), result))
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
i = 0
used = 0
scope = self.local_scope
if self.type.optional_arg_count:
code.putln('if (%s) {' % Naming.optional_args_cname)
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
code.putln('if (%s->%sn > %s) {' %
(Naming.optional_args_cname,
Naming.pyrex_prefix, i))
declarator = arg.declarator
while not hasattr(declarator, 'name'):
declarator = declarator.base
code.putln('%s = %s->%s;' %
(arg.cname, Naming.optional_args_cname,
self.type.opt_arg_cname(declarator.name)))
used += 1
i += 1
for _ in range(used):
code.putln('}')
code.putln('}')
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure and not arg.default:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
code.put_var_incref(entry)
code.put_var_giveref(entry)
for arg in self.args:
put_into_closure(scope.lookup_here(arg.name))
def generate_argument_conversion_code(self, code):
pass
def generate_argument_type_tests(self, code):
# Generate type tests for args whose type in a parent
# class is a supertype of the declared type.
for arg in self.type.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif arg.type.is_pyobject and not arg.accept_none:
self.generate_arg_none_check(arg, code)
def generate_execution_code(self, code):
super(CFuncDefNode, self).generate_execution_code(code)
if self.py_func_stat:
self.py_func_stat.generate_execution_code(code)
def error_value(self):
if self.return_type.is_pyobject:
return "0"
else:
#return None
return self.entry.type.exception_value
def caller_will_check_exceptions(self):
return self.entry.type.exception_check
def generate_wrapper_functions(self, code):
# If the C signature of a function has changed, we need to generate
# wrappers to put in the slots here.
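        # (Illustrative) each superseded signature entry gets a thin C
        # wrapper forwarding to the current implementation, e.g. roughly
        #     static int f__pyx_wrap_1(int a) {
        #         return f(a, NULL);  /* forward, no optional args given */
        #     }
        # so callers bound to the old signature keep working.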
k = 0
entry = self.entry
func_type = entry.type
while entry.prev_entry is not None:
k += 1
entry = entry.prev_entry
entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
code.putln()
self.generate_function_header(code,
0,
with_dispatch = entry.type.is_overridable,
with_opt_args = entry.type.optional_arg_count,
cname = entry.func_cname)
if not self.return_type.is_void:
code.put('return ')
args = self.type.args
arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]]
if entry.type.is_overridable:
arglist.append(Naming.skip_dispatch_cname)
elif func_type.is_overridable:
arglist.append('0')
if entry.type.optional_arg_count:
arglist.append(Naming.optional_args_cname)
elif func_type.optional_arg_count:
arglist.append('NULL')
code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
code.putln('}')
class PyArgDeclNode(Node):
# Argument which must be a Python object (used
# for * and ** arguments).
#
# name string
# entry Symtab.Entry
# annotation ExprNode or None Py3 argument annotation
child_attrs = []
is_self_arg = False
is_type_arg = False
def generate_function_definitions(self, env, code):
self.entry.generate_function_definitions(env, code)
class DecoratorNode(Node):
# A decorator
#
# decorator NameNode or CallNode or AttributeNode
child_attrs = ['decorator']
class DefNode(FuncDefNode):
# A Python function definition.
#
# name string the Python name of the function
# lambda_name string the internal name of a lambda 'function'
# decorators [DecoratorNode] list of decorators
# args [CArgDeclNode] formal arguments
# doc EncodedString or None
# body StatListNode
# return_type_annotation
# ExprNode or None the Py3 return type annotation
#
# The following subnode is constructed internally
# when the def statement is inside a Python class definition.
#
# fused_py_func DefNode The original fused cpdef DefNode
# (in case this is a specialization)
# specialized_cpdefs [DefNode] list of specialized cpdef DefNodes
# py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign
#
# decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"]
lambda_name = None
reqd_kw_flags_cname = "0"
is_wrapper = 0
no_assignment_synthesis = 0
decorators = None
return_type_annotation = None
entry = None
acquire_gil = 0
self_in_stararg = 0
py_cfunc_node = None
requires_classobj = False
    defaults_struct = None # Dynamic keywords structure name
doc = None
fused_py_func = False
specialized_cpdefs = None
py_wrapper = None
py_wrapper_required = True
func_cname = None
defaults_getter = None
def __init__(self, pos, **kwds):
FuncDefNode.__init__(self, pos, **kwds)
k = rk = r = 0
for arg in self.args:
if arg.kw_only:
k += 1
if not arg.default:
rk += 1
if not arg.default:
r += 1
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
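    # (Illustrative) as_cfunction() converts this 'def' into a C function
    # definition, e.g. when compiling
    #     @cython.ccall
    #     def f(x): ...
    # or when a .pxd file declares a matching cdef/cpdef signature for it.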
def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, modifiers=None):
if self.star_arg:
error(self.star_arg.pos, "cdef function cannot have star argument")
if self.starstar_arg:
error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
if cfunc is None:
cfunc_args = []
for formal_arg in self.args:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name = name_declarator.name,
cname = None,
type = py_object_type,
pos = formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type = py_object_type,
args = cfunc_args,
has_varargs = False,
exception_value = None,
exception_check = False,
nogil = False,
with_gil = False,
is_overridable = overridable)
cfunc = CVarDefNode(self.pos, type=cfunc_type)
else:
if scope is None:
scope = cfunc.scope
cfunc_type = cfunc.type
if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
error(self.pos, "wrong number of arguments")
error(cfunc.pos, "previous declaration here")
for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
name_declarator, type = formal_arg.analyse(scope, nonempty=1,
is_self_arg = (i == 0 and scope.is_c_class_scope))
if type is None or type is PyrexTypes.py_object_type:
formal_arg.type = type_arg.type
formal_arg.name_declarator = name_declarator
from . import ExprNodes
if cfunc_type.exception_value is None:
exception_value = None
else:
exception_value = ExprNodes.ConstNode(self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
declarator = CFuncDeclaratorNode(self.pos,
base = CNameDeclaratorNode(self.pos, name=self.name, cname=None),
args = self.args,
has_varargs = False,
exception_check = cfunc_type.exception_check,
exception_value = exception_value,
with_gil = cfunc_type.with_gil,
nogil = cfunc_type.nogil)
return CFuncDefNode(self.pos,
modifiers = modifiers or [],
base_type = CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
declarator = declarator,
body = self.body,
doc = self.doc,
overridable = cfunc_type.is_overridable,
type = cfunc_type,
with_gil = cfunc_type.with_gil,
nogil = cfunc_type.nogil,
visibility = 'private',
api = False,
directive_locals = getattr(cfunc, 'directive_locals', {}),
directive_returns = returns)
def is_cdef_func_compatible(self):
"""Determines if the function's signature is compatible with a
cdef function. This can be used before calling
.as_cfunction() to see if that will be successful.
"""
if self.needs_closure:
return False
if self.star_arg or self.starstar_arg:
return False
return True
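    # For example, "def f(self, x): ..." in a cdef class is compatible and
    # can be converted via .as_cfunction(), while "def f(self, *args): ..."
    # or a def that needs a closure is not, since star arguments and
    # closures have no C-level counterpart here.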
def analyse_declarations(self, env):
self.is_classmethod = self.is_staticmethod = False
if self.decorators:
for decorator in self.decorators:
func = decorator.decorator
if func.is_name:
self.is_classmethod |= func.name == 'classmethod'
self.is_staticmethod |= func.name == 'staticmethod'
if self.is_classmethod and env.lookup_here('classmethod'):
# classmethod() was overridden - not much we can do here ...
self.is_classmethod = False
if self.is_staticmethod and env.lookup_here('staticmethod'):
# staticmethod() was overridden - not much we can do here ...
self.is_staticmethod = False
if self.name == '__new__' and env.is_py_class_scope:
self.is_staticmethod = 1
self.analyse_argument_types(env)
if self.name == '<lambda>':
self.declare_lambda_function(env)
else:
self.declare_pyfunction(env)
self.analyse_signature(env)
self.return_type = self.entry.signature.return_type()
# if a signature annotation provides a more specific return object type, use it
if self.return_type is py_object_type and self.return_type_annotation:
if env.directives['annotation_typing'] and not self.entry.is_special:
_, return_type = _analyse_signature_annotation(self.return_type_annotation, env)
if return_type and return_type.is_pyobject:
self.return_type = return_type
self.create_local_scope(env)
self.py_wrapper = DefNodeWrapper(
self.pos,
target=self,
name=self.entry.name,
args=self.args,
star_arg=self.star_arg,
starstar_arg=self.starstar_arg,
return_type=self.return_type)
self.py_wrapper.analyse_declarations(env)
def analyse_argument_types(self, env):
self.directive_locals = env.directives['locals']
allow_none_for_extension_args = env.directives['allow_none_for_extension_args']
f2s = env.fused_to_specific
env.fused_to_specific = None
for arg in self.args:
if hasattr(arg, 'name'):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
arg.type = type
if type.is_fused:
self.has_fused_arguments = True
self.align_argument_type(env, arg)
if name_declarator and name_declarator.cname:
error(self.pos,
"Python function argument cannot have C name specification")
arg.type = arg.type.as_argument_type()
arg.hdr_type = None
arg.needs_conversion = 0
arg.needs_type_test = 0
arg.is_generic = 1
if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice:
if arg.or_none:
arg.accept_none = True
elif arg.not_none:
arg.accept_none = False
elif (arg.type.is_extension_type or arg.type.is_builtin_type
or arg.type.is_buffer or arg.type.is_memoryviewslice):
if arg.default and arg.default.constant_result is None:
# special case: def func(MyType obj = None)
arg.accept_none = True
else:
# default depends on compiler directive
arg.accept_none = allow_none_for_extension_args
else:
# probably just a plain 'object'
arg.accept_none = True
else:
arg.accept_none = True # won't be used, but must be there
if arg.not_none:
error(arg.pos, "Only Python type arguments can have 'not None'")
if arg.or_none:
error(arg.pos, "Only Python type arguments can have 'or None'")
env.fused_to_specific = f2s
def analyse_signature(self, env):
if self.entry.is_special:
if self.decorators:
error(self.pos, "special functions of cdef classes cannot have decorators")
self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
# Use the simpler calling signature for zero- and one-argument functions.
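            # e.g. "def f()" can then use the METH_NOARGS convention and
            # "def f(x)" METH_O (assuming the usual CPython mapping of these
            # slot signatures), avoiding tuple/keyword unpacking entirely.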
if self.entry.signature is TypeSlots.pyfunction_signature:
if len(self.args) == 0:
self.entry.signature = TypeSlots.pyfunction_noargs
elif len(self.args) == 1:
if self.args[0].default is None and not self.args[0].kw_only:
self.entry.signature = TypeSlots.pyfunction_onearg
elif self.entry.signature is TypeSlots.pymethod_signature:
if len(self.args) == 1:
self.entry.signature = TypeSlots.unaryfunc
elif len(self.args) == 2:
if self.args[1].default is None and not self.args[1].kw_only:
self.entry.signature = TypeSlots.ibinaryfunc
sig = self.entry.signature
nfixed = sig.num_fixed_args()
if sig is TypeSlots.pymethod_signature and nfixed == 1 \
and len(self.args) == 0 and self.star_arg:
# this is the only case where a diverging number of
# arguments is not an error - when we have no explicit
# 'self' parameter as in method(*args)
sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
self.self_in_stararg = 1
nfixed = 0
if self.is_staticmethod and env.is_c_class_scope:
nfixed = 0
self.self_in_stararg = True # FIXME: why for staticmethods?
self.entry.signature = sig = copy.copy(sig)
sig.fixed_arg_format = "*"
sig.is_staticmethod = True
sig.has_generic_args = True
if ((self.is_classmethod or self.is_staticmethod) and
self.has_fused_arguments and env.is_c_class_scope):
del self.decorator_indirection.stats[:]
for i in range(min(nfixed, len(self.args))):
arg = self.args[i]
arg.is_generic = 0
if sig.is_self_arg(i) and not self.is_staticmethod:
if self.is_classmethod:
arg.is_type_arg = 1
arg.hdr_type = arg.type = Builtin.type_type
else:
arg.is_self_arg = 1
arg.hdr_type = arg.type = env.parent_type
arg.needs_conversion = 0
else:
arg.hdr_type = sig.fixed_arg_type(i)
if not arg.type.same_as(arg.hdr_type):
if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
arg.needs_type_test = 1
else:
arg.needs_conversion = 1
if arg.needs_conversion:
arg.hdr_cname = Naming.arg_prefix + arg.name
else:
arg.hdr_cname = Naming.var_prefix + arg.name
if nfixed > len(self.args):
self.bad_signature()
return
elif nfixed < len(self.args):
if not sig.has_generic_args:
self.bad_signature()
for arg in self.args:
if arg.is_generic and \
(arg.type.is_extension_type or arg.type.is_builtin_type):
arg.needs_type_test = 1
def bad_signature(self):
sig = self.entry.signature
expected_str = "%d" % sig.num_fixed_args()
if sig.has_generic_args:
expected_str += " or more"
name = self.name
if name.startswith("__") and name.endswith("__"):
desc = "Special method"
else:
desc = "Method"
error(self.pos,
"%s %s has wrong number of arguments "
"(%d declared, %s expected)" % (
desc, self.name, len(self.args), expected_str))
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
entry = env.lookup_here(name)
if entry:
if entry.is_final_cmethod and not env.parent_type.is_final_type:
error(self.pos, "Only final types can have final Python (def/cpdef) methods")
if (entry.type.is_cfunction and not entry.is_builtin_cmethod
and not self.is_wrapper):
warning(self.pos, "Overriding cdef method with def method.", 5)
entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
self.entry = entry
prefix = env.next_id(env.scope_prefix)
self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name
if Options.docstrings:
entry.doc = embed_position(self.pos, self.doc)
entry.doc_cname = Naming.funcdoc_prefix + prefix + name
if entry.is_special:
            if entry.name in TypeSlots.invisible or not entry.doc or (entry.name == '__getattr__' and env.directives['fast_getattr']):
entry.wrapperbase_cname = None
else:
entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name
else:
entry.doc = None
def declare_lambda_function(self, env):
entry = env.declare_lambda_function(self.lambda_name, self.pos)
entry.doc = None
self.entry = entry
self.entry.pyfunc_cname = entry.cname
def declare_arguments(self, env):
for arg in self.args:
if not arg.name:
error(arg.pos, "Missing argument name")
if arg.needs_conversion:
arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
if arg.type.is_pyobject:
arg.entry.init = "0"
else:
arg.entry = self.declare_argument(env, arg)
arg.entry.is_arg = 1
arg.entry.used = 1
arg.entry.is_self_arg = arg.is_self_arg
self.declare_python_arg(env, self.star_arg)
self.declare_python_arg(env, self.starstar_arg)
def declare_python_arg(self, env, arg):
if arg:
if env.directives['infer_types'] != False:
type = PyrexTypes.unspecified_type
else:
type = py_object_type
entry = env.declare_var(arg.name, type, arg.pos)
entry.is_arg = 1
entry.used = 1
entry.init = "0"
entry.xdecref_cleanup = 1
arg.entry = entry
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
self.analyse_annotations(env)
if self.return_type_annotation:
self.return_type_annotation = self.return_type_annotation.analyse_types(env)
if not self.needs_assignment_synthesis(env) and self.decorators:
for decorator in self.decorators[::-1]:
decorator.decorator = decorator.decorator.analyse_expressions(env)
self.py_wrapper.prepare_argument_coercion(env)
return self
def needs_assignment_synthesis(self, env, code=None):
if self.is_staticmethod:
return True
if self.is_wrapper or self.specialized_cpdefs or self.entry.is_fused_specialized:
return False
if self.no_assignment_synthesis:
return False
        # Should be enabled at module level as well; that will require more testing...
if self.entry.is_anonymous:
return True
if env.is_module_scope:
if code is None:
return env.directives['binding']
else:
return code.globalstate.directives['binding']
return env.is_py_class_scope or env.is_closure_scope
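    # For example: a def inside a Python class body or a closure always gets
    # an assignment synthesized, while a module-level def only does so when
    # the 'binding' directive is enabled.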
def error_value(self):
return self.entry.signature.error_value
def caller_will_check_exceptions(self):
return self.entry.signature.exception_check
def generate_function_definitions(self, env, code):
if self.defaults_getter:
self.defaults_getter.generate_function_definitions(env, code)
# Before closure cnames are mangled
if self.py_wrapper_required:
# func_cname might be modified by @cname
self.py_wrapper.func_cname = self.entry.func_cname
self.py_wrapper.generate_function_definitions(env, code)
FuncDefNode.generate_function_definitions(self, env, code)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
if proto_only:
if self.py_wrapper_required:
self.py_wrapper.generate_function_header(
code, with_pymethdef, True)
return
arg_code_list = []
if self.entry.signature.has_dummy_arg:
self_arg = 'PyObject *%s' % Naming.self_cname
if not self.needs_outer_scope:
self_arg = 'CYTHON_UNUSED ' + self_arg
arg_code_list.append(self_arg)
def arg_decl_code(arg):
entry = arg.entry
if entry.in_closure:
cname = entry.original_cname
else:
cname = entry.cname
decl = entry.type.declaration_code(cname)
if not entry.cf_used:
decl = 'CYTHON_UNUSED ' + decl
return decl
for arg in self.args:
arg_code_list.append(arg_decl_code(arg))
if self.star_arg:
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
arg_code = ', '.join(arg_code_list)
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
preprocessor_guard = self.get_preprocessor_guard()
if preprocessor_guard:
decls_code.putln(preprocessor_guard)
decls_code.putln(
"static %s(%s); /* proto */" % (dc, arg_code))
if preprocessor_guard:
decls_code.putln("#endif")
code.putln("static %s(%s) {" % (dc, arg_code))
def generate_argument_declarations(self, env, code):
pass
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
code.put_var_incref(entry)
code.put_var_giveref(entry)
for arg in self.args:
put_into_closure(arg.entry)
for arg in self.star_arg, self.starstar_arg:
if arg:
put_into_closure(arg.entry)
def generate_argument_type_tests(self, code):
pass
class DefNodeWrapper(FuncDefNode):
# DefNode python wrapper code generator
defnode = None
target = None # Target DefNode
def __init__(self, *args, **kwargs):
FuncDefNode.__init__(self, *args, **kwargs)
self.num_kwonly_args = self.target.num_kwonly_args
self.num_required_kw_args = self.target.num_required_kw_args
self.num_required_args = self.target.num_required_args
self.self_in_stararg = self.target.self_in_stararg
self.signature = None
def analyse_declarations(self, env):
target_entry = self.target.entry
name = self.name
prefix = env.next_id(env.scope_prefix)
target_entry.func_cname = Naming.pywrap_prefix + prefix + name
target_entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name
self.signature = target_entry.signature
def prepare_argument_coercion(self, env):
# This is only really required for Cython utility code at this time,
# everything else can be done during code generation. But we expand
# all utility code here, simply because we cannot easily distinguish
# different code types.
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
pass # will fail later
elif arg.hdr_type and not arg.hdr_type.is_pyobject:
if not arg.hdr_type.create_to_py_utility_code(env):
pass # will fail later
if self.starstar_arg and not self.starstar_arg.entry.cf_used:
# we will set the kwargs argument to NULL instead of a new dict
# and must therefore correct the control flow state
entry = self.starstar_arg.entry
entry.xdecref_cleanup = 1
for ass in entry.cf_assignments:
if not ass.is_arg and ass.lhs.is_name:
ass.lhs.cf_maybe_null = True
def signature_has_nongeneric_args(self):
argcount = len(self.args)
if argcount == 0 or (
argcount == 1 and (self.args[0].is_self_arg or
self.args[0].is_type_arg)):
return 0
return 1
def signature_has_generic_args(self):
return self.signature.has_generic_args
def generate_function_body(self, code):
args = []
if self.signature.has_dummy_arg:
args.append(Naming.self_cname)
for arg in self.args:
if arg.hdr_type and not (arg.type.is_memoryviewslice or
arg.type.is_struct or
arg.type.is_complex):
args.append(arg.type.cast_code(arg.entry.cname))
else:
args.append(arg.entry.cname)
if self.star_arg:
args.append(self.star_arg.entry.cname)
if self.starstar_arg:
args.append(self.starstar_arg.entry.cname)
args = ', '.join(args)
if not self.return_type.is_void:
code.put('%s = ' % Naming.retval_cname)
code.putln('%s(%s);' % (
self.target.entry.pyfunc_cname, args))
def generate_function_definitions(self, env, code):
lenv = self.target.local_scope
# Generate C code for header and body of function
code.mark_pos(self.pos)
code.putln("")
code.putln("/* Python wrapper */")
preprocessor_guard = self.target.get_preprocessor_guard()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or
self.target.pymethdef_required)
self.generate_function_header(code, with_pymethdef)
self.generate_argument_declarations(lenv, code)
tempvardecl_code = code.insertion_point()
if self.return_type.is_pyobject:
retval_init = ' = 0'
else:
retval_init = ''
if not self.return_type.is_void:
code.putln('%s%s;' % (
self.return_type.declaration_code(Naming.retval_cname),
retval_init))
code.put_declare_refcount_context()
code.put_setup_refcount_context('%s (wrapper)' % self.name)
self.generate_argument_parsing_code(lenv, code)
self.generate_argument_type_tests(code)
self.generate_function_body(code)
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
# ----- Error cleanup
if code.error_label in code.labels_used:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
err_val = self.error_value()
if err_val is not None:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in lenv.var_entries:
if entry.is_arg and entry.type.is_pyobject:
code.put_var_decref(entry)
code.put_finish_refcount_context()
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln('}')
code.exit_cfunc_scope()
if preprocessor_guard:
code.putln("#endif /*!(%s)*/" % preprocessor_guard)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
arg_code_list = []
sig = self.signature
if sig.has_dummy_arg or self.self_in_stararg:
arg_code = "PyObject *%s" % Naming.self_cname
if not sig.has_dummy_arg:
arg_code = 'CYTHON_UNUSED ' + arg_code
arg_code_list.append(arg_code)
for arg in self.args:
if not arg.is_generic:
if arg.is_self_arg or arg.is_type_arg:
arg_code_list.append("PyObject *%s" % arg.hdr_cname)
else:
arg_code_list.append(
arg.hdr_type.declaration_code(arg.hdr_cname))
entry = self.target.entry
if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if entry.scope.is_c_class_scope and entry.name == "__ipow__":
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if sig.has_generic_args:
arg_code_list.append(
"PyObject *%s, PyObject *%s"
% (Naming.args_cname, Naming.kwds_cname))
arg_code = ", ".join(arg_code_list)
# Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
mf = ""
if (entry.name in ("__getbuffer__", "__releasebuffer__")
and entry.scope.is_c_class_scope):
mf = "CYTHON_UNUSED "
with_pymethdef = False
dc = self.return_type.declaration_code(entry.func_cname)
header = "static %s%s(%s)" % (mf, dc, arg_code)
code.putln("%s; /*proto*/" % header)
if proto_only:
if self.target.fused_py_func:
# If we are the specialized version of the cpdef, we still
# want the prototype for the "fused cpdef", in case we're
# checking to see if our method was overridden in Python
self.target.fused_py_func.generate_function_header(
code, with_pymethdef, proto_only=True)
return
if (Options.docstrings and entry.doc and
not self.target.fused_py_func and
not entry.scope.is_property_scope and
(not entry.is_special or entry.wrapperbase_cname)):
# h_code = code.globalstate['h_code']
docstr = entry.doc
if docstr.is_unicode:
docstr = docstr.as_utf8_string()
code.putln(
'static char %s[] = %s;' % (
entry.doc_cname,
docstr.as_c_string_literal()))
if entry.is_special:
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln(
"struct wrapperbase %s;" % entry.wrapperbase_cname)
code.putln('#endif')
if with_pymethdef or self.target.fused_py_func:
code.put(
"static PyMethodDef %s = " %
entry.pymethdef_cname)
code.put_pymethoddef(self.target.entry, ";", allow_skip=False)
code.putln("%s {" % header)
def generate_argument_declarations(self, env, code):
for arg in self.args:
if arg.is_generic:
if arg.needs_conversion:
code.putln("PyObject *%s = 0;" % arg.hdr_cname)
else:
code.put_var_declaration(arg.entry)
for entry in env.var_entries:
if entry.is_arg:
code.put_var_declaration(entry)
def generate_argument_parsing_code(self, env, code):
# Generate fast equivalent of PyArg_ParseTuple call for
# generic arguments, if any, including args/kwargs
old_error_label = code.new_error_label()
our_error_label = code.error_label
end_label = code.new_label("argument_unpacking_done")
has_kwonly_args = self.num_kwonly_args > 0
has_star_or_kw_args = self.star_arg is not None \
or self.starstar_arg is not None or has_kwonly_args
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
pass # will fail later
if not self.signature_has_generic_args():
if has_star_or_kw_args:
error(self.pos, "This method cannot have * or keyword arguments")
self.generate_argument_conversion_code(code)
elif not self.signature_has_nongeneric_args():
# func(*args) or func(**kw) or func(*args, **kw)
self.generate_stararg_copy_code(code)
else:
self.generate_tuple_and_keyword_parsing_code(self.args, end_label, code)
code.error_label = old_error_label
if code.label_used(our_error_label):
if not code.label_used(end_label):
code.put_goto(end_label)
code.put_label(our_error_label)
if has_star_or_kw_args:
self.generate_arg_decref(self.star_arg, code)
if self.starstar_arg:
if self.starstar_arg.entry.xdecref_cleanup:
code.put_var_xdecref_clear(self.starstar_arg.entry)
else:
code.put_var_decref_clear(self.starstar_arg.entry)
code.put_add_traceback(self.target.entry.qualified_name)
code.put_finish_refcount_context()
code.putln("return %s;" % self.error_value())
if code.label_used(end_label):
code.put_label(end_label)
def generate_arg_xdecref(self, arg, code):
if arg:
code.put_var_xdecref_clear(arg.entry)
def generate_arg_decref(self, arg, code):
if arg:
code.put_var_decref_clear(arg.entry)
def generate_stararg_copy_code(self, code):
if not self.star_arg:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
Naming.args_cname)
code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
self.name, Naming.args_cname, self.error_value()))
code.putln("}")
if self.starstar_arg:
if self.star_arg or not self.starstar_arg.entry.cf_used:
kwarg_check = "unlikely(%s)" % Naming.kwds_cname
else:
kwarg_check = "%s" % Naming.kwds_cname
else:
kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
Naming.kwds_cname, Naming.kwds_cname)
code.globalstate.use_utility_code(
UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
code.putln(
"if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
kwarg_check, Naming.kwds_cname, self.name,
bool(self.starstar_arg), self.error_value()))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references):
code.putln("if (%s) {" % kwarg_check)
code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % (
self.starstar_arg.entry.cname,
Naming.kwds_cname,
self.starstar_arg.entry.cname,
self.error_value()))
code.put_gotref(self.starstar_arg.entry.cname)
code.putln("} else {")
code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,))
code.putln("}")
self.starstar_arg.entry.xdecref_cleanup = 1
else:
code.put("%s = (%s) ? PyDict_Copy(%s) : PyDict_New(); " % (
self.starstar_arg.entry.cname,
Naming.kwds_cname,
Naming.kwds_cname))
code.putln("if (unlikely(!%s)) return %s;" % (
self.starstar_arg.entry.cname, self.error_value()))
self.starstar_arg.entry.xdecref_cleanup = 0
code.put_gotref(self.starstar_arg.entry.cname)
if self.self_in_stararg and not self.target.is_staticmethod:
# need to create a new tuple with 'self' inserted as first item
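            # e.g. obj.f(1, 2) arrives here as self=obj and args=(1, 2);
            # the code below repacks this into (obj, 1, 2) so that the
            # '*args' parameter sees self as well.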
code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
self.star_arg.entry.cname,
Naming.args_cname,
self.star_arg.entry.cname))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
code.putln("{")
code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type)
code.putln("return %s;" % self.error_value())
code.putln("}")
else:
code.putln("return %s;" % self.error_value())
code.put_gotref(self.star_arg.entry.cname)
code.put_incref(Naming.self_cname, py_object_type)
code.put_giveref(Naming.self_cname)
code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
self.star_arg.entry.cname, Naming.self_cname))
temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
temp, temp, Naming.args_cname, temp))
code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
Naming.args_cname, temp))
code.put_incref("item", py_object_type)
code.put_giveref("item")
code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
self.star_arg.entry.cname, temp))
code.putln("}")
code.funcstate.release_temp(temp)
self.star_arg.entry.xdecref_cleanup = 0
elif self.star_arg:
code.put_incref(Naming.args_cname, py_object_type)
code.putln("%s = %s;" % (
self.star_arg.entry.cname,
Naming.args_cname))
self.star_arg.entry.xdecref_cleanup = 0
def generate_tuple_and_keyword_parsing_code(self, args, success_label, code):
argtuple_error_label = code.new_label("argtuple_error")
positional_args = []
required_kw_only_args = []
optional_kw_only_args = []
for arg in args:
if arg.is_generic:
if arg.default:
if not arg.is_self_arg and not arg.is_type_arg:
if arg.kw_only:
optional_kw_only_args.append(arg)
else:
positional_args.append(arg)
elif arg.kw_only:
required_kw_only_args.append(arg)
elif not arg.is_self_arg and not arg.is_type_arg:
positional_args.append(arg)
# sort required kw-only args before optional ones to avoid special
# cases in the unpacking code
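        # e.g. for "def f(*, a, b=1, c)" the resulting unpacking order of
        # the keyword-only arguments is [a, c, b] (required first, then
        # optional).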
kw_only_args = required_kw_only_args + optional_kw_only_args
min_positional_args = self.num_required_args - self.num_required_kw_args
if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg):
min_positional_args -= 1
max_positional_args = len(positional_args)
has_fixed_positional_count = not self.star_arg and \
min_positional_args == max_positional_args
has_kw_only_args = bool(kw_only_args)
if self.num_required_kw_args:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
if self.starstar_arg or self.star_arg:
self.generate_stararg_init_code(max_positional_args, code)
code.putln('{')
all_args = tuple(positional_args) + tuple(kw_only_args)
code.putln("static PyObject **%s[] = {%s,0};" % (
Naming.pykwdlist_cname,
','.join([ '&%s' % code.intern_identifier(arg.name)
for arg in all_args ])))
# Before being converted and assigned to the target variables,
# borrowed references to all unpacked argument values are
# collected into a local PyObject* array called "values",
# regardless if they were taken from default arguments,
# positional arguments or keyword arguments. Note that
# C-typed default arguments are handled at conversion time,
# so their array value is NULL in the end if no argument
# was passed for them.
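        # Sketch: for "def f(a, b=1)" called as f(2), values[0] ends up
        # holding the borrowed 2 and values[1] the borrowed default 1
        # (or NULL if the default were C-typed and left to conversion time).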
self.generate_argument_values_setup_code(all_args, code)
# --- optimised code when we receive keyword arguments
code.putln("if (%s(%s)) {" % (
            "likely" if self.num_required_kw_args > 0 else "unlikely",
Naming.kwds_cname))
self.generate_keyword_unpacking_code(
min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code)
# --- optimised code when we do not receive any keyword arguments
if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
# Python raises arg tuple related errors first, so we must
# check the length here
if min_positional_args == max_positional_args and not self.star_arg:
compare = '!='
else:
compare = '<'
code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
Naming.args_cname, compare, min_positional_args))
code.put_goto(argtuple_error_label)
if self.num_required_kw_args:
# pure error case: keywords required but not passed
if max_positional_args > min_positional_args and not self.star_arg:
code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname, max_positional_args))
code.put_goto(argtuple_error_label)
code.putln('} else {')
for i, arg in enumerate(kw_only_args):
if not arg.default:
pystring_cname = code.intern_identifier(arg.name)
# required keyword-only argument missing
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name,
pystring_cname))
code.putln(code.error_goto(self.pos))
break
else:
# optimised tuple unpacking code
code.putln('} else {')
if min_positional_args == max_positional_args:
# parse the exact number of positional arguments from
# the args tuple
for i, arg in enumerate(positional_args):
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
else:
# parse the positional arguments from the variable length
# args tuple and reject illegal argument tuple sizes
code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
if self.star_arg:
code.putln('default:')
reversed_args = list(enumerate(positional_args))[::-1]
for i, arg in reversed_args:
if i >= min_positional_args-1:
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
if min_positional_args == 0:
code.put('case 0: ')
code.putln('break;')
if self.star_arg:
if min_positional_args:
for i in range(min_positional_args-1, -1, -1):
code.putln('case %2d:' % i)
code.put_goto(argtuple_error_label)
else:
code.put('default: ')
code.put_goto(argtuple_error_label)
code.putln('}')
code.putln('}') # end of the conditional unpacking blocks
# Convert arg values to their final type and assign them.
        # Also inject non-Python default arguments, which cannot
# live in the values[] array.
for i, arg in enumerate(all_args):
self.generate_arg_assignment(arg, "values[%d]" % i, code)
code.putln('}') # end of the whole argument unpacking block
if code.label_used(argtuple_error_label):
code.put_goto(success_label)
code.put_label(argtuple_error_label)
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args,
Naming.args_cname))
code.putln(code.error_goto(self.pos))
def generate_arg_assignment(self, arg, item, code):
if arg.type.is_pyobject:
# Python default arguments were already stored in 'item' at the very beginning
if arg.is_generic:
item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
entry = arg.entry
code.putln("%s = %s;" % (entry.cname, item))
else:
func = arg.type.from_py_function
if func:
if arg.default:
# C-typed default arguments must be handled here
code.putln('if (%s) {' % item)
rhs = "%s(%s)" % (func, item)
if arg.type.is_enum:
rhs = arg.type.cast_code(rhs)
code.putln("%s = %s; %s" % (
arg.entry.cname,
rhs,
code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
if arg.default:
code.putln('} else {')
code.putln(
"%s = %s;" % (
arg.entry.cname,
arg.calculate_default_value_code(code)))
if arg.type.is_memoryviewslice:
code.put_incref_memoryviewslice(arg.entry.cname,
have_gil=True)
code.putln('}')
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
def generate_stararg_init_code(self, max_positional_args, code):
if self.starstar_arg:
self.starstar_arg.entry.xdecref_cleanup = 0
code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
self.starstar_arg.entry.cname,
self.starstar_arg.entry.cname,
self.error_value()))
code.put_gotref(self.starstar_arg.entry.cname)
if self.star_arg:
self.star_arg.entry.xdecref_cleanup = 0
code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname,
max_positional_args))
code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
self.star_arg.entry.cname, Naming.args_cname,
max_positional_args, Naming.args_cname))
code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname)
if self.starstar_arg:
code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
code.put_finish_refcount_context()
code.putln('return %s;' % self.error_value())
code.putln('}')
code.put_gotref(self.star_arg.entry.cname)
code.putln('} else {')
code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple))
code.put_incref(Naming.empty_tuple, py_object_type)
code.putln('}')
def generate_argument_values_setup_code(self, args, code):
max_args = len(args)
# the 'values' array collects borrowed references to arguments
# before doing any type coercion etc.
code.putln("PyObject* values[%d] = {%s};" % (
max_args, ','.join('0'*max_args)))
if self.target.defaults_struct:
code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % (
self.target.defaults_struct, Naming.dynamic_args_cname,
self.target.defaults_struct, Naming.self_cname))
# assign borrowed Python default values to the values array,
# so that they can be overwritten by received arguments below
for i, arg in enumerate(args):
if arg.default and arg.type.is_pyobject:
default_value = arg.calculate_default_value_code(code)
code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value)))
def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code):
code.putln('Py_ssize_t kw_args;')
code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
# copy the values from the args tuple and check that it's not too long
code.putln('switch (pos_args) {')
if self.star_arg:
code.putln('default:')
for i in range(max_positional_args-1, -1, -1):
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
i, Naming.args_cname, i))
code.putln('case 0: break;')
if not self.star_arg:
code.put('default: ') # more arguments than allowed
code.put_goto(argtuple_error_label)
code.putln('}')
# The code above is very often (but not always) the same as
# the optimised non-kwargs tuple unpacking code, so we keep
# the code block above at the very top, before the following
# 'external' PyDict_Size() call, to make it easy for the C
# compiler to merge the two separate tuple unpacking
# implementations into one when they turn out to be identical.
# If we received kwargs, fill up the positional/required
# arguments with values from the kw dict
code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
if self.num_required_args or max_positional_args > 0:
last_required_arg = -1
for i, arg in enumerate(all_args):
if not arg.default:
last_required_arg = i
if last_required_arg < max_positional_args:
last_required_arg = max_positional_args-1
if max_positional_args > 0:
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
code.putln('case %2d:' % i)
pystring_cname = code.intern_identifier(arg.name)
if arg.default:
if arg.kw_only:
# optional kw-only args are handled separately below
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % (
i, Naming.kwds_cname, pystring_cname))
if i < min_positional_args:
if i == 0:
# special case: we know arg 0 is missing
code.put('else ')
code.put_goto(argtuple_error_label)
else:
                            # report (in the error message) the correct
                            # number of values (args or kwargs) that were
                            # passed as positional arguments up to this point
code.putln('else {')
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args, i))
code.putln(code.error_goto(self.pos))
code.putln('}')
elif arg.kw_only:
code.putln('else {')
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' %(
self.name, pystring_cname))
code.putln(code.error_goto(self.pos))
code.putln('}')
if max_positional_args > 0:
code.putln('}')
if has_kw_only_args:
# unpack optional keyword-only arguments separately because
# checking for interned strings in a dict is faster than iterating
self.generate_optional_kwonly_args_unpacking_code(all_args, code)
code.putln('if (unlikely(kw_args > 0)) {')
# non-positional/-required kw args left in dict: default args,
# kw-only args, **kwargs or error
#
# This is sort of a catch-all: except for checking required
# arguments, this will always do the right thing for unpacking
# keyword arguments, so that we can concentrate on optimising
# common cases above.
if max_positional_args == 0:
pos_arg_count = "0"
elif self.star_arg:
code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
max_positional_args, max_positional_args))
pos_arg_count = "used_pos_args"
else:
pos_arg_count = "pos_args"
code.globalstate.use_utility_code(
UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
code.putln(
'if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
Naming.kwds_cname,
Naming.pykwdlist_cname,
self.starstar_arg and self.starstar_arg.entry.cname or '0',
pos_arg_count,
self.name,
code.error_goto(self.pos)))
code.putln('}')
def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
optional_args = []
first_optional_arg = -1
for i, arg in enumerate(all_args):
if not arg.kw_only or not arg.default:
continue
if not optional_args:
first_optional_arg = i
optional_args.append(arg.name)
if optional_args:
if len(optional_args) > 1:
# if we receive more than the named kwargs, we either have **kwargs
# (in which case we must iterate anyway) or it's an error (which we
# also handle during iteration) => skip this part if there are more
code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % (
                    'likely' if not self.starstar_arg else '',
len(optional_args)))
code.putln('Py_ssize_t index;')
# not unrolling the loop here reduces the C code overhead
code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % (
first_optional_arg, first_optional_arg + len(optional_args)))
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % (
Naming.kwds_cname, Naming.pykwdlist_cname))
code.putln('if (value) { values[index] = value; kw_args--; }')
if len(optional_args) > 1:
code.putln('}')
code.putln('}')
def generate_argument_conversion_code(self, code):
# Generate code to convert arguments from signature type to
# declared type, if needed. Also copies signature arguments
# into closure fields.
for arg in self.args:
if arg.needs_conversion:
self.generate_arg_conversion(arg, code)
def generate_arg_conversion(self, arg, code):
# Generate conversion code for one argument.
old_type = arg.hdr_type
new_type = arg.type
if old_type.is_pyobject:
if arg.default:
code.putln("if (%s) {" % arg.hdr_cname)
else:
code.putln("assert(%s); {" % arg.hdr_cname)
self.generate_arg_conversion_from_pyobject(arg, code)
code.putln("}")
elif new_type.is_pyobject:
self.generate_arg_conversion_to_pyobject(arg, code)
else:
if new_type.assignable_from(old_type):
code.putln(
"%s = %s;" % (arg.entry.cname, arg.hdr_cname))
else:
error(arg.pos,
"Cannot convert 1 argument from '%s' to '%s'" %
(old_type, new_type))
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
func = new_type.from_py_function
# copied from CoerceFromPyTypeNode
if func:
lhs = arg.entry.cname
rhs = "%s(%s)" % (func, arg.hdr_cname)
if new_type.is_enum:
rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; %s" % (
lhs,
rhs,
code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
else:
error(arg.pos,
"Cannot convert Python object argument to type '%s'"
% new_type)
def generate_arg_conversion_to_pyobject(self, arg, code):
old_type = arg.hdr_type
func = old_type.to_py_function
if func:
code.putln("%s = %s(%s); %s" % (
arg.entry.cname,
func,
arg.hdr_cname,
code.error_goto_if_null(arg.entry.cname, arg.pos)))
code.put_var_gotref(arg.entry)
else:
error(arg.pos,
"Cannot convert argument of type '%s' to Python object"
% old_type)
def generate_argument_type_tests(self, code):
# Generate type tests for args whose signature
# type is PyObject * and whose declared type is
# a subtype thereof.
for arg in self.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif not arg.accept_none and (arg.type.is_pyobject or
arg.type.is_buffer or
arg.type.is_memoryviewslice):
self.generate_arg_none_check(arg, code)
def error_value(self):
return self.signature.error_value
class GeneratorDefNode(DefNode):
# Generator function node that creates a new generator instance when called.
#
# gbody GeneratorBodyDefNode the function implementing the generator
#
is_generator = True
is_coroutine = False
needs_closure = True
child_attrs = DefNode.child_attrs + ["gbody"]
def __init__(self, pos, **kwargs):
        # XXX: doesn't actually need a body
kwargs['body'] = StatListNode(pos, stats=[], is_terminator=True)
super(GeneratorDefNode, self).__init__(pos, **kwargs)
def analyse_declarations(self, env):
super(GeneratorDefNode, self).analyse_declarations(env)
self.gbody.local_scope = self.local_scope
self.gbody.analyse_declarations(env)
def generate_function_body(self, env, code):
body_cname = self.gbody.entry.func_cname
name = code.intern_identifier(self.name)
qualname = code.intern_identifier(self.qualname)
code.putln('{')
code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
'(__pyx_coroutine_body_t) %s, (PyObject *) %s, %s, %s); %s' % (
'Coroutine' if self.is_coroutine else 'Generator',
body_cname, Naming.cur_scope_cname, name, qualname,
code.error_goto_if_null('gen', self.pos)))
code.put_decref(Naming.cur_scope_cname, py_object_type)
if self.requires_classobj:
classobj_cname = 'gen->classobj'
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
classobj_cname, Naming.self_cname))
code.put_incref(classobj_cname, py_object_type)
code.put_giveref(classobj_cname)
code.put_finish_refcount_context()
code.putln('return (PyObject *) gen;')
code.putln('}')
def generate_function_definitions(self, env, code):
env.use_utility_code(UtilityCode.load_cached(
'Coroutine' if self.is_coroutine else 'Generator', "Coroutine.c"))
self.gbody.generate_function_header(code, proto=True)
super(GeneratorDefNode, self).generate_function_definitions(env, code)
self.gbody.generate_function_definitions(env, code)
class AsyncDefNode(GeneratorDefNode):
is_coroutine = True
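    # e.g. "async def f(): ..." is compiled through this node, reusing the
    # generator machinery above but loading the Coroutine utility code and
    # returning a coroutine object instead of a generator.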
class GeneratorBodyDefNode(DefNode):
# Main code body of a generator implemented as a DefNode.
#
is_generator_body = True
is_inlined = False
inlined_comprehension_type = None # container type for inlined comprehensions
def __init__(self, pos=None, name=None, body=None):
super(GeneratorBodyDefNode, self).__init__(
pos=pos, body=body, name=name, doc=None,
args=[], star_arg=None, starstar_arg=None)
def declare_generator_body(self, env):
prefix = env.next_id(env.scope_prefix)
name = env.next_id('generator')
cname = Naming.genbody_prefix + prefix + name
entry = env.declare_var(None, py_object_type, self.pos,
cname=cname, visibility='private')
entry.func_cname = cname
entry.qualified_name = EncodedString(self.name)
self.entry = entry
def analyse_declarations(self, env):
self.analyse_argument_types(env)
self.declare_generator_body(env)
def generate_function_header(self, code, proto=False):
header = "static PyObject *%s(__pyx_CoroutineObject *%s, PyObject *%s)" % (
self.entry.func_cname,
Naming.generator_cname,
Naming.sent_value_cname)
if proto:
code.putln('%s; /* proto */' % header)
else:
code.putln('%s /* generator body */\n{' % header)
def generate_function_definitions(self, env, code):
lenv = self.local_scope
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
self.generate_function_header(code)
closure_init_code = code.insertion_point()
# ----- Local variables
code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
tempvardecl_code = code.insertion_point()
code.put_declare_refcount_context()
code.put_setup_refcount_context(self.entry.name)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
resume_code = code.insertion_point()
first_run_label = code.new_label('first_run')
code.use_label(first_run_label)
code.put_label(first_run_label)
code.putln('%s' %
(code.error_goto_if_null(Naming.sent_value_cname, self.pos)))
# ----- prepare target container for inlined comprehension
if self.is_inlined and self.inlined_comprehension_type is not None:
target_type = self.inlined_comprehension_type
if target_type is Builtin.list_type:
comp_init = 'PyList_New(0)'
elif target_type is Builtin.set_type:
comp_init = 'PySet_New(NULL)'
elif target_type is Builtin.dict_type:
comp_init = 'PyDict_New()'
else:
raise InternalError(
"invalid type of inlined comprehension: %s" % target_type)
code.putln("%s = %s; %s" % (
Naming.retval_cname, comp_init,
code.error_goto_if_null(Naming.retval_cname, self.pos)))
code.put_gotref(Naming.retval_cname)
# ----- Function body
self.generate_function_body(env, code)
# ----- Closure initialization
if lenv.scope_class.type.scope.entries:
closure_init_code.putln('%s = %s;' % (
lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
lenv.scope_class.type.cast_code('%s->closure' %
Naming.generator_cname)))
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
# on normal generator termination, we do not take the exception propagation
# path: no traceback info is required and not creating it is much faster
if not self.is_inlined and not self.body.is_terminator:
code.putln('PyErr_SetNone(PyExc_StopIteration);')
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
if self.is_inlined and self.inlined_comprehension_type is not None:
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
if Future.generator_stop in env.global_scope().context.future_directives:
# PEP 479: turn accidental StopIteration exceptions into a RuntimeError
code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c"))
code.putln("if (unlikely(PyErr_ExceptionMatches(PyExc_StopIteration))) "
"__Pyx_Generator_Replace_StopIteration();")
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.put_add_traceback(self.entry.qualified_name)
# ----- Non-error return cleanup
code.put_label(code.return_label)
if self.is_inlined:
code.put_xgiveref(Naming.retval_cname)
else:
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
code.putln('%s->resume_label = -1;' % Naming.generator_cname)
# clean up as early as possible to help breaking any reference cycles
code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname)
code.put_finish_refcount_context()
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Generator resume code
resume_code.putln("switch (%s->resume_label) {" % (
Naming.generator_cname))
resume_code.putln("case 0: goto %s;" % first_run_label)
for i, label in code.yield_labels:
resume_code.putln("case %d: goto %s;" % (i, label))
resume_code.putln("default: /* CPython raises the right error here */")
resume_code.put_finish_refcount_context()
resume_code.putln("return NULL;")
resume_code.putln("}")
code.exit_cfunc_scope()
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
    # is overridden.
#
# py_func
#
# args
# func_temp
# body
child_attrs = ['body']
body = None
def analyse_expressions(self, env):
self.args = env.arg_entries
if self.py_func.is_module_scope:
first_arg = 0
else:
first_arg = 1
from . import ExprNodes
self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
call_node = ExprNodes.SimpleCallNode(
self.pos, function=self.func_node,
args=[ ExprNodes.NameNode(self.pos, name=arg.name)
for arg in self.args[first_arg:] ])
if env.return_type.is_void or env.return_type.is_returncode:
self.body = StatListNode(self.pos, stats=[
ExprStatNode(self.pos, expr=call_node),
ReturnStatNode(self.pos, value=None)])
else:
self.body = ReturnStatNode(self.pos, value=call_node)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code):
interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
# Check to see if we are an extension type
if self.py_func.is_module_scope:
self_arg = "((PyObject *)%s)" % Naming.module_cname
else:
self_arg = "((PyObject *)%s)" % self.args[0].cname
code.putln("/* Check if called by wrapper */")
code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
code.putln("/* Check if overridden in Python */")
if self.py_func.is_module_scope:
code.putln("else {")
else:
code.putln("else if (unlikely(Py_TYPE(%s)->tp_dictoffset != 0)) {" % self_arg)
func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.func_node.set_cname(func_node_temp)
# need to get attribute manually--scope would return cdef method
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
err = code.error_goto_if_null(func_node_temp, self.pos)
code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
func_node_temp, self_arg, interned_attr_cname, err))
code.put_gotref(func_node_temp)
is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)%s)" % (
func_node_temp, self.py_func.entry.func_cname)
code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
self.body.generate_execution_code(code)
code.putln("}")
code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(func_node_temp)
code.putln("}")
class ClassDefNode(StatNode, BlockNode):
pass
class PyClassDefNode(ClassDefNode):
# A Python class definition.
#
# name EncodedString Name of the class
# doc string or None
# body StatNode Attribute definition code
# entry Symtab.Entry
# scope PyClassScope
# decorators [DecoratorNode] list of decorators or None
#
# The following subnodes are constructed internally:
#
# dict DictNode Class dictionary or Py3 namespace
# classobj ClassNode Class object
# target NameNode Variable to assign class object to
child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result",
"target", "class_cell", "decorators"]
decorators = None
class_result = None
is_py3_style_class = False # Python3 style class (kwargs)
metaclass = None
mkw = None
def __init__(self, pos, name, bases, doc, body, decorators=None,
keyword_args=None, force_py3_semantics=False):
StatNode.__init__(self, pos)
self.name = name
self.doc = doc
self.body = body
self.decorators = decorators
self.bases = bases
from . import ExprNodes
if self.doc and Options.docstrings:
doc = embed_position(self.pos, self.doc)
doc_node = ExprNodes.StringNode(pos, value=doc)
else:
doc_node = None
allow_py2_metaclass = not force_py3_semantics
if keyword_args:
allow_py2_metaclass = False
self.is_py3_style_class = True
if keyword_args.is_dict_literal:
if keyword_args.key_value_pairs:
for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
if item.key.value == 'metaclass':
if self.metaclass is not None:
error(item.pos, "keyword argument 'metaclass' passed multiple times")
# special case: we already know the metaclass,
# so we don't need to do the "build kwargs,
# find metaclass" dance at runtime
self.metaclass = item.value
del keyword_args.key_value_pairs[i]
self.mkw = keyword_args
else:
assert self.metaclass is not None
else:
# MergedDictNode
self.mkw = ExprNodes.ProxyNode(keyword_args)
if force_py3_semantics or self.bases or self.mkw or self.metaclass:
if self.metaclass is None:
if keyword_args and not keyword_args.is_dict_literal:
# **kwargs may contain 'metaclass' arg
mkdict = self.mkw
else:
mkdict = None
if (not mkdict and
self.bases.is_sequence_constructor and
not self.bases.args):
pass # no base classes => no inherited metaclass
else:
self.metaclass = ExprNodes.PyClassMetaclassNode(
pos, mkw=mkdict, bases=self.bases)
needs_metaclass_calculation = False
else:
needs_metaclass_calculation = True
self.dict = ExprNodes.PyClassNamespaceNode(
pos, name=name, doc=doc_node,
metaclass=self.metaclass, bases=self.bases, mkw=self.mkw)
self.classobj = ExprNodes.Py3ClassNode(
pos, name=name,
bases=self.bases, dict=self.dict, doc=doc_node,
metaclass=self.metaclass, mkw=self.mkw,
calculate_metaclass=needs_metaclass_calculation,
allow_py2_metaclass=allow_py2_metaclass)
else:
# no bases, no metaclass => old style class creation
self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
self.classobj = ExprNodes.ClassNode(
pos, name=name,
bases=bases, dict=self.dict, doc=doc_node)
self.target = ExprNodes.NameNode(pos, name=name)
self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos)
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
"""
if self.is_py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
bases = self.classobj.bases.args
if len(bases) == 0:
base_class_name = None
base_class_module = None
elif len(bases) == 1:
base = bases[0]
path = []
from .ExprNodes import AttributeNode, NameNode
while isinstance(base, AttributeNode):
path.insert(0, base.attribute)
base = base.obj
if isinstance(base, NameNode):
path.insert(0, base.name)
base_class_name = path[-1]
if len(path) > 1:
base_class_module = u'.'.join(path[:-1])
else:
base_class_module = None
else:
error(self.classobj.bases.args.pos, "Invalid base class")
else:
error(self.classobj.bases.args.pos, "C class may only have one base class")
return None
return CClassDefNode(self.pos,
visibility = 'private',
module_name = None,
class_name = self.name,
base_class_module = base_class_module,
base_class_name = base_class_name,
decorators = self.decorators,
body = self.body,
in_pxd = False,
doc = self.doc)
def create_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name = self.name, outer_scope = genv)
return cenv
def analyse_declarations(self, env):
class_result = self.classobj
if self.decorators:
from .ExprNodes import SimpleCallNode
for decorator in self.decorators[::-1]:
class_result = SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [class_result])
self.decorators = None
self.class_result = class_result
self.class_result.analyse_declarations(env)
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
self.body.analyse_declarations(cenv)
def analyse_expressions(self, env):
if self.bases:
self.bases = self.bases.analyse_expressions(env)
if self.metaclass:
self.metaclass = self.metaclass.analyse_expressions(env)
if self.mkw:
self.mkw = self.mkw.analyse_expressions(env)
self.dict = self.dict.analyse_expressions(env)
self.class_result = self.class_result.analyse_expressions(env)
genv = env.global_scope()
cenv = self.scope
self.body = self.body.analyse_expressions(cenv)
self.target.analyse_target_expression(env, self.classobj)
self.class_cell = self.class_cell.analyse_expressions(cenv)
return self
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.pyclass_stack.append(self)
cenv = self.scope
if self.bases:
self.bases.generate_evaluation_code(code)
if self.mkw:
self.mkw.generate_evaluation_code(code)
if self.metaclass:
self.metaclass.generate_evaluation_code(code)
self.dict.generate_evaluation_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
self.class_cell.generate_evaluation_code(code)
self.body.generate_execution_code(code)
self.class_result.generate_evaluation_code(code)
self.class_cell.generate_injection_code(
code, self.class_result.result())
self.class_cell.generate_disposal_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
self.target.generate_assignment_code(self.class_result, code)
self.dict.generate_disposal_code(code)
self.dict.free_temps(code)
if self.metaclass:
self.metaclass.generate_disposal_code(code)
self.metaclass.free_temps(code)
if self.mkw:
self.mkw.generate_disposal_code(code)
self.mkw.free_temps(code)
if self.bases:
self.bases.generate_disposal_code(code)
self.bases.free_temps(code)
code.pyclass_stack.pop()
class CClassDefNode(ClassDefNode):
# An extension type definition.
#
# visibility 'private' or 'public' or 'extern'
# typedef_flag boolean
# api boolean
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
# base_class_module string or None Module containing the base class
# base_class_name string or None Name of the base class
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# in_pxd boolean Is in a .pxd file
# decorators [DecoratorNode] list of decorators or None
# doc string or None
# body StatNode or None
# entry Symtab.Entry
# base_type PyExtensionType or None
# buffer_defaults_node DictNode or None Declares defaults for a buffer
# buffer_defaults_pos
child_attrs = ["body"]
buffer_defaults_node = None
buffer_defaults_pos = None
typedef_flag = False
api = False
objstruct_name = None
typeobj_name = None
decorators = None
shadow = False
def buffer_defaults(self, env):
if not hasattr(self, '_buffer_defaults'):
from . import Buffer
if self.buffer_defaults_node:
self._buffer_defaults = Buffer.analyse_buffer_options(
self.buffer_defaults_pos,
env, [], self.buffer_defaults_node,
need_complete=False)
else:
self._buffer_defaults = None
return self._buffer_defaults
def declare(self, env):
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return None
else:
home_scope = env
self.entry = home_scope.declare_c_class(
name = self.class_name,
pos = self.pos,
defining = 0,
implementing = 0,
module_name = self.module_name,
base_type = None,
objstruct_cname = self.objstruct_name,
typeobj_cname = self.typeobj_name,
visibility = self.visibility,
typedef_flag = self.typedef_flag,
api = self.api,
buffer_defaults = self.buffer_defaults(env),
shadow = self.shadow)
def analyse_declarations(self, env):
#print "CClassDefNode.analyse_declarations:", self.class_name
#print "...visibility =", self.visibility
#print "...module_name =", self.module_name
if env.in_cinclude and not self.objstruct_name:
error(self.pos, "Object struct name specification required for "
"C class defined in 'extern from' block")
if self.decorators:
error(self.pos,
"Decorators not allowed on cdef classes (used on type '%s')" % self.class_name)
self.base_type = None
# Now that module imports are cached, we need to
# import the modules for extern classes.
if self.module_name:
self.module = None
for module in env.cimported_modules:
if module.name == self.module_name:
self.module = module
if self.module is None:
self.module = ModuleScope(self.module_name, None, env.context)
self.module.has_extern_class = 1
env.add_imported_module(self.module)
if self.base_class_name:
if self.base_class_module:
base_class_scope = env.find_module(self.base_class_module, self.pos)
else:
base_class_scope = env
if self.base_class_name == 'object':
# extension classes are special and don't need to inherit from object
if base_class_scope is None or base_class_scope.lookup('object') is None:
self.base_class_name = None
self.base_class_module = None
base_class_scope = None
if base_class_scope:
base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
if base_class_entry:
if not base_class_entry.is_type:
error(self.pos, "'%s' is not a type name" % self.base_class_name)
elif not base_class_entry.type.is_extension_type and \
not (base_class_entry.type.is_builtin_type and
base_class_entry.type.objstruct_cname):
error(self.pos, "'%s' is not an extension type" % self.base_class_name)
elif not base_class_entry.type.is_complete():
error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
base_class_entry.type.is_final_type:
error(self.pos, "Base class '%s' of type '%s' is final" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.is_builtin_type and \
base_class_entry.type.name in ('tuple', 'str', 'bytes'):
error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
% base_class_entry.type.name)
else:
self.base_type = base_class_entry.type
if env.directives.get('freelist', 0) > 0:
warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
# To properly initialize inherited attributes, the base type must
# be analysed before this type.
self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env))
return
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return
else:
home_scope = env
if self.visibility == 'extern':
if (self.module_name == '__builtin__' and
self.class_name in Builtin.builtin_types and
env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
name = self.class_name,
pos = self.pos,
defining = has_body and self.in_pxd,
implementing = has_body and not self.in_pxd,
module_name = self.module_name,
base_type = self.base_type,
objstruct_cname = self.objstruct_name,
typeobj_cname = self.typeobj_name,
visibility = self.visibility,
typedef_flag = self.typedef_flag,
api = self.api,
buffer_defaults = self.buffer_defaults(env),
shadow = self.shadow)
if self.shadow:
home_scope.lookup(self.class_name).as_variable = self.entry
if home_scope is not env and self.visibility == 'extern':
env.add_imported_entry(self.class_name, self.entry, self.pos)
self.scope = scope = self.entry.type.scope
if scope is not None:
scope.directives = env.directives
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
if has_body:
self.body.analyse_declarations(scope)
if self.in_pxd:
scope.defined = 1
else:
scope.implemented = 1
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
thunk()
def analyse_expressions(self, env):
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
return self
def generate_function_definitions(self, env, code):
if self.body:
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
# This is needed to generate evaluation code for
# default values of method arguments.
code.mark_pos(self.pos)
if self.body:
self.body.generate_execution_code(code)
def annotate(self, code):
if self.body:
self.body.annotate(code)
class PropertyNode(StatNode):
# Definition of a property in an extension type.
#
# name string
# doc EncodedString or None Doc string
# entry Symtab.Entry
# body StatListNode
child_attrs = ["body"]
def analyse_declarations(self, env):
self.entry = env.declare_property(self.name, self.doc, self.pos)
self.entry.scope.directives = env.directives
self.body.analyse_declarations(self.entry.scope)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class GlobalNode(StatNode):
# Global variable declaration.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_global(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class NonlocalNode(StatNode):
# Nonlocal variable declaration via the 'nonlocal' keyword.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_nonlocal(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class ExprStatNode(StatNode):
# Expression used as a statement.
#
# expr ExprNode
child_attrs = ["expr"]
def analyse_declarations(self, env):
from . import ExprNodes
if isinstance(self.expr, ExprNodes.GeneralCallNode):
func = self.expr.function.as_cython_attribute()
if func == u'declare':
args, kwds = self.expr.explicit_args_kwds()
if len(args):
error(self.expr.pos, "Variable names must be specified.")
for var, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
env.declare_var(var.value, type, var.pos, is_cdef = True)
self.__class__ = PassStatNode
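    # For illustration (a sketch, not code from this module), the branch
    # above rewrites a bare declaration call such as
    #
    #     cython.declare(x=cython.int, y=cython.double)
    #
    # into a no-op statement after registering 'x' and 'y' as C variables.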
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr = self.expr.analyse_expressions(env)
return self
def nogil_check(self, env):
if self.expr.type.is_pyobject and self.expr.is_temp:
self.gil_error()
gil_message = "Discarding owned Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
code.putln("%s;" % self.expr.result())
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class AssignmentNode(StatNode):
# Abstract base class for assignment nodes.
#
# The analyse_expressions and generate_execution_code
# phases of assignments are split into two sub-phases
# each, to enable all the right hand sides of a
# parallel assignment to be evaluated before assigning
# to any of the left hand sides.
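    #
    # For example (illustrative only), the parallel assignment
    #
    #     a, b = b, a
    #
    # must evaluate both right hand sides before assigning to either
    # target, hence the separate evaluation and assignment phases.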
def analyse_expressions(self, env):
node = self.analyse_types(env)
if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode):
if node.rhs.type.is_ptr and node.rhs.is_ephemeral():
error(self.pos, "Storing unsafe C derivative of temporary Python reference")
return node
# def analyse_expressions(self, env):
# self.analyse_expressions_1(env)
# self.analyse_expressions_2(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.generate_rhs_evaluation_code(code)
self.generate_assignment_code(code)
class SingleAssignmentNode(AssignmentNode):
# The simplest case:
#
# a = b
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# first bool Is this guaranteed the first assignment to lhs?
# is_overloaded_assignment bool Is this assignment done via an overloaded operator=
child_attrs = ["lhs", "rhs"]
first = False
is_overloaded_assignment = False
declaration_only = False
def analyse_declarations(self, env):
from . import ExprNodes
# handle declarations of the form x = cython.foo()
if isinstance(self.rhs, ExprNodes.CallNode):
func_name = self.rhs.function.as_cython_attribute()
if func_name:
args, kwds = self.rhs.explicit_args_kwds()
if func_name in ['declare', 'typedef']:
if len(args) > 2 or kwds is not None:
error(self.rhs.pos, "Can only declare one type at a time.")
return
type = args[0].analyse_as_type(env)
if type is None:
error(args[0].pos, "Unknown type")
return
lhs = self.lhs
if func_name == 'declare':
if isinstance(lhs, ExprNodes.NameNode):
vars = [(lhs.name, lhs.pos)]
elif isinstance(lhs, ExprNodes.TupleNode):
vars = [(var.name, var.pos) for var in lhs.args]
else:
error(lhs.pos, "Invalid declaration")
return
for var, pos in vars:
env.declare_var(var, type, pos, is_cdef = True)
if len(args) == 2:
# we have a value
self.rhs = args[1]
else:
self.declaration_only = True
else:
self.declaration_only = True
if not isinstance(lhs, ExprNodes.NameNode):
error(lhs.pos, "Invalid declaration.")
env.declare_typedef(lhs.name, type, self.pos, visibility='private')
elif func_name in ['struct', 'union']:
self.declaration_only = True
if len(args) > 0 or kwds is None:
error(self.rhs.pos, "Struct or union members must be given by name.")
return
members = []
for member, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
members.append((member.value, type, member.pos))
if len(members) < len(kwds.key_value_pairs):
return
if not isinstance(self.lhs, ExprNodes.NameNode):
error(self.lhs.pos, "Invalid declaration.")
name = self.lhs.name
scope = StructOrUnionScope(name)
env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
for member, type, pos in members:
scope.declare_var(member, type, pos)
elif func_name == 'fused_type':
# dtype = cython.fused_type(...)
self.declaration_only = True
if kwds:
error(self.rhs.function.pos,
"fused_type does not take keyword arguments")
fusednode = FusedTypeNode(self.rhs.pos,
name=self.lhs.name, types=args)
fusednode.analyse_declarations(env)
if self.declaration_only:
return
else:
self.lhs.analyse_target_declaration(env)
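    # The declaration forms handled above look roughly like the following
    # in user code (illustrative examples, not taken from this module):
    #
    #     x = cython.declare(cython.int, 5)
    #     MyType = cython.typedef(cython.p_double)
    #     MyStruct = cython.struct(a=cython.int, b=cython.double)
    #     T = cython.fused_type(cython.int, cython.float)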
def analyse_types(self, env, use_temp=0):
from . import ExprNodes
self.rhs = self.rhs.analyse_types(env)
unrolled_assignment = self.unroll_rhs(env)
if unrolled_assignment:
return unrolled_assignment
self.lhs = self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
unrolled_assignment = self.unroll_lhs(env)
if unrolled_assignment:
return unrolled_assignment
if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode):
self.lhs.analyse_broadcast_operation(self.rhs)
self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs)
elif self.lhs.type.is_array:
if not isinstance(self.lhs, ExprNodes.SliceIndexNode):
# cannot assign to C array, only to its full slice
self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None)
self.lhs = self.lhs.analyse_target_types(env)
if self.lhs.type.is_cpp_class:
op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type])
if op:
rhs = self.rhs
self.is_overloaded_assignment = True
else:
rhs = self.rhs.coerce_to(self.lhs.type, env)
else:
rhs = self.rhs.coerce_to(self.lhs.type, env)
if use_temp or rhs.is_attribute or (
not rhs.is_name and not rhs.is_literal and
rhs.type.is_pyobject):
            # things like (cdef) attribute access are not safe (they traverse pointers)
rhs = rhs.coerce_to_temp(env)
elif rhs.type.is_pyobject:
rhs = rhs.coerce_to_simple(env)
self.rhs = rhs
return self
def unroll(self, node, target_size, env):
from . import ExprNodes, UtilNodes
base = node
start_node = stop_node = step_node = check_node = None
if node.type.is_ctuple:
slice_size = node.type.size
elif node.type.is_ptr or node.type.is_array:
while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop):
base = node = node.base
if isinstance(node, ExprNodes.SliceIndexNode):
base = node.base
start_node = node.start
if start_node:
start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
stop_node = node.stop
if stop_node:
stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
else:
if node.type.is_array and node.type.size:
stop_node = ExprNodes.IntNode(
self.pos, value=str(node.type.size),
constant_result=(node.type.size if isinstance(node.type.size, _py_int_types)
else ExprNodes.constant_value_not_set))
else:
error(self.pos, "C array iteration requires known end index")
return
step_node = None #node.step
if step_node:
step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
# TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here.
def get_const(node, none_value):
if node is None:
return none_value
elif node.has_constant_result():
return node.constant_result
else:
raise ValueError("Not a constant.")
try:
slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1)
except ValueError:
error(self.pos, "C array assignment currently requires known endpoints")
return
elif node.type.is_array:
slice_size = node.type.size
if not isinstance(slice_size, _py_int_types):
return # might still work when coercing to Python
else:
return
else:
return
if slice_size != target_size:
error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % (
slice_size, target_size))
return
items = []
base = UtilNodes.LetRefNode(base)
refs = [base]
if start_node and not start_node.is_literal:
start_node = UtilNodes.LetRefNode(start_node)
refs.append(start_node)
if stop_node and not stop_node.is_literal:
stop_node = UtilNodes.LetRefNode(stop_node)
refs.append(stop_node)
if step_node and not step_node.is_literal:
step_node = UtilNodes.LetRefNode(step_node)
refs.append(step_node)
for ix in range(target_size):
ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type)
if step_node is not None:
if step_node.has_constant_result():
step_value = ix_node.constant_result * step_node.constant_result
ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value)
else:
ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node)
if start_node is not None:
if start_node.has_constant_result() and ix_node.has_constant_result():
index_value = ix_node.constant_result + start_node.constant_result
ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value)
else:
ix_node = ExprNodes.AddNode(
self.pos, operator='+', operand1=start_node, operand2=ix_node)
items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env)))
return check_node, refs, items
def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env):
from . import UtilNodes
assignments = []
for lhs, rhs in zip(lhs_list, rhs_list):
assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first))
node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env)
if check_node:
node = StatListNode(pos=self.pos, stats=[check_node, node])
for ref in refs[::-1]:
node = UtilNodes.LetNode(ref, node)
return node
def unroll_rhs(self, env):
from . import ExprNodes
if not isinstance(self.lhs, ExprNodes.TupleNode):
return
if any(arg.is_starred for arg in self.lhs.args):
return
unrolled = self.unroll(self.rhs, len(self.lhs.args), env)
if not unrolled:
return
check_node, refs, rhs = unrolled
return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env)
def unroll_lhs(self, env):
if self.lhs.type.is_ctuple:
# Handled directly.
return
from . import ExprNodes
if not isinstance(self.rhs, ExprNodes.TupleNode):
return
unrolled = self.unroll(self.lhs, len(self.rhs.args), env)
if not unrolled:
return
check_node, refs, lhs = unrolled
return self.unroll_assignments(refs, check_node, lhs, self.rhs.args, env)
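    # A sketch of what the unrolling above achieves: for a C array
    #
    #     cdef int[2] arr
    #     x, y = arr
    #
    # the tuple assignment is rewritten into the element-wise parallel
    # assignment x = arr[0]; y = arr[1], provided the slice length is
    # known to match the number of targets at compile time.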
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code, overloaded_assignment=False):
self.lhs.generate_assignment_code(
self.rhs, code, overloaded_assignment=self.is_overloaded_assignment)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class CascadedAssignmentNode(AssignmentNode):
# An assignment with multiple left hand sides:
#
# a = b = c
#
# lhs_list [ExprNode] Left hand sides
# rhs ExprNode Right hand sides
#
# Used internally:
#
# coerced_values [ExprNode] RHS coerced to all distinct LHS types
# cloned_values [ExprNode] cloned RHS value for each LHS
# assignment_overloads [Bool] If each assignment uses a C++ operator=
child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"]
cloned_values = None
coerced_values = None
assignment_overloads = None
def analyse_declarations(self, env):
for lhs in self.lhs_list:
lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp=0):
from .ExprNodes import CloneNode, ProxyNode
# collect distinct types used on the LHS
lhs_types = set()
for lhs in self.lhs_list:
lhs.analyse_target_types(env)
lhs.gil_assignment_check(env)
lhs_types.add(lhs.type)
rhs = self.rhs.analyse_types(env)
# common special case: only one type needed on the LHS => coerce only once
if len(lhs_types) == 1:
# Avoid coercion for overloaded assignment operators.
if next(iter(lhs_types)).is_cpp_class:
op = env.lookup_operator('=', [lhs, self.rhs])
if not op:
rhs = rhs.coerce_to(lhs_types.pop(), env)
else:
rhs = rhs.coerce_to(lhs_types.pop(), env)
if not rhs.is_name and not rhs.is_literal and (
use_temp or rhs.is_attribute or rhs.type.is_pyobject):
rhs = rhs.coerce_to_temp(env)
else:
rhs = rhs.coerce_to_simple(env)
self.rhs = ProxyNode(rhs) if rhs.is_temp else rhs
# clone RHS and coerce it to all distinct LHS types
self.coerced_values = []
coerced_values = {}
self.assignment_overloads = []
for lhs in self.lhs_list:
overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs])
self.assignment_overloads.append(overloaded)
if lhs.type not in coerced_values and lhs.type != rhs.type:
rhs = CloneNode(self.rhs)
if not overloaded:
rhs = rhs.coerce_to(lhs.type, env)
self.coerced_values.append(rhs)
coerced_values[lhs.type] = rhs
# clone coerced values for all LHS assignments
self.cloned_values = []
for lhs in self.lhs_list:
rhs = coerced_values.get(lhs.type, self.rhs)
self.cloned_values.append(CloneNode(rhs))
return self
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code, overloaded_assignment=False):
# prepare all coercions
for rhs in self.coerced_values:
rhs.generate_evaluation_code(code)
# assign clones to LHS
for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads):
rhs.generate_evaluation_code(code)
lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload)
# dispose of coerced values and original RHS
for rhs_value in self.coerced_values:
rhs_value.generate_disposal_code(code)
rhs_value.free_temps(code)
self.rhs.generate_disposal_code(code)
self.rhs.free_temps(code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
for rhs in self.coerced_values:
rhs.annotate(code)
for lhs, rhs in zip(self.lhs_list, self.cloned_values):
lhs.annotate(code)
rhs.annotate(code)
self.rhs.annotate(code)
class ParallelAssignmentNode(AssignmentNode):
# A combined packing/unpacking assignment:
#
# a, b, c = d, e, f
#
# This has been rearranged by the parser into
#
# a = d ; b = e ; c = f
#
# but we must evaluate all the right hand sides
# before assigning to any of the left hand sides.
#
# stats [AssignmentNode] The constituent assignments
child_attrs = ["stats"]
def analyse_declarations(self, env):
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
self.stats = [ stat.analyse_types(env, use_temp = 1)
for stat in self.stats ]
return self
# def analyse_expressions(self, env):
# for stat in self.stats:
# stat.analyse_expressions_1(env, use_temp = 1)
# for stat in self.stats:
# stat.analyse_expressions_2(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
for stat in self.stats:
stat.generate_rhs_evaluation_code(code)
for stat in self.stats:
stat.generate_assignment_code(code)
def generate_function_definitions(self, env, code):
for stat in self.stats:
stat.generate_function_definitions(env, code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class InPlaceAssignmentNode(AssignmentNode):
    # An in-place arithmetic assignment:
#
# a += b
# a -= b
# ...
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# operator char one of "+-*/%^&|"
#
# This code is a bit tricky because in order to obey Python
# semantics the sub-expressions (e.g. indices) of the lhs must
    # not be evaluated twice. So we must re-use the values calculated
    # in the evaluation phase for the assignment phase as well.
# Fortunately, the type of the lhs node is fairly constrained
# (it must be a NameNode, AttributeNode, or IndexNode).
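    #
    # For example (illustrative only), in
    #
    #     items[get_index()] += 1
    #
    # get_index() must be called exactly once, so the index computed while
    # evaluating the lhs is re-used when storing the result back.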
child_attrs = ["lhs", "rhs"]
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env):
self.rhs = self.rhs.analyse_types(env)
self.lhs = self.lhs.analyse_target_types(env)
# When assigning to a fully indexed buffer or memoryview, coerce the rhs
if self.lhs.is_memview_index or self.lhs.is_buffer_access:
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
elif self.lhs.type.is_string and self.operator in '+-':
# use pointer arithmetic for char* LHS instead of string concat
self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
lhs, rhs = self.lhs, self.rhs
rhs.generate_evaluation_code(code)
lhs.generate_subexpr_evaluation_code(code)
c_op = self.operator
if c_op == "//":
c_op = "/"
elif c_op == "**":
error(self.pos, "No C inplace power operator")
if lhs.is_buffer_access or lhs.is_memview_index:
if lhs.type.is_pyobject:
error(self.pos, "In-place operators not allowed on object buffers in this release.")
if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']:
error(self.pos, "In-place non-c divide operators not allowed on int buffers.")
lhs.generate_buffer_setitem_code(rhs, code, c_op)
elif lhs.is_memview_slice:
error(self.pos, "Inplace operators not supported on memoryview slices")
else:
# C++
# TODO: make sure overload is declared
code.putln("%s %s= %s;" % (lhs.result(), c_op, rhs.result()))
lhs.generate_subexpr_disposal_code(code)
lhs.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
def create_binop_node(self):
from . import ExprNodes
return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
class PrintStatNode(StatNode):
# print statement
#
# arg_tuple TupleNode
# stream ExprNode or None (stdout)
# append_newline boolean
child_attrs = ["arg_tuple", "stream"]
def analyse_expressions(self, env):
if self.stream:
stream = self.stream.analyse_expressions(env)
self.stream = stream.coerce_to_pyobject(env)
arg_tuple = self.arg_tuple.analyse_expressions(env)
self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
env.use_utility_code(printing_utility_code)
if len(self.arg_tuple.args) == 1 and self.append_newline:
env.use_utility_code(printing_one_utility_code)
return self
nogil_check = Node.gil_error
gil_message = "Python print statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.stream:
self.stream.generate_evaluation_code(code)
stream_result = self.stream.py_result()
else:
stream_result = '0'
if len(self.arg_tuple.args) == 1 and self.append_newline:
arg = self.arg_tuple.args[0]
arg.generate_evaluation_code(code)
code.putln(
"if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
stream_result,
arg.py_result(),
code.error_goto(self.pos)))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
self.arg_tuple.generate_evaluation_code(code)
code.putln(
"if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
stream_result,
self.arg_tuple.py_result(),
self.append_newline,
code.error_goto(self.pos)))
self.arg_tuple.generate_disposal_code(code)
self.arg_tuple.free_temps(code)
if self.stream:
self.stream.generate_disposal_code(code)
self.stream.free_temps(code)
def generate_function_definitions(self, env, code):
if self.stream:
self.stream.generate_function_definitions(env, code)
self.arg_tuple.generate_function_definitions(env, code)
def annotate(self, code):
if self.stream:
self.stream.annotate(code)
self.arg_tuple.annotate(code)
class ExecStatNode(StatNode):
# exec statement
#
# args [ExprNode]
child_attrs = ["args"]
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg = arg.analyse_expressions(env)
arg = arg.coerce_to_pyobject(env)
self.args[i] = arg
env.use_utility_code(Builtin.pyexec_utility_code)
return self
nogil_check = Node.gil_error
gil_message = "Python exec statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
args = []
for arg in self.args:
arg.generate_evaluation_code(code)
args.append( arg.py_result() )
args = tuple(args + ['0', '0'][:3-len(args)])
temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % (
(temp_result,) + args))
for arg in self.args:
arg.generate_disposal_code(code)
arg.free_temps(code)
code.putln(
code.error_goto_if_null(temp_result, self.pos))
code.put_gotref(temp_result)
code.put_decref_clear(temp_result, py_object_type)
code.funcstate.release_temp(temp_result)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class DelStatNode(StatNode):
# del statement
#
# args [ExprNode]
child_attrs = ["args"]
ignore_nonexisting = False
def analyse_declarations(self, env):
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_expression(env, None)
if arg.type.is_pyobject or (arg.is_name and
arg.type.is_memoryviewslice):
if arg.is_name and arg.entry.is_cglobal:
error(arg.pos, "Deletion of global C variable")
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
self.cpp_check(env)
elif arg.type.is_cpp_class:
error(arg.pos, "Deletion of non-heap C++ object")
elif arg.is_subscript and arg.base.type is Builtin.bytearray_type:
pass # del ba[i]
else:
error(arg.pos, "Deletion of non-Python, non-C++ object")
#arg.release_target_temp(env)
return self
def nogil_check(self, env):
for arg in self.args:
if arg.type.is_pyobject:
self.gil_error()
gil_message = "Deleting Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
for arg in self.args:
if (arg.type.is_pyobject or
arg.type.is_memoryviewslice or
arg.is_subscript and arg.base.type is Builtin.bytearray_type):
arg.generate_deletion_code(
code, ignore_nonexisting=self.ignore_nonexisting)
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
arg.generate_result_code(code)
code.putln("delete %s;" % arg.result())
# else error reported earlier
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class PassStatNode(StatNode):
# pass statement
child_attrs = []
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class IndirectionNode(StatListNode):
"""
This adds an indirection so that the node can be shared and a subtree can
be removed at any time by clearing self.stats.
"""
def __init__(self, stats):
super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)
class BreakStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if not code.break_label:
error(self.pos, "break statement not inside loop")
else:
code.put_goto(code.break_label)
class ContinueStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if code.funcstate.in_try_finally:
error(self.pos, "continue statement inside try of try...finally")
elif not code.continue_label:
error(self.pos, "continue statement not inside loop")
else:
code.put_goto(code.continue_label)
class ReturnStatNode(StatNode):
# return statement
#
# value ExprNode or None
# return_type PyrexType
# in_generator return inside of generator => raise StopIteration
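    #
    # As a sketch: 'return value' inside a generator body is compiled into
    # raising StopIteration(value) via __Pyx_ReturnWithStopIteration
    # rather than into a plain C return.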
child_attrs = ["value"]
is_terminator = True
in_generator = False
# Whether we are in a parallel section
in_parallel = False
def analyse_expressions(self, env):
return_type = env.return_type
self.return_type = return_type
if not return_type:
error(self.pos, "Return not inside a function body")
return self
if self.value:
self.value = self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
error(self.value.pos,
"Return with value in void function")
else:
self.value = self.value.coerce_to(env.return_type, env)
else:
if (not return_type.is_void
and not return_type.is_pyobject
and not return_type.is_returncode):
error(self.pos, "Return value required")
return self
def nogil_check(self, env):
if self.return_type.is_pyobject:
self.gil_error()
gil_message = "Returning Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if not self.return_type:
# error reported earlier
return
if self.return_type.is_pyobject:
code.put_xdecref(Naming.retval_cname,
self.return_type)
if self.value:
self.value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
lhs_pos=self.value.pos,
rhs=self.value,
code=code,
have_gil=self.in_nogil_context)
elif self.in_generator:
# return value == raise StopIteration(value), but uncatchable
code.globalstate.use_utility_code(
UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
Naming.retval_cname,
self.value.py_result()))
self.value.generate_disposal_code(code)
else:
self.value.make_owned_reference(code)
code.putln(
"%s = %s;" % (
Naming.retval_cname,
self.value.result_as(self.return_type)))
self.value.generate_post_assignment_code(code)
self.value.free_temps(code)
else:
if self.return_type.is_pyobject:
if self.in_generator:
code.putln("%s = NULL;" % Naming.retval_cname)
else:
code.put_init_to_py_none(Naming.retval_cname, self.return_type)
elif self.return_type.is_returncode:
self.put_return(code, self.return_type.default_value)
for cname, type in code.funcstate.temps_holding_reference():
code.put_decref_clear(cname, type)
code.put_goto(code.return_label)
def put_return(self, code, value):
if self.in_parallel:
code.putln_openmp("#pragma omp critical(__pyx_returning)")
code.putln("%s = %s;" % (Naming.retval_cname, value))
def generate_function_definitions(self, env, code):
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
if self.value:
self.value.annotate(code)
class RaiseStatNode(StatNode):
# raise statement
#
# exc_type ExprNode or None
# exc_value ExprNode or None
# exc_tb ExprNode or None
# cause ExprNode or None
child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
is_terminator = True
def analyse_expressions(self, env):
if self.exc_type:
exc_type = self.exc_type.analyse_types(env)
self.exc_type = exc_type.coerce_to_pyobject(env)
if self.exc_value:
exc_value = self.exc_value.analyse_types(env)
self.exc_value = exc_value.coerce_to_pyobject(env)
if self.exc_tb:
exc_tb = self.exc_tb.analyse_types(env)
self.exc_tb = exc_tb.coerce_to_pyobject(env)
if self.cause:
cause = self.cause.analyse_types(env)
self.cause = cause.coerce_to_pyobject(env)
# special cases for builtin exceptions
self.builtin_exc_name = None
if self.exc_type and not self.exc_value and not self.exc_tb:
exc = self.exc_type
from . import ExprNodes
if (isinstance(exc, ExprNodes.SimpleCallNode) and
not (exc.args or (exc.arg_tuple is not None and
exc.arg_tuple.args))):
exc = exc.function # extract the exception type
if exc.is_name and exc.entry.is_builtin:
self.builtin_exc_name = exc.name
if self.builtin_exc_name == 'MemoryError':
self.exc_type = None # has a separate implementation
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.builtin_exc_name == 'MemoryError':
code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
return
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
type_code = self.exc_type.py_result()
else:
type_code = "0"
if self.exc_value:
self.exc_value.generate_evaluation_code(code)
value_code = self.exc_value.py_result()
else:
value_code = "0"
if self.exc_tb:
self.exc_tb.generate_evaluation_code(code)
tb_code = self.exc_tb.py_result()
else:
tb_code = "0"
if self.cause:
self.cause.generate_evaluation_code(code)
cause_code = self.cause.py_result()
else:
cause_code = "0"
code.globalstate.use_utility_code(raise_utility_code)
code.putln(
"__Pyx_Raise(%s, %s, %s, %s);" % (
type_code,
value_code,
tb_code,
cause_code))
for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
if obj:
obj.generate_disposal_code(code)
obj.free_temps(code)
code.putln(
code.error_goto(self.pos))
def generate_function_definitions(self, env, code):
if self.exc_type is not None:
self.exc_type.generate_function_definitions(env, code)
if self.exc_value is not None:
self.exc_value.generate_function_definitions(env, code)
if self.exc_tb is not None:
self.exc_tb.generate_function_definitions(env, code)
if self.cause is not None:
self.cause.generate_function_definitions(env, code)
def annotate(self, code):
if self.exc_type:
self.exc_type.annotate(code)
if self.exc_value:
self.exc_value.annotate(code)
if self.exc_tb:
self.exc_tb.annotate(code)
if self.cause:
self.cause.annotate(code)
class ReraiseStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
vars = code.funcstate.exc_vars
if vars:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.put_giveref(vars[0])
code.put_giveref(vars[1])
# fresh exceptions may not have a traceback yet (-> finally!)
code.put_xgiveref(vars[2])
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(vars))
for varname in vars:
code.put("%s = 0; " % varname)
code.putln()
code.putln(code.error_goto(self.pos))
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))
class AssertStatNode(StatNode):
# assert statement
#
# cond ExprNode
# value ExprNode or None
child_attrs = ["cond", "value"]
def analyse_expressions(self, env):
self.cond = self.cond.analyse_boolean_expression(env)
if self.value:
value = self.value.analyse_types(env)
if value.type is Builtin.tuple_type or not value.type.is_builtin_type:
# prevent tuple values from being interpreted as argument value tuples
from .ExprNodes import TupleNode
value = TupleNode(value.pos, args=[value], slow=True)
self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
else:
self.value = value.coerce_to_pyobject(env)
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
code.putln("if (unlikely(!Py_OptimizeFlag)) {")
code.mark_pos(self.pos)
self.cond.generate_evaluation_code(code)
code.putln(
"if (unlikely(!%s)) {" %
self.cond.result())
if self.value:
self.value.generate_evaluation_code(code)
code.putln(
"PyErr_SetObject(PyExc_AssertionError, %s);" %
self.value.py_result())
self.value.generate_disposal_code(code)
self.value.free_temps(code)
else:
code.putln(
"PyErr_SetNone(PyExc_AssertionError);")
code.putln(
code.error_goto(self.pos))
code.putln(
"}")
self.cond.generate_disposal_code(code)
self.cond.free_temps(code)
code.putln(
"}")
code.putln("#endif")
def generate_function_definitions(self, env, code):
self.cond.generate_function_definitions(env, code)
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
self.cond.annotate(code)
if self.value:
self.value.annotate(code)
class IfStatNode(StatNode):
# if statement
#
# if_clauses [IfClauseNode]
# else_clause StatNode or None
child_attrs = ["if_clauses", "else_clause"]
def analyse_declarations(self, env):
for if_clause in self.if_clauses:
if_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.if_clauses = [if_clause.analyse_expressions(env) for if_clause in self.if_clauses]
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
if not self.else_clause:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(end_label)
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
for if_clause in self.if_clauses:
if_clause.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class IfClauseNode(Node):
# if or elif clause in an if statement
#
# condition ExprNode
# body StatNode
child_attrs = ["condition", "body"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code, end_label, is_last):
self.condition.generate_evaluation_code(code)
code.mark_pos(self.pos)
code.putln("if (%s) {" % self.condition.result())
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
if not (is_last or self.body.is_terminator):
code.put_goto(end_label)
code.putln("}")
def generate_function_definitions(self, env, code):
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
self.condition.annotate(code)
self.body.annotate(code)
class SwitchCaseNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# conditions [ExprNode]
# body StatNode
child_attrs = ['conditions', 'body']
def generate_execution_code(self, code):
for cond in self.conditions:
code.mark_pos(cond.pos)
cond.generate_evaluation_code(code)
code.putln("case %s:" % cond.result())
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
code.putln("break;")
def generate_function_definitions(self, env, code):
for cond in self.conditions:
cond.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
for cond in self.conditions:
cond.annotate(code)
self.body.annotate(code)
class SwitchStatNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# test ExprNode
# cases [SwitchCaseNode]
# else_clause StatNode or None
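    #
    # As an illustrative sketch, an if-chain over integer constants such as
    #
    #     if x == 1: f()
    #     elif x == 2: g()
    #     else: h()
    #
    # is compiled to 'switch (x) { case 1: ...; case 2: ...; default: ... }'.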
child_attrs = ['test', 'cases', 'else_clause']
def generate_execution_code(self, code):
self.test.generate_evaluation_code(code)
code.mark_pos(self.pos)
code.putln("switch (%s) {" % self.test.result())
for case in self.cases:
case.generate_execution_code(code)
if self.else_clause is not None:
code.putln("default:")
self.else_clause.generate_execution_code(code)
code.putln("break;")
else:
            # Always generate a default clause to prevent C compiler warnings
            # about unmatched enum values (it was not the user who decided to
            # generate the switch statement, so they should not be bothered
            # by such warnings).
code.putln("default: break;")
code.putln("}")
def generate_function_definitions(self, env, code):
self.test.generate_function_definitions(env, code)
for case in self.cases:
case.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.test.annotate(code)
for case in self.cases:
case.annotate(code)
if self.else_clause is not None:
self.else_clause.annotate(code)
class LoopNode(object):
pass
class WhileStatNode(LoopNode, StatNode):
# while statement
#
# condition ExprNode
# body StatNode
# else_clause StatNode
child_attrs = ["condition", "body", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
if self.condition:
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
code.putln(
"while (1) {")
if self.condition:
self.condition.generate_evaluation_code(code)
self.condition.generate_disposal_code(code)
code.putln(
"if (!%s) break;" %
self.condition.result())
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
def generate_function_definitions(self, env, code):
if self.condition:
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
if self.condition:
self.condition.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class DictIterationNextNode(Node):
# Helper node for calling PyDict_Next() inside of a WhileStatNode
# and checking the dictionary size for changes. Created in
# Optimize.py.
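    #
    # As a sketch of the transformation (not code from this module), a loop
    # such as
    #
    #     for key, value in d.items(): ...
    #
    # is rewritten into a while loop that calls __Pyx_dict_iter_next()
    # until it returns 0, raising an error if the dict changes size
    # during iteration.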
child_attrs = ['dict_obj', 'expected_size', 'pos_index_var',
'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var',
'key_target', 'value_target', 'tuple_target', 'is_dict_flag']
coerced_key_var = key_ref = None
coerced_value_var = value_ref = None
coerced_tuple_var = tuple_ref = None
def __init__(self, dict_obj, expected_size, pos_index_var,
key_target, value_target, tuple_target, is_dict_flag):
Node.__init__(
self, dict_obj.pos,
dict_obj = dict_obj,
expected_size = expected_size,
pos_index_var = pos_index_var,
key_target = key_target,
value_target = value_target,
tuple_target = tuple_target,
is_dict_flag = is_dict_flag,
is_temp = True,
type = PyrexTypes.c_bint_type)
def analyse_expressions(self, env):
from . import ExprNodes
self.dict_obj = self.dict_obj.analyse_types(env)
self.expected_size = self.expected_size.analyse_types(env)
if self.pos_index_var:
self.pos_index_var = self.pos_index_var.analyse_types(env)
if self.key_target:
self.key_target = self.key_target.analyse_target_types(env)
self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type)
self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env)
if self.value_target:
self.value_target = self.value_target.analyse_target_types(env)
self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
if self.tuple_target:
self.tuple_target = self.tuple_target.analyse_target_types(env)
self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type)
self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env)
self.is_dict_flag = self.is_dict_flag.analyse_types(env)
return self
def generate_function_definitions(self, env, code):
self.dict_obj.generate_function_definitions(env, code)
def generate_execution_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c"))
self.dict_obj.generate_evaluation_code(code)
assignments = []
temp_addresses = []
for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target),
(self.value_ref, self.coerced_value_var, self.value_target),
(self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]:
if target is None:
addr = 'NULL'
else:
assignments.append((var, result, target))
var.allocate(code)
addr = '&%s' % var.result()
temp_addresses.append(addr)
result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % (
result_temp,
self.dict_obj.py_result(),
self.expected_size.result(),
self.pos_index_var.result(),
temp_addresses[0],
temp_addresses[1],
temp_addresses[2],
self.is_dict_flag.result()
))
code.putln("if (unlikely(%s == 0)) break;" % result_temp)
code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
code.funcstate.release_temp(result_temp)
# evaluate all coercions before the assignments
for var, result, target in assignments:
code.put_gotref(var.result())
for var, result, target in assignments:
result.generate_evaluation_code(code)
for var, result, target in assignments:
target.generate_assignment_code(result, code)
var.release(code)
def ForStatNode(pos, **kw):
if 'iterator' in kw:
if kw['iterator'].is_async:
return AsyncForStatNode(pos, **kw)
else:
return ForInStatNode(pos, **kw)
else:
return ForFromStatNode(pos, **kw)
class _ForInStatNode(LoopNode, StatNode):
# Base class of 'for-in' statements.
#
# target ExprNode
# iterator IteratorNode | AwaitExprNode(AsyncIteratorNode)
# body StatNode
# else_clause StatNode
# item NextNode | AwaitExprNode(AsyncNextNode)
# is_async boolean true for 'async for' statements
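    #
    # For example (illustrative only), 'async for x in it:' wraps the
    # iterator in an AwaitExprNode and fetches items through an
    # AwaitIterNextExprNode, while a plain 'for x in it:' uses NextNode.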
child_attrs = ["target", "item", "iterator", "body", "else_clause"]
item = None
is_async = False
def _create_item_node(self):
raise NotImplementedError("must be implemented by subclasses")
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
self._create_item_node()
def analyse_expressions(self, env):
self.target = self.target.analyse_target_types(env)
self.iterator = self.iterator.analyse_expressions(env)
self._create_item_node() # must rewrap self.item after analysis
self.item = self.item.analyse_expressions(env)
if (not self.is_async and
(self.iterator.type.is_ptr or self.iterator.type.is_array) and
self.target.type.assignable_from(self.iterator.type)):
# C array slice optimization.
pass
else:
self.item = self.item.coerce_to(self.target.type, env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
self.iterator.generate_evaluation_code(code)
code.putln("for (;;) {")
self.item.generate_evaluation_code(code)
self.target.generate_assignment_code(self.item, code)
self.body.generate_execution_code(code)
code.mark_pos(self.pos)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
# in nested loops, the 'else' block can contain a
# 'continue' statement for the outer loop, but we may need
# to generate cleanup code before taking that path, so we
# intercept it here
orig_continue_label = code.continue_label
code.continue_label = code.new_label('outer_continue')
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
if code.label_used(code.continue_label):
code.put_goto(break_label)
code.mark_pos(self.pos)
code.put_label(code.continue_label)
self.iterator.generate_disposal_code(code)
code.put_goto(orig_continue_label)
code.set_loop_labels(old_loop_labels)
code.mark_pos(self.pos)
if code.label_used(break_label):
code.put_label(break_label)
self.iterator.generate_disposal_code(code)
self.iterator.free_temps(code)
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.iterator.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.iterator.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
self.item.annotate(code)
class ForInStatNode(_ForInStatNode):
# 'for' statement
is_async = False
def _create_item_node(self):
from .ExprNodes import NextNode
self.item = NextNode(self.iterator)
class AsyncForStatNode(_ForInStatNode):
# 'async for' statement
#
# iterator AwaitExprNode(AsyncIteratorNode)
# item AwaitIterNextExprNode(AsyncIteratorNode)
is_async = True
def __init__(self, pos, iterator, **kw):
assert 'item' not in kw
from . import ExprNodes
# AwaitExprNodes must appear before running MarkClosureVisitor
kw['iterator'] = ExprNodes.AwaitExprNode(iterator.pos, arg=iterator)
kw['item'] = ExprNodes.AwaitIterNextExprNode(iterator.pos, arg=None)
_ForInStatNode.__init__(self, pos, **kw)
def _create_item_node(self):
from . import ExprNodes
self.item.arg = ExprNodes.AsyncNextNode(self.iterator)
class ForFromStatNode(LoopNode, StatNode):
# for name from expr rel name rel expr
#
# target NameNode
# bound1 ExprNode
# relation1 string
# relation2 string
# bound2 ExprNode
# step ExprNode or None
# body StatNode
# else_clause StatNode or None
#
# Used internally:
#
# from_range bool
# is_py_target bool
# loopvar_node ExprNode (usually a NameNode or temp node)
# py_loopvar_node PyTempNode or None
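    #
    # For example (illustrative only), the legacy Pyrex loop
    #
    #     for i from 0 <= i < n by 2: ...
    #
    # iterates 'i' over the half-open range [0, n) in steps of 2 using
    # plain C arithmetic on the loop variable.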
child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]
is_py_target = False
loopvar_node = None
py_loopvar_node = None
from_range = False
gil_message = "For-loop using object bounds or target"
def nogil_check(self, env):
for x in (self.target, self.bound1, self.bound2):
if x.type.is_pyobject:
self.gil_error()
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
from . import ExprNodes
self.target = self.target.analyse_target_types(env)
self.bound1 = self.bound1.analyse_types(env)
self.bound2 = self.bound2.analyse_types(env)
if self.step is not None:
if isinstance(self.step, ExprNodes.UnaryMinusNode):
warning(self.step.pos, "Probable infinite loop in for-from-by statement. Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
if self.target.type.is_numeric:
loop_type = self.target.type
else:
loop_type = PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
if self.step is not None and not self.step.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
self.bound1 = self.bound1.coerce_to(loop_type, env)
self.bound2 = self.bound2.coerce_to(loop_type, env)
if not self.bound2.is_literal:
self.bound2 = self.bound2.coerce_to_temp(env)
if self.step is not None:
self.step = self.step.coerce_to(loop_type, env)
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
target_type = self.target.type
if not (target_type.is_pyobject or target_type.is_numeric):
error(self.target.pos,
"for-from loop variable must be c numeric type or Python object")
if target_type.is_numeric:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
self.loopvar_node = self.target
self.py_loopvar_node = None
else:
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
self.py_loopvar_node = \
ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
from_range = self.from_range
self.bound1.generate_evaluation_code(code)
self.bound2.generate_evaluation_code(code)
offset, incop = self.relation_table[self.relation1]
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
incop = "%s=%s" % (incop[0], step)
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.allocate(code)
if from_range:
loopvar_name = code.funcstate.allocate_temp(self.target.type, False)
else:
loopvar_name = self.loopvar_node.result()
if self.target.type.is_int and not self.target.type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
if not self.step:
step = 1
code.putln(
"for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
loopvar_name,
self.bound1.result(), offset, step,
loopvar_name, self.relation2, self.bound2.result(), step,
loopvar_name, incop))
else:
code.putln(
"for (%s = %s%s; %s %s %s; %s%s) {" % (
loopvar_name,
self.bound1.result(), offset,
loopvar_name, self.relation2, self.bound2.result(),
loopvar_name, incop))
if self.py_loopvar_node:
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
elif from_range:
code.putln("%s = %s;" % (
self.target.result(), loopvar_name))
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
if self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
            # exactly like those with C targets with regard to re-assignment
# of the loop variable.
if self.target.entry.is_pyglobal:
# We know target is a NameNode, this is the only ugly case.
target_node = ExprNodes.PyTempNode(self.target.pos, None)
target_node.allocate(code)
interned_cname = code.intern_identifier(self.target.entry.name)
if self.target.entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
lookup_func = '__Pyx_GetModuleGlobalName(%s)'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
lookup_func = '__Pyx_GetNameInClass(%s, %%s)' % (
self.target.entry.scope.namespace_cname)
code.putln("%s = %s; %s" % (
target_node.result(),
lookup_func % interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
code.put_gotref(target_node.result())
else:
target_node = self.target
from_py_node = ExprNodes.CoerceFromPyTypeNode(
self.loopvar_node.type, target_node, self.target.entry.scope)
from_py_node.temp_code = loopvar_name
from_py_node.generate_result_code(code)
if self.target.entry.is_pyglobal:
code.put_decref(target_node.result(), target_node.type)
target_node.release(code)
code.putln("}")
if self.py_loopvar_node:
            # This is potentially wasteful, but we don't want the semantics to
            # depend on whether or not the loop variable is a python object.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
if from_range:
code.funcstate.release_temp(loopvar_name)
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
self.bound1.generate_disposal_code(code)
self.bound1.free_temps(code)
self.bound2.generate_disposal_code(code)
self.bound2.free_temps(code)
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.release(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.release(code)
if self.step is not None:
self.step.generate_disposal_code(code)
self.step.free_temps(code)
relation_table = {
# {relop : (initial offset, increment op)}
'<=': ("", "++"),
'<' : ("+1", "++"),
'>=': ("", "--"),
'>' : ("-1", "--")
}
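    # For illustration (sketch, not part of the compiler), with this table:
    #
    #     for i from 0 <= i < n:    ->  for (i = 0; i < n; i++) { ... }
    #     for i from n > i >= 0:    ->  for (i = n-1; i >= 0; i--) { ... }
    #
    # relation1 selects the initial offset and increment operator, while
    # relation2 becomes the loop condition.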
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.bound1.generate_function_definitions(env, code)
self.bound2.generate_function_definitions(env, code)
if self.step is not None:
self.step.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.bound1.annotate(code)
self.bound2.annotate(code)
if self.step:
self.step.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class WithStatNode(StatNode):
"""
Represents a Python with statement.
Implemented by the WithTransform as follows:
MGR = EXPR
EXIT = MGR.__exit__
VALUE = MGR.__enter__()
EXC = True
try:
try:
TARGET = VALUE # optional
BODY
except:
EXC = False
if not EXIT(*EXCINFO):
raise
finally:
if EXC:
EXIT(None, None, None)
MGR = EXIT = VALUE = None
"""
# manager The with statement manager object
# target ExprNode the target lhs of the __enter__() call
# body StatNode
# enter_call ExprNode the call to the __enter__() method
# exit_var String the cname of the __exit__() method reference
child_attrs = ["manager", "enter_call", "target", "body"]
enter_call = None
target_temp = None
def analyse_declarations(self, env):
self.manager.analyse_declarations(env)
self.enter_call.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.manager = self.manager.analyse_types(env)
self.enter_call = self.enter_call.analyse_types(env)
if self.target:
# set up target_temp before descending into body (which uses it)
from .ExprNodes import TempNode
self.target_temp = TempNode(self.enter_call.pos, self.enter_call.type)
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.manager.generate_function_definitions(env, code)
self.enter_call.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.putln("/*with:*/ {")
self.manager.generate_evaluation_code(code)
self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
self.exit_var,
self.manager.py_result(),
code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')),
code.error_goto_if_null(self.exit_var, self.pos),
))
code.put_gotref(self.exit_var)
# need to free exit_var in the face of exceptions during setup
old_error_label = code.new_error_label()
intermediate_error_label = code.error_label
self.enter_call.generate_evaluation_code(code)
if self.target:
# The temp result will be cleaned up by the WithTargetAssignmentStatNode
# after assigning its result to the target of the 'with' statement.
self.target_temp.allocate(code)
self.enter_call.make_owned_reference(code)
code.putln("%s = %s;" % (self.target_temp.result(), self.enter_call.result()))
self.enter_call.generate_post_assignment_code(code)
else:
self.enter_call.generate_disposal_code(code)
self.enter_call.free_temps(code)
self.manager.generate_disposal_code(code)
self.manager.free_temps(code)
code.error_label = old_error_label
self.body.generate_execution_code(code)
if code.label_used(intermediate_error_label):
step_over_label = code.new_label()
code.put_goto(step_over_label)
code.put_label(intermediate_error_label)
code.put_decref_clear(self.exit_var, py_object_type)
code.put_goto(old_error_label)
code.put_label(step_over_label)
code.funcstate.release_temp(self.exit_var)
code.putln('}')
class WithTargetAssignmentStatNode(AssignmentNode):
# The target assignment of the 'with' statement value (return
# value of the __enter__() call).
#
# This is a special cased assignment that properly cleans up the RHS.
#
# lhs ExprNode the assignment target
# rhs ExprNode a (coerced) TempNode for the rhs (from WithStatNode)
# with_node WithStatNode the surrounding with-statement
child_attrs = ["rhs", "lhs"]
with_node = None
rhs = None
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_expressions(self, env):
self.lhs = self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
self.rhs = self.with_node.target_temp.coerce_to(self.lhs.type, env)
return self
def generate_execution_code(self, code):
self.rhs.generate_evaluation_code(code)
self.lhs.generate_assignment_code(self.rhs, code)
self.with_node.target_temp.release(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class TryExceptStatNode(StatNode):
# try .. except statement
#
# body StatNode
# except_clauses [ExceptClauseNode]
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
for except_clause in self.except_clauses:
except_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
default_clause_seen = 0
for i, except_clause in enumerate(self.except_clauses):
except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env)
if default_clause_seen:
error(except_clause.pos, "default 'except:' must be last")
if not except_clause.pattern:
default_clause_seen = 1
self.has_default_clause = default_clause_seen
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
nogil_check = Node.gil_error
gil_message = "Try-except statement"
def generate_execution_code(self, code):
old_return_label = code.return_label
old_break_label = code.break_label
old_continue_label = code.continue_label
old_error_label = code.new_error_label()
our_error_label = code.error_label
except_end_label = code.new_label('exception_handled')
except_error_label = code.new_label('except_error')
except_return_label = code.new_label('except_return')
try_return_label = code.new_label('try_return')
try_break_label = code.new_label('try_break')
try_continue_label = code.new_label('try_continue')
try_end_label = code.new_label('try_end')
exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
for _ in range(3)]
code.mark_pos(self.pos)
code.putln("{")
save_exc = code.insertion_point()
code.putln(
"/*try:*/ {")
code.return_label = try_return_label
code.break_label = try_break_label
code.continue_label = try_continue_label
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
code.putln(
"}")
temps_to_clean_up = code.funcstate.all_free_managed_temps()
can_raise = code.label_used(our_error_label)
if can_raise:
# inject code before the try block to save away the exception state
code.globalstate.use_utility_code(reset_exception_utility_code)
save_exc.putln("__Pyx_ExceptionSave(%s);" %
', '.join(['&%s' % var for var in exc_save_vars]))
for var in exc_save_vars:
save_exc.put_xgotref(var)
def restore_saved_exception():
for name in exc_save_vars:
code.put_xgiveref(name)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
else:
# try block cannot raise exceptions, but we had to allocate the temps above,
# so just keep the C compiler from complaining about them being unused
save_exc.putln("if (%s); else {/*mark used*/}" % '||'.join(exc_save_vars))
def restore_saved_exception():
pass
code.error_label = except_error_label
code.return_label = except_return_label
normal_case_terminates = self.body.is_terminator
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln(
"/*else:*/ {")
self.else_clause.generate_execution_code(code)
code.putln(
"}")
if not normal_case_terminates:
normal_case_terminates = self.else_clause.is_terminator
if can_raise:
if not normal_case_terminates:
for var in exc_save_vars:
code.put_xdecref_clear(var, py_object_type)
code.put_goto(try_end_label)
code.put_label(our_error_label)
for temp_name, temp_type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, temp_type)
for except_clause in self.except_clauses:
except_clause.generate_handling_code(code, except_end_label)
if not self.has_default_clause:
code.put_goto(except_error_label)
for exit_label, old_label in [(except_error_label, old_error_label),
(try_break_label, old_break_label),
(try_continue_label, old_continue_label),
(try_return_label, old_return_label),
(except_return_label, old_return_label)]:
if code.label_used(exit_label):
if not normal_case_terminates and not code.label_used(try_end_label):
code.put_goto(try_end_label)
code.put_label(exit_label)
code.mark_pos(self.pos, trace=False)
restore_saved_exception()
code.put_goto(old_label)
if code.label_used(except_end_label):
if not normal_case_terminates and not code.label_used(try_end_label):
code.put_goto(try_end_label)
code.put_label(except_end_label)
restore_saved_exception()
if code.label_used(try_end_label):
code.put_label(try_end_label)
code.putln("}")
for cname in exc_save_vars:
code.funcstate.release_temp(cname)
code.return_label = old_return_label
code.break_label = old_break_label
code.continue_label = old_continue_label
code.error_label = old_error_label
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
for except_clause in self.except_clauses:
except_clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.body.annotate(code)
for except_node in self.except_clauses:
except_node.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class ExceptClauseNode(Node):
# Part of try ... except statement.
#
# pattern [ExprNode]
# target ExprNode or None
# body StatNode
# excinfo_target TupleNode(3*ResultRefNode) or None optional target for exception info (not owned here!)
# match_flag string result of exception match
# exc_value ExcValueNode used internally
# function_name string qualified name of enclosing function
# exc_vars (string * 3) local exception variables
# is_except_as bool Py3-style "except ... as xyz"
# excinfo_target is never set by the parser, but can be set by a transform
# in order to extract more extensive information about the exception as a
# sys.exc_info()-style tuple into a target variable
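    #
    # For illustration, the clause forms this node represents (sketch only):
    #
    #     except ValueError: ...                    # single pattern
    #     except (TypeError, KeyError) as exc: ...  # pattern list + Py3 target
    #     except: ...                               # default clause, no pattern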
child_attrs = ["pattern", "target", "body", "exc_value"]
exc_value = None
excinfo_target = None
is_except_as = False
def analyse_declarations(self, env):
if self.target:
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.function_name = env.qualified_name
if self.pattern:
# normalise/unpack self.pattern into a list
for i, pattern in enumerate(self.pattern):
pattern = pattern.analyse_expressions(env)
self.pattern[i] = pattern.coerce_to_pyobject(env)
if self.target:
from . import ExprNodes
self.exc_value = ExprNodes.ExcValueNode(self.pos)
self.target = self.target.analyse_target_expression(env, self.exc_value)
self.body = self.body.analyse_expressions(env)
return self
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
if self.pattern:
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
exc_tests.append("PyErr_ExceptionMatches(%s)" % pattern.py_result())
match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
code.putln(
"%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
code.putln(
"if (%s) {" %
match_flag)
code.funcstate.release_temp(match_flag)
else:
code.putln("/*except:*/ {")
if (not getattr(self.body, 'stats', True)
and self.excinfo_target is None
and self.target is None):
# most simple case: no exception variable, empty body (pass)
# => reset the exception state, done
code.putln("PyErr_Restore(0,0,0);")
code.put_goto(end_label)
code.putln("}")
return
exc_vars = [code.funcstate.allocate_temp(py_object_type,
manage_ref=True)
for _ in range(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
# there is no target, because this also normalises the
# exception and stores it in the thread state.
code.globalstate.use_utility_code(get_exception_utility_code)
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
code.putln("if (__Pyx_GetException(%s) < 0) %s" % (exc_args,
code.error_goto(self.pos)))
for x in exc_vars:
code.put_gotref(x)
if self.target:
self.exc_value.set_var(exc_vars[1])
self.exc_value.generate_evaluation_code(code)
self.target.generate_assignment_code(self.exc_value, code)
if self.excinfo_target is not None:
for tempvar, node in zip(exc_vars, self.excinfo_target.args):
node.set_var(tempvar)
old_break_label, old_continue_label = code.break_label, code.continue_label
code.break_label = code.new_label('except_break')
code.continue_label = code.new_label('except_continue')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars
self.body.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
if not self.body.is_terminator:
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(end_label)
for new_label, old_label in [(code.break_label, old_break_label),
(code.continue_label, old_continue_label)]:
if code.label_used(new_label):
code.put_label(new_label)
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(old_label)
code.break_label = old_break_label
code.continue_label = old_continue_label
for temp in exc_vars:
code.funcstate.release_temp(temp)
code.putln(
"}")
def generate_function_definitions(self, env, code):
if self.target is not None:
self.target.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
if self.pattern:
for pattern in self.pattern:
pattern.annotate(code)
if self.target:
self.target.annotate(code)
self.body.annotate(code)
class TryFinallyStatNode(StatNode):
# try ... finally statement
#
# body StatNode
# finally_clause StatNode
# finally_except_clause deep-copy of finally_clause for exception case
#
    # The plan is that we funnel all continue, break,
# return and error gotos into the beginning of the
# finally block, setting a variable to remember which
# one we're doing. At the end of the finally block, we
# switch on the variable to figure out where to go.
# In addition, if we're doing an error, we save the
# exception on entry to the finally block and restore
# it on exit.
child_attrs = ["body", "finally_clause", "finally_except_clause"]
preserve_exception = 1
# handle exception case, in addition to return/break/continue
handle_error_case = True
func_return_type = None
finally_except_clause = None
disallow_continue_in_try_finally = 0
# There doesn't seem to be any point in disallowing
# continue in the try block, since we have no problem
# handling it.
is_try_finally_in_nogil = False
@staticmethod
def create_analysed(pos, env, body, finally_clause):
node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
return node
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.finally_except_clause = copy.deepcopy(self.finally_clause)
self.finally_except_clause.analyse_declarations(env)
self.finally_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
self.finally_clause = self.finally_clause.analyse_expressions(env)
self.finally_except_clause = self.finally_except_clause.analyse_expressions(env)
if env.return_type and not env.return_type.is_void:
self.func_return_type = env.return_type
return self
nogil_check = Node.gil_error
gil_message = "Try-finally statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_error_label = code.error_label
old_labels = code.all_new_labels()
new_labels = code.get_all_labels()
new_error_label = code.error_label
if not self.handle_error_case:
code.error_label = old_error_label
catch_label = code.new_label()
code.putln("/*try:*/ {")
if self.disallow_continue_in_try_finally:
was_in_try_finally = code.funcstate.in_try_finally
code.funcstate.in_try_finally = 1
self.body.generate_execution_code(code)
if self.disallow_continue_in_try_finally:
code.funcstate.in_try_finally = was_in_try_finally
code.putln("}")
code.set_all_labels(old_labels)
temps_to_clean_up = code.funcstate.all_free_managed_temps()
code.mark_pos(self.finally_clause.pos)
code.putln("/*finally:*/ {")
def fresh_finally_clause(_next=[self.finally_clause]):
# generate the original subtree once and always keep a fresh copy
node = _next[0]
node_copy = copy.deepcopy(node)
if node is self.finally_clause:
_next[0] = node_copy
else:
node = node_copy
return node
preserve_error = self.preserve_exception and code.label_used(new_error_label)
needs_success_cleanup = not self.finally_clause.is_terminator
if not self.body.is_terminator:
code.putln('/*normal exit:*/{')
fresh_finally_clause().generate_execution_code(code)
if not self.finally_clause.is_terminator:
code.put_goto(catch_label)
code.putln('}')
if preserve_error:
code.putln('/*exception exit:*/{')
if self.is_try_finally_in_nogil:
code.declare_gilstate()
if needs_success_cleanup:
exc_lineno_cnames = tuple([
code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
for _ in range(2)])
exc_filename_cname = code.funcstate.allocate_temp(
PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)),
manage_ref=False)
else:
exc_lineno_cnames = exc_filename_cname = None
exc_vars = tuple([
code.funcstate.allocate_temp(py_object_type, manage_ref=False)
for _ in range(6)])
code.put_label(new_error_label)
self.put_error_catcher(
code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname)
finally_old_labels = code.all_new_labels()
code.putln('{')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars[:3]
self.finally_except_clause.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
code.putln('}')
if needs_success_cleanup:
self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname)
if exc_lineno_cnames:
for cname in exc_lineno_cnames:
code.funcstate.release_temp(cname)
if exc_filename_cname:
code.funcstate.release_temp(exc_filename_cname)
code.put_goto(old_error_label)
for new_label, old_label in zip(code.get_all_labels(), finally_old_labels):
if not code.label_used(new_label):
continue
code.put_label(new_label)
self.put_error_cleaner(code, exc_vars)
code.put_goto(old_label)
for cname in exc_vars:
code.funcstate.release_temp(cname)
code.putln('}')
code.set_all_labels(old_labels)
return_label = code.return_label
for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
if not code.label_used(new_label):
continue
if new_label == new_error_label and preserve_error:
continue # handled above
code.put('%s: ' % new_label)
code.putln('{')
ret_temp = None
if old_label == return_label and not self.finally_clause.is_terminator:
# store away return value for later reuse
if (self.func_return_type and
not self.is_try_finally_in_nogil and
not isinstance(self.finally_clause, GILExitNode)):
ret_temp = code.funcstate.allocate_temp(
self.func_return_type, manage_ref=False)
code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % Naming.retval_cname)
fresh_finally_clause().generate_execution_code(code)
if ret_temp:
code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % ret_temp)
code.funcstate.release_temp(ret_temp)
ret_temp = None
if not self.finally_clause.is_terminator:
code.put_goto(old_label)
code.putln('}')
# End finally
code.put_label(catch_label)
code.putln(
"}")
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
self.finally_clause.generate_function_definitions(env, code)
def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
exc_lineno_cnames, exc_filename_cname):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(get_exception_utility_code)
code.globalstate.use_utility_code(swap_exception_utility_code)
code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3)"
" __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:])
code.putln("if ((PY_MAJOR_VERSION < 3) ||"
# if __Pyx_GetException() fails in Py3,
# store the newly raised exception instead
" unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
"__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
for var in exc_vars:
code.put_xgotref(var)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
exc_lineno_cnames[0], Naming.lineno_cname,
exc_lineno_cnames[1], Naming.clineno_cname,
exc_filename_cname, Naming.filename_cname))
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames, exc_filename_cname):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
code.put_xgiveref(var)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
Naming.lineno_cname, exc_lineno_cnames[0],
Naming.clineno_cname, exc_lineno_cnames[1],
Naming.filename_cname, exc_filename_cname))
def put_error_cleaner(self, code, exc_vars):
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
code.put_xdecref_clear(var, py_object_type)
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:])
def annotate(self, code):
self.body.annotate(code)
self.finally_clause.annotate(code)
class NogilTryFinallyStatNode(TryFinallyStatNode):
"""
A try/finally statement that may be used in nogil code sections.
"""
preserve_exception = False
nogil_check = None
class GILStatNode(NogilTryFinallyStatNode):
# 'with gil' or 'with nogil' statement
#
# state string 'gil' or 'nogil'
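    #
    # For illustration, the constructs this node compiles (sketch only):
    #
    #     with nogil:
    #         ...        # GIL released around C-level code
    #     with gil:
    #         ...        # GIL re-acquired inside a nogil section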
state_temp = None
def __init__(self, pos, state, body):
self.state = state
self.create_state_temp_if_needed(pos, state, body)
TryFinallyStatNode.__init__(self, pos,
body=body,
finally_clause=GILExitNode(
pos, state=state, state_temp=self.state_temp))
def create_state_temp_if_needed(self, pos, state, body):
from .ParseTreeTransforms import YieldNodeCollector
collector = YieldNodeCollector()
collector.visitchildren(body)
if not collector.yields and not collector.awaits:
return
if state == 'gil':
temp_type = PyrexTypes.c_gilstate_type
else:
temp_type = PyrexTypes.c_threadstate_ptr_type
from . import ExprNodes
self.state_temp = ExprNodes.TempNode(pos, temp_type)
def analyse_declarations(self, env):
env._in_with_gil_block = (self.state == 'gil')
if self.state == 'gil':
env.has_with_gil_block = True
return super(GILStatNode, self).analyse_declarations(env)
def analyse_expressions(self, env):
env.use_utility_code(
UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
was_nogil = env.nogil
env.nogil = self.state == 'nogil'
node = TryFinallyStatNode.analyse_expressions(self, env)
env.nogil = was_nogil
return node
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.begin_block()
if self.state_temp:
self.state_temp.allocate(code)
variable = self.state_temp.result()
else:
variable = None
old_gil_config = code.funcstate.gil_owned
if self.state == 'gil':
code.put_ensure_gil(variable=variable)
code.funcstate.gil_owned = True
else:
code.put_release_gil(variable=variable)
code.funcstate.gil_owned = False
TryFinallyStatNode.generate_execution_code(self, code)
if self.state_temp:
self.state_temp.release(code)
code.funcstate.gil_owned = old_gil_config
code.end_block()
class GILExitNode(StatNode):
"""
Used as the 'finally' block in a GILStatNode
state string 'gil' or 'nogil'
"""
child_attrs = []
state_temp = None
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.state_temp:
variable = self.state_temp.result()
else:
variable = None
if self.state == 'gil':
code.put_release_ensured_gil(variable)
else:
code.put_acquire_gil(variable)
class EnsureGILNode(GILExitNode):
"""
Ensure the GIL in nogil functions for cleanup before returning.
"""
def generate_execution_code(self, code):
code.put_ensure_gil(declare_gilstate=False)
utility_code_for_cimports = {
# utility code (or inlining c) in a pxd (or pyx) file.
# TODO: Consider a generic user-level mechanism for importing
'cpython.array' : ("ArrayAPI", "arrayarray.h"),
'cpython.array.array' : ("ArrayAPI", "arrayarray.h"),
}
utility_code_for_imports = {
# utility code used when special modules are imported.
# TODO: Consider a generic user-level mechanism for importing
'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"),
'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"),
}
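# For illustration (sketch of how the table above is used): a plain
# "import asyncio" in user code additionally loads the "PatchAsyncIO"
# utility code from Coroutine.c and runs __Pyx_patch_asyncio() on the
# imported module.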
class CImportStatNode(StatNode):
# cimport statement
#
# module_name string Qualified name of module being imported
# as_name string or None Name specified in "as" clause, if any
# is_absolute bool True for absolute imports, False otherwise
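    #
    # For illustration (sketch only):
    #
    #     cimport cython.parallel     # module_name == 'cython.parallel'
    #     cimport numpy as np         # module_name == 'numpy', as_name == 'np'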
child_attrs = []
is_absolute = False
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
module_scope = env.find_module(
self.module_name, self.pos, relative_level=0 if self.is_absolute else -1)
if "." in self.module_name:
names = [EncodedString(name) for name in self.module_name.split(".")]
top_name = names[0]
top_module_scope = env.context.find_submodule(top_name)
module_scope = top_module_scope
for name in names[1:]:
submodule_scope = module_scope.find_submodule(name)
module_scope.declare_module(name, submodule_scope, self.pos)
module_scope = submodule_scope
if self.as_name:
env.declare_module(self.as_name, module_scope, self.pos)
else:
env.add_imported_module(module_scope)
env.declare_module(top_name, top_module_scope, self.pos)
else:
name = self.as_name or self.module_name
env.declare_module(name, module_scope, self.pos)
if self.module_name in utility_code_for_cimports:
env.use_utility_code(UtilityCode.load_cached(
*utility_code_for_cimports[self.module_name]))
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FromCImportStatNode(StatNode):
# from ... cimport statement
#
# module_name string Qualified name of module
# relative_level int or None Relative import: number of dots before module_name
# imported_names [(pos, name, as_name, kind)] Names to be imported
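    #
    # For illustration (sketch only):
    #
    #     from pkg.mod cimport name, other as alias
    #     from . cimport sibling      # relative_level == 1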
child_attrs = []
module_name = None
relative_level = None
imported_names = None
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
if self.relative_level and self.relative_level > env.qualified_name.count('.'):
error(self.pos, "relative cimport beyond main package is not allowed")
return
module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level)
module_name = module_scope.qualified_name
env.add_imported_module(module_scope)
for pos, name, as_name, kind in self.imported_names:
if name == "*":
for local_name, entry in list(module_scope.entries.items()):
env.add_imported_entry(local_name, entry, pos)
else:
entry = module_scope.lookup(name)
if entry:
if kind and not self.declaration_matches(entry, kind):
entry.redeclared(pos)
entry.used = 1
else:
if kind == 'struct' or kind == 'union':
entry = module_scope.declare_struct_or_union(
name, kind=kind, scope=None, typedef_flag=0, pos=pos)
elif kind == 'class':
entry = module_scope.declare_c_class(name, pos=pos, module_name=module_name)
else:
submodule_scope = env.context.find_module(
name, relative_to=module_scope, pos=self.pos, absolute_fallback=False)
if submodule_scope.parent_module is module_scope:
env.declare_module(as_name or name, submodule_scope, self.pos)
else:
error(pos, "Name '%s' not declared in module '%s'" % (name, module_name))
if entry:
local_name = as_name or name
env.add_imported_entry(local_name, entry, pos)
if module_name.startswith('cpython'): # enough for now
if module_name in utility_code_for_cimports:
env.use_utility_code(UtilityCode.load_cached(
*utility_code_for_cimports[module_name]))
for _, name, _, _ in self.imported_names:
fqname = '%s.%s' % (module_name, name)
if fqname in utility_code_for_cimports:
env.use_utility_code(UtilityCode.load_cached(
*utility_code_for_cimports[fqname]))
def declaration_matches(self, entry, kind):
if not entry.is_type:
return 0
type = entry.type
if kind == 'class':
if not type.is_extension_type:
return 0
else:
if not type.is_struct_or_union:
return 0
if kind != type.kind:
return 0
return 1
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FromImportStatNode(StatNode):
# from ... import statement
#
# module ImportNode
# items [(string, NameNode)]
# interned_items [(string, NameNode, ExprNode)]
# item PyTempNode used internally
# import_star boolean used internally
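    #
    # For illustration (sketch only):
    #
    #     from mod import a, b as c   # items == [('a', <NameNode>), ('b', <NameNode>)]
    #     from mod import *           # import_star == 1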
child_attrs = ["module"]
import_star = 0
def analyse_declarations(self, env):
for name, target in self.items:
if name == "*":
if not env.is_module_scope:
error(self.pos, "import * only allowed at module level")
return
env.has_import_star = 1
self.import_star = 1
else:
target.analyse_target_declaration(env)
def analyse_expressions(self, env):
from . import ExprNodes
self.module = self.module.analyse_expressions(env)
self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
self.interned_items = []
for name, target in self.items:
if name == '*':
for _, entry in env.entries.items():
if not entry.is_type and entry.type.is_extension_type:
env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
break
else:
entry = env.lookup(target.name)
# check whether or not entry is already cimported
if (entry.is_type and entry.type.name == name
and hasattr(entry.type, 'module_name')):
if entry.type.module_name == self.module.module_name.value:
# cimported with absolute name
continue
try:
# cimported with relative name
module = env.find_module(self.module.module_name.value, pos=self.pos,
relative_level=self.module.level)
if entry.type.module_name == module.qualified_name:
continue
except AttributeError:
pass
target = target.analyse_target_expression(env, None) # FIXME?
if target.type is py_object_type:
coerced_item = None
else:
coerced_item = self.item.coerce_to(target.type, env)
self.interned_items.append((name, target, coerced_item))
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.module.generate_evaluation_code(code)
if self.import_star:
code.putln(
'if (%s(%s) < 0) %s;' % (
Naming.import_star,
self.module.py_result(),
code.error_goto(self.pos)))
item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.item.set_cname(item_temp)
if self.interned_items:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ImportFrom", "ImportExport.c"))
for name, target, coerced_item in self.interned_items:
code.putln(
'%s = __Pyx_ImportFrom(%s, %s); %s' % (
item_temp,
self.module.py_result(),
code.intern_identifier(name),
code.error_goto_if_null(item_temp, self.pos)))
code.put_gotref(item_temp)
if coerced_item is None:
target.generate_assignment_code(self.item, code)
else:
coerced_item.allocate_temp_result(code)
coerced_item.generate_result_code(code)
target.generate_assignment_code(coerced_item, code)
code.put_decref_clear(item_temp, py_object_type)
code.funcstate.release_temp(item_temp)
self.module.generate_disposal_code(code)
self.module.free_temps(code)
class ParallelNode(Node):
"""
Base class for cython.parallel constructs.
"""
nogil_check = None
class ParallelStatNode(StatNode, ParallelNode):
"""
Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.
assignments { Entry(var) : (var.pos, inplace_operator_or_None) }
assignments to variables in this parallel section
parent parent ParallelStatNode or None
is_parallel indicates whether this node is OpenMP parallel
(true for #pragma omp parallel for and
#pragma omp parallel)
is_parallel is true for:
#pragma omp parallel
#pragma omp parallel for
sections, but NOT for
#pragma omp for
We need this to determine the sharing attributes.
privatization_insertion_point a code insertion point used to make temps
private (esp. the "nsteps" temp)
args tuple the arguments passed to the parallel construct
kwargs DictNode the keyword arguments passed to the parallel
construct (replaced by its compile time value)
"""
child_attrs = ['body', 'num_threads']
body = None
is_prange = False
is_nested_prange = False
error_label_used = False
num_threads = None
chunksize = None
parallel_exc = (
Naming.parallel_exc_type,
Naming.parallel_exc_value,
Naming.parallel_exc_tb,
)
parallel_pos_info = (
Naming.parallel_filename,
Naming.parallel_lineno,
Naming.parallel_clineno,
)
pos_info = (
Naming.filename_cname,
Naming.lineno_cname,
Naming.clineno_cname,
)
critical_section_counter = 0
def __init__(self, pos, **kwargs):
super(ParallelStatNode, self).__init__(pos, **kwargs)
# All assignments in this scope
self.assignments = kwargs.get('assignments') or {}
# All seen closure cnames and their temporary cnames
self.seen_closure_vars = set()
# Dict of variables that should be declared (first|last|)private or
# reduction { Entry: (op, lastprivate) }.
# If op is not None, it's a reduction.
self.privates = {}
# [NameNode]
self.assigned_nodes = []
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.num_threads = None
if self.kwargs:
# Try to find num_threads and chunksize keyword arguments
pairs = []
for dictitem in self.kwargs.key_value_pairs:
if dictitem.key.value == 'num_threads':
self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
self.chunksize = dictitem.value
else:
pairs.append(dictitem)
self.kwargs.key_value_pairs = pairs
try:
self.kwargs = self.kwargs.compile_time_value(env)
            except Exception:
error(self.kwargs.pos, "Only compile-time values may be "
"supplied as keyword arguments")
else:
self.kwargs = {}
for kw, val in self.kwargs.items():
if kw not in self.valid_keyword_arguments:
error(self.pos, "Invalid keyword argument: %s" % kw)
else:
setattr(self, kw, val)
def analyse_expressions(self, env):
if self.num_threads:
self.num_threads = self.num_threads.analyse_expressions(env)
if self.chunksize:
self.chunksize = self.chunksize.analyse_expressions(env)
self.body = self.body.analyse_expressions(env)
self.analyse_sharing_attributes(env)
if self.num_threads is not None:
if (self.parent and self.parent.num_threads is not None and not
self.parent.is_prange):
error(self.pos,
"num_threads already declared in outer section")
elif self.parent and not self.parent.is_prange:
error(self.pos,
"num_threads must be declared in the parent parallel section")
elif (self.num_threads.type.is_int and
self.num_threads.is_literal and
self.num_threads.compile_time_value(env) <= 0):
error(self.pos,
"argument to num_threads must be greater than 0")
if not self.num_threads.is_simple():
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
def analyse_sharing_attributes(self, env):
"""
Analyse the privates for this block and set them in self.privates.
This should be called in a post-order fashion during the
analyse_expressions phase
"""
for entry, (pos, op) in self.assignments.items():
if self.is_prange and not self.is_parallel:
# closely nested prange in a with parallel block, disallow
# assigning to privates in the with parallel block (we
# consider it too implicit and magicky for users)
if entry in self.parent.assignments:
error(pos,
"Cannot assign to private of outer parallel block")
continue
if not self.is_prange and op:
                # Again possible, but considered too magicky
error(pos, "Reductions not allowed for parallel blocks")
continue
# By default all variables should have the same values as if
# executed sequentially
lastprivate = True
self.propagate_var_privatization(entry, pos, op, lastprivate)
def propagate_var_privatization(self, entry, pos, op, lastprivate):
"""
Propagate the sharing attributes of a variable. If the privatization is
        determined by a parent scope, don't propagate further.
        If we are a prange, we propagate our sharing attributes outwards to
        other pranges. If we are a prange in a parallel block and the parallel
block does not determine the variable private, we propagate to the
parent of the parent. Recursion stops at parallel blocks, as they have
no concept of lastprivate or reduction.
So the following cases propagate:
sum is a reduction for all loops:
for i in prange(n):
for j in prange(n):
for k in prange(n):
sum += i * j * k
sum is a reduction for both loops, local_var is private to the
parallel with block:
for i in prange(n):
with parallel:
local_var = ... # private to the parallel
for j in prange(n):
sum += i * j
Nested with parallel blocks are disallowed, because they wouldn't
allow you to propagate lastprivates or reductions:
#pragma omp parallel for lastprivate(i)
for i in prange(n):
sum = 0
#pragma omp parallel private(j, sum)
with parallel:
#pragma omp parallel
with parallel:
#pragma omp for lastprivate(j) reduction(+:sum)
for j in prange(n):
sum += i
# sum and j are well-defined here
# sum and j are undefined here
# sum and j are undefined here
"""
self.privates[entry] = (op, lastprivate)
if entry.type.is_memoryviewslice:
error(pos, "Memoryview slices can only be shared in parallel sections")
return
if self.is_prange:
if not self.is_parallel and entry not in self.parent.assignments:
# Parent is a parallel with block
parent = self.parent.parent
else:
parent = self.parent
# We don't need to propagate privates, only reductions and
# lastprivates
if parent and (op or lastprivate):
parent.propagate_var_privatization(entry, pos, op, lastprivate)
def _allocate_closure_temp(self, code, entry):
"""
        Helper function that allocates a temporary for a closure variable that
        is assigned to.
"""
if self.parent:
return self.parent._allocate_closure_temp(code, entry)
if entry.cname in self.seen_closure_vars:
return entry.cname
cname = code.funcstate.allocate_temp(entry.type, True)
# Add both the actual cname and the temp cname, as the actual cname
# will be replaced with the temp cname on the entry
self.seen_closure_vars.add(entry.cname)
self.seen_closure_vars.add(cname)
self.modified_entries.append((entry, entry.cname))
code.putln("%s = %s;" % (cname, entry.cname))
entry.cname = cname
def initialize_privates_to_nan(self, code, exclude=None):
first = True
for entry, (op, lastprivate) in self.privates.items():
if not op and (not exclude or entry != exclude):
invalid_value = entry.type.invalid_value()
if invalid_value:
if first:
code.putln("/* Initialize private variables to "
"invalid values */")
first = False
code.putln("%s = %s;" % (entry.cname,
entry.type.cast_code(invalid_value)))
def evaluate_before_block(self, code, expr):
c = self.begin_of_parallel_control_block_point_after_decls
        # We need to set the owner to ourselves temporarily, as otherwise
        # allocate_temp may generate a comment in the middle of our pragma
        # when DebugFlags.debug_temp_code_comments is in effect.
owner = c.funcstate.owner
c.funcstate.owner = c
expr.generate_evaluation_code(c)
c.funcstate.owner = owner
return expr.result()
def put_num_threads(self, code):
"""
Write self.num_threads if set as the num_threads OpenMP directive
"""
if self.num_threads is not None:
code.put(" num_threads(%s)" % self.evaluate_before_block(code,
self.num_threads))
def declare_closure_privates(self, code):
"""
If a variable is in a scope object, we need to allocate a temp and
assign the value from the temp to the variable in the scope object
after the parallel section. This kind of copying should be done only
in the outermost parallel section.
"""
self.modified_entries = []
for entry in self.assignments:
if entry.from_closure or entry.in_closure:
self._allocate_closure_temp(code, entry)
def release_closure_privates(self, code):
"""
Release any temps used for variables in scope objects. As this is the
outermost parallel block, we don't need to delete the cnames from
self.seen_closure_vars.
"""
for entry, original_cname in self.modified_entries:
code.putln("%s = %s;" % (original_cname, entry.cname))
code.funcstate.release_temp(entry.cname)
entry.cname = original_cname
def privatize_temps(self, code, exclude_temps=()):
"""
Make any used temporaries private. Before the relevant code block
code.start_collecting_temps() should have been called.
"""
if self.is_parallel:
c = self.privatization_insertion_point
self.temps = temps = code.funcstate.stop_collecting_temps()
privates, firstprivates = [], []
for temp, type in temps:
if type.is_pyobject or type.is_memoryviewslice:
firstprivates.append(temp)
else:
privates.append(temp)
if privates:
c.put(" private(%s)" % ", ".join(privates))
if firstprivates:
c.put(" firstprivate(%s)" % ", ".join(firstprivates))
if self.breaking_label_used:
shared_vars = [Naming.parallel_why]
if self.error_label_used:
shared_vars.extend(self.parallel_exc)
c.put(" private(%s, %s, %s)" % self.pos_info)
c.put(" shared(%s)" % ', '.join(shared_vars))
def cleanup_temps(self, code):
# Now clean up any memoryview slice and object temporaries
if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in self.temps:
if type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(temp, have_gil=False)
elif type.is_pyobject:
code.put_xdecref(temp, type)
code.putln("%s = NULL;" % temp)
def setup_parallel_control_flow_block(self, code):
"""
Sets up a block that surrounds the parallel block to determine
how the parallel section was exited. Any kind of return is
trapped (break, continue, return, exceptions). This is the idea:
{
int why = 0;
#pragma omp parallel
{
return # -> goto new_return_label;
goto end_parallel;
new_return_label:
why = 3;
goto end_parallel;
end_parallel:;
#pragma omp flush(why) # we need to flush for every iteration
}
if (why == 3)
goto old_return_label;
}
"""
self.old_loop_labels = code.new_loop_labels()
self.old_error_label = code.new_error_label()
self.old_return_label = code.return_label
code.return_label = code.new_label(name="return")
code.begin_block() # parallel control flow block
self.begin_of_parallel_control_block_point = code.insertion_point()
self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()
self.undef_builtin_expect_apple_gcc_bug(code)
def begin_parallel_block(self, code):
"""
Each OpenMP thread in a parallel section that contains a with gil block
must have the thread-state initialized. The call to
        PyGILState_Release() then deallocates our threadstate. If we didn't
        do this, each with gil block would allocate and deallocate one, thereby
        losing exception information before it can be saved when leaving the
parallel section.
"""
self.begin_of_parallel_block = code.insertion_point()
def end_parallel_block(self, code):
"""
To ensure all OpenMP threads have thread states, we ensure the GIL
in each thread (which creates a thread state if it doesn't exist),
after which we release the GIL.
On exit, reacquire the GIL and release the thread state.
If compiled without OpenMP support (at the C level), then we still have
to acquire the GIL to decref any object temporaries.
"""
if self.error_label_used:
begin_code = self.begin_of_parallel_block
end_code = code
begin_code.putln("#ifdef _OPENMP")
begin_code.put_ensure_gil(declare_gilstate=True)
begin_code.putln("Py_BEGIN_ALLOW_THREADS")
begin_code.putln("#endif /* _OPENMP */")
end_code.putln("#ifdef _OPENMP")
end_code.putln("Py_END_ALLOW_THREADS")
end_code.putln("#else")
end_code.put_safe("{\n")
end_code.put_ensure_gil()
end_code.putln("#endif /* _OPENMP */")
self.cleanup_temps(end_code)
end_code.put_release_ensured_gil()
end_code.putln("#ifndef _OPENMP")
end_code.put_safe("}\n")
end_code.putln("#endif /* _OPENMP */")
def trap_parallel_exit(self, code, should_flush=False):
"""
Trap any kind of return inside a parallel construct. 'should_flush'
        indicates whether the 'why' variable should be flushed, which is needed
        by prange to skip the loop. It also indicates whether we need to register
a continue (we need this for parallel blocks, but not for prange
loops, as it is a direct jump there).
It uses the same mechanism as try/finally:
1 continue
2 break
3 return
4 error
"""
save_lastprivates_label = code.new_label()
dont_return_label = code.new_label()
self.any_label_used = False
self.breaking_label_used = False
self.error_label_used = False
self.parallel_private_temps = []
all_labels = code.get_all_labels()
# Figure this out before starting to generate any code
for label in all_labels:
if code.label_used(label):
self.breaking_label_used = (self.breaking_label_used or
label != code.continue_label)
self.any_label_used = True
if self.any_label_used:
code.put_goto(dont_return_label)
for i, label in enumerate(all_labels):
if not code.label_used(label):
continue
is_continue_label = label == code.continue_label
code.put_label(label)
if not (should_flush and is_continue_label):
if label == code.error_label:
self.error_label_used = True
self.fetch_parallel_exception(code)
code.putln("%s = %d;" % (Naming.parallel_why, i + 1))
if (self.breaking_label_used and self.is_prange and not
is_continue_label):
code.put_goto(save_lastprivates_label)
else:
code.put_goto(dont_return_label)
if self.any_label_used:
if self.is_prange and self.breaking_label_used:
# Don't rely on lastprivate, save our lastprivates
code.put_label(save_lastprivates_label)
self.save_parallel_vars(code)
code.put_label(dont_return_label)
if should_flush and self.breaking_label_used:
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
def save_parallel_vars(self, code):
"""
The following shenanigans are instated when we break, return or
propagate errors from a prange. In this case we cannot rely on
lastprivate() to do its job, as no iterations may have executed yet
in the last thread, leaving the values undefined. It is most likely
that the breaking thread has well-defined values of the lastprivate
variables, so we keep those values.
"""
section_name = ("__pyx_parallel_lastprivates%d" %
self.critical_section_counter)
code.putln_openmp("#pragma omp critical(%s)" % section_name)
ParallelStatNode.critical_section_counter += 1
code.begin_block() # begin critical section
c = self.begin_of_parallel_control_block_point
temp_count = 0
for entry, (op, lastprivate) in self.privates.items():
if not lastprivate or entry.type.is_pyobject:
continue
type_decl = entry.type.empty_declaration_code()
temp_cname = "__pyx_parallel_temp%d" % temp_count
private_cname = entry.cname
temp_count += 1
invalid_value = entry.type.invalid_value()
if invalid_value:
init = ' = ' + invalid_value
else:
init = ''
# Declare the parallel private in the outer block
c.putln("%s %s%s;" % (type_decl, temp_cname, init))
# Initialize before escaping
code.putln("%s = %s;" % (temp_cname, private_cname))
self.parallel_private_temps.append((temp_cname, private_cname))
code.end_block() # end critical section
def fetch_parallel_exception(self, code):
"""
As each OpenMP thread may raise an exception, we need to fetch that
exception from the threadstate and save it for after the parallel
section where it can be re-raised in the master thread.
Although it would seem that __pyx_filename, __pyx_lineno and
__pyx_clineno are only assigned to under exception conditions (i.e.,
when we have the GIL), and thus should be allowed to be shared without
        any race condition, they are in fact subject to the same race
        conditions they were subject to previously, when they were global
        variables and functions were allowed to release the GIL:
thread A thread B
acquire
set lineno
release
acquire
set lineno
release
acquire
fetch exception
release
skip the fetch
deallocate threadstate deallocate threadstate
"""
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
code.putln(
"if (!%s) {" % Naming.parallel_exc_type)
code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % self.parallel_exc)
pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
code.funcstate.uses_error_indicator = True
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_gotref(Naming.parallel_exc_type)
code.putln(
"}")
code.put_release_ensured_gil()
code.end_block()
def restore_parallel_exception(self, code):
"Re-raise a parallel exception"
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.put_giveref(Naming.parallel_exc_type)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % self.parallel_exc)
pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_release_ensured_gil()
code.end_block()
def restore_labels(self, code):
"""
        Restore all old labels. Call this before the 'else' clause of for
loops and always before ending the parallel control flow block.
"""
code.set_all_labels(self.old_loop_labels + (self.old_return_label,
self.old_error_label))
def end_parallel_control_flow_block(self, code,
break_=False, continue_=False):
"""
This ends the parallel control flow block and based on how the parallel
section was exited, takes the corresponding action. The break_ and
continue_ parameters indicate whether these should be propagated
outwards:
for i in prange(...):
with cython.parallel.parallel():
continue
Here break should be trapped in the parallel block, and propagated to
the for loop.
"""
c = self.begin_of_parallel_control_block_point
# Firstly, always prefer errors over returning, continue or break
if self.error_label_used:
c.putln("const char *%s = NULL; int %s = 0, %s = 0;" %
self.parallel_pos_info)
c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" %
self.parallel_exc)
code.putln(
"if (%s) {" % Naming.parallel_exc_type)
code.putln("/* This may have been overridden by a continue, "
"break or return in another thread. Prefer the error. */")
code.putln("%s = 4;" % Naming.parallel_why)
code.putln(
"}")
if continue_:
any_label_used = self.any_label_used
else:
any_label_used = self.breaking_label_used
if any_label_used:
# __pyx_parallel_why is used, declare and initialize
c.putln("int %s;" % Naming.parallel_why)
c.putln("%s = 0;" % Naming.parallel_why)
code.putln(
"if (%s) {" % Naming.parallel_why)
for temp_cname, private_cname in self.parallel_private_temps:
code.putln("%s = %s;" % (private_cname, temp_cname))
code.putln("switch (%s) {" % Naming.parallel_why)
if continue_:
code.put(" case 1: ")
code.put_goto(code.continue_label)
if break_:
code.put(" case 2: ")
code.put_goto(code.break_label)
code.put(" case 3: ")
code.put_goto(code.return_label)
if self.error_label_used:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(" case 4:")
self.restore_parallel_exception(code)
code.put_goto(code.error_label)
code.putln("}") # end switch
code.putln(
"}") # end if
code.end_block() # end parallel control flow block
self.redef_builtin_expect_apple_gcc_bug(code)
# FIXME: improve with version number for OS X Lion
buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
have_expect_condition = "(defined(__GNUC__) && " \
"(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition)
def undef_builtin_expect_apple_gcc_bug(self, code):
"""
        A bug on OS X Lion disallows __builtin_expect macros. This code avoids them.
"""
if not self.parent:
code.undef_builtin_expect(self.redef_condition)
def redef_builtin_expect_apple_gcc_bug(self, code):
if not self.parent:
code.redef_builtin_expect(self.redef_condition)
class ParallelWithBlockNode(ParallelStatNode):
"""
This node represents a 'with cython.parallel.parallel():' block
"""
valid_keyword_arguments = ['num_threads']
num_threads = None
def analyse_declarations(self, env):
super(ParallelWithBlockNode, self).analyse_declarations(env)
if self.args:
error(self.pos, "cython.parallel.parallel() does not take "
"positional arguments")
def generate_execution_code(self, code):
self.declare_closure_privates(code)
self.setup_parallel_control_flow_block(code)
code.putln("#ifdef _OPENMP")
code.put("#pragma omp parallel ")
if self.privates:
privates = [e.cname for e in self.privates
if not e.type.is_pyobject]
code.put('private(%s)' % ', '.join(privates))
self.privatization_insertion_point = code.insertion_point()
self.put_num_threads(code)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # parallel block
self.begin_parallel_block(code)
self.initialize_privates_to_nan(code)
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code)
self.privatize_temps(code)
self.end_parallel_block(code)
code.end_block() # end parallel block
continue_ = code.label_used(code.continue_label)
break_ = code.label_used(code.break_label)
self.restore_labels(code)
self.end_parallel_control_flow_block(code, break_=break_,
continue_=continue_)
self.release_closure_privates(code)
class ParallelRangeNode(ParallelStatNode):
"""
This node represents a 'for i in cython.parallel.prange():' construct.
target NameNode the target iteration variable
else_clause Node or None the else clause of this loop
"""
child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
'chunksize']
body = target = else_clause = args = None
start = stop = step = None
is_prange = True
nogil = None
schedule = None
valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']
def __init__(self, pos, **kwds):
super(ParallelRangeNode, self).__init__(pos, **kwds)
# Pretend to be a ForInStatNode for control flow analysis
self.iterator = PassStatNode(pos)
def analyse_declarations(self, env):
super(ParallelRangeNode, self).analyse_declarations(env)
self.target.analyse_target_declaration(env)
if self.else_clause is not None:
self.else_clause.analyse_declarations(env)
if not self.args or len(self.args) > 3:
error(self.pos, "Invalid number of positional arguments to prange")
return
if len(self.args) == 1:
self.stop, = self.args
elif len(self.args) == 2:
self.start, self.stop = self.args
else:
self.start, self.stop, self.step = self.args
if hasattr(self.schedule, 'decode'):
self.schedule = self.schedule.decode('ascii')
if self.schedule not in (None, 'static', 'dynamic', 'guided',
'runtime'):
error(self.pos, "Invalid schedule argument to prange: %s" %
(self.schedule,))
def analyse_expressions(self, env):
was_nogil = env.nogil
if self.nogil:
env.nogil = True
if self.target is None:
error(self.pos, "prange() can only be used as part of a for loop")
return self
self.target = self.target.analyse_target_types(env)
if not self.target.type.is_numeric:
# Not a valid type, assume one for now anyway
if not self.target.type.is_pyobject:
# nogil_check will catch the is_pyobject case
error(self.target.pos,
"Must be of numeric type, not %s" % self.target.type)
self.index_type = PyrexTypes.c_py_ssize_t_type
else:
self.index_type = self.target.type
if not self.index_type.signed:
warning(self.target.pos,
"Unsigned index type not allowed before OpenMP 3.0",
level=2)
# Setup start, stop and step, allocating temps if needed
self.names = 'start', 'stop', 'step'
start_stop_step = self.start, self.stop, self.step
for node, name in zip(start_stop_step, self.names):
if node is not None:
node.analyse_types(env)
if not node.type.is_numeric:
error(node.pos, "%s argument must be numeric" % name)
continue
if not node.is_literal:
node = node.coerce_to_temp(env)
setattr(self, name, node)
# As we range from 0 to nsteps, computing the index along the
# way, we need a fitting type for 'i' and 'nsteps'
self.index_type = PyrexTypes.widest_numeric_type(
self.index_type, node.type)
if self.else_clause is not None:
self.else_clause = self.else_clause.analyse_expressions(env)
# Although not actually an assignment in this scope, it should be
# treated as such to ensure it is unpacked if a closure temp, and to
# ensure lastprivate behaviour and propagation. If the target index is
# not a NameNode, it won't have an entry, and an error was issued by
# ParallelRangeTransform
if hasattr(self.target, 'entry'):
self.assignments[self.target.entry] = self.target.pos, None
node = super(ParallelRangeNode, self).analyse_expressions(env)
if node.chunksize:
if not node.schedule:
error(node.chunksize.pos,
"Must provide schedule with chunksize")
elif node.schedule == 'runtime':
error(node.chunksize.pos,
"Chunksize not valid for the schedule runtime")
elif (node.chunksize.type.is_int and
node.chunksize.is_literal and
node.chunksize.compile_time_value(env) <= 0):
                error(node.chunksize.pos, "Chunksize must be positive")
node.chunksize = node.chunksize.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
if node.nogil:
env.nogil = was_nogil
node.is_nested_prange = node.parent and node.parent.is_prange
if node.is_nested_prange:
parent = node
while parent.parent and parent.parent.is_prange:
parent = parent.parent
parent.assignments.update(node.assignments)
parent.privates.update(node.privates)
parent.assigned_nodes.extend(node.assigned_nodes)
return node
def nogil_check(self, env):
names = 'start', 'stop', 'step', 'target'
nodes = self.start, self.stop, self.step, self.target
for name, node in zip(names, nodes):
if node is not None and node.type.is_pyobject:
error(node.pos, "%s may not be a Python object "
"as we don't have the GIL" % name)
def generate_execution_code(self, code):
"""
Generate code in the following steps
            1) copy any closure variables determined to be thread-private
               into temporaries
2) allocate temps for start, stop and step
3) generate a loop that calculates the total number of steps,
which then computes the target iteration variable for every step:
for i in prange(start, stop, step):
...
becomes
nsteps = (stop - start) / step;
i = start;
#pragma omp parallel for lastprivate(i)
for (temp = 0; temp < nsteps; temp++) {
i = start + step * temp;
...
}
Note that accumulation of 'i' would have a data dependency
between iterations.
Also, you can't do this
for (i = start; i < stop; i += step)
...
as the '<' operator should become '>' for descending loops.
'for i from x < i < y:' does not suffer from this problem
as the relational operator is known at compile time!
4) release our temps and write back any private closure variables
"""
self.declare_closure_privates(code)
# This can only be a NameNode
target_index_cname = self.target.entry.cname
# This will be used as the dict to format our code strings, holding
        # the start, stop, step, temps and target cnames
fmt_dict = {
'target': target_index_cname,
}
# Setup start, stop and step, allocating temps if needed
start_stop_step = self.start, self.stop, self.step
defaults = '0', '0', '1'
for node, name, default in zip(start_stop_step, self.names, defaults):
if node is None:
result = default
elif node.is_literal:
result = node.get_constant_c_result_code()
else:
node.generate_evaluation_code(code)
result = node.result()
fmt_dict[name] = result
fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
# TODO: check if the step is 0 and if so, raise an exception in a
# 'with gil' block. For now, just abort
code.putln("if (%(step)s == 0) abort();" % fmt_dict)
self.setup_parallel_control_flow_block(code) # parallel control flow block
self.control_flow_var_code_point = code.insertion_point()
# Note: nsteps is private in an outer scope if present
code.putln("%(nsteps)s = (%(stop)s - %(start)s) / %(step)s;" % fmt_dict)
# The target iteration variable might not be initialized, do it only if
# we are executing at least 1 iteration, otherwise we should leave the
# target unaffected. The target iteration variable is firstprivate to
# shut up compiler warnings caused by lastprivate, as the compiler
# erroneously believes that nsteps may be <= 0, leaving the private
# target index uninitialized
code.putln("if (%(nsteps)s > 0)" % fmt_dict)
code.begin_block() # if block
self.generate_loop(code, fmt_dict)
code.end_block() # end if block
self.restore_labels(code)
if self.else_clause:
if self.breaking_label_used:
code.put("if (%s < 2)" % Naming.parallel_why)
code.begin_block() # else block
code.putln("/* else */")
self.else_clause.generate_execution_code(code)
code.end_block() # end else block
# ------ cleanup ------
self.end_parallel_control_flow_block(code) # end parallel control flow block
# And finally, release our privates and write back any closure
# variables
for temp in start_stop_step + (self.chunksize, self.num_threads):
if temp is not None:
temp.generate_disposal_code(code)
temp.free_temps(code)
code.funcstate.release_temp(fmt_dict['i'])
code.funcstate.release_temp(fmt_dict['nsteps'])
self.release_closure_privates(code)
def generate_loop(self, code, fmt_dict):
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
if not self.is_parallel:
code.put("#pragma omp for")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.parent.privatization_insertion_point
else:
code.put("#pragma omp parallel")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.privatization_insertion_point
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # pragma omp parallel begin block
# Initialize the GIL if needed for this thread
self.begin_parallel_block(code)
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
code.put("#pragma omp for")
for entry, (op, lastprivate) in self.privates.items():
# Don't declare the index variable as a reduction
if op and op in "+*-&^|" and entry != self.target.entry:
if entry.type.is_pyobject:
error(self.pos, "Python objects cannot be reductions")
else:
#code.put(" reduction(%s:%s)" % (op, entry.cname))
# This is the only way reductions + nesting works in gcc4.5
reduction_codepoint.put(
" reduction(%s:%s)" % (op, entry.cname))
else:
if entry == self.target.entry:
code.put(" firstprivate(%s)" % entry.cname)
code.put(" lastprivate(%s)" % entry.cname)
continue
if not entry.type.is_pyobject:
if lastprivate:
private = 'lastprivate'
else:
private = 'private'
code.put(" %s(%s)" % (private, entry.cname))
if self.schedule:
if self.chunksize:
chunksize = ", %s" % self.evaluate_before_block(code,
self.chunksize)
else:
chunksize = ""
code.put(" schedule(%s%s)" % (self.schedule, chunksize))
self.put_num_threads(reduction_codepoint)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
code.begin_block() # for loop block
guard_around_body_codepoint = code.insertion_point()
# Start if guard block around the body. This may be unnecessary, but
# at least it doesn't spoil indentation
code.begin_block()
code.putln("%(target)s = %(start)s + %(step)s * %(i)s;" % fmt_dict)
self.initialize_privates_to_nan(code, exclude=self.target.entry)
if self.is_parallel:
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code, should_flush=True)
self.privatize_temps(code)
if self.breaking_label_used:
# Put a guard around the loop body in case return, break or
# exceptions might be used
guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)
code.end_block() # end guard around loop body
code.end_block() # end for loop block
if self.is_parallel:
# Release the GIL and deallocate the thread state
self.end_parallel_block(code)
code.end_block() # pragma omp parallel end block
class CnameDecoratorNode(StatNode):
"""
This node is for the cname decorator in CythonUtilityCode:
@cname('the_cname')
cdef func(...):
...
In case of a cdef class the cname specifies the objstruct_cname.
node the node to which the cname decorator is applied
cname the cname the node should get
"""
child_attrs = ['node']
def analyse_declarations(self, env):
self.node.analyse_declarations(env)
node = self.node
if isinstance(node, CompilerDirectivesNode):
node = node.body.stats[0]
self.is_function = isinstance(node, FuncDefNode)
is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode,
CEnumDefNode))
e = node.entry
if self.is_function:
e.cname = self.cname
e.func_cname = self.cname
e.used = True
if e.pyfunc_cname and '.' in e.pyfunc_cname:
e.pyfunc_cname = self.mangle(e.pyfunc_cname)
elif is_struct_or_enum:
e.cname = e.type.cname = self.cname
else:
scope = node.scope
e.cname = self.cname
e.type.objstruct_cname = self.cname + '_obj'
e.type.typeobj_cname = Naming.typeobj_prefix + self.cname
e.type.typeptr_cname = self.cname + '_type'
e.type.scope.namespace_cname = e.type.typeptr_cname
e.as_variable.cname = e.type.typeptr_cname
scope.scope_prefix = self.cname + "_"
for name, entry in scope.entries.items():
if entry.func_cname:
entry.func_cname = self.mangle(entry.cname)
if entry.pyfunc_cname:
entry.pyfunc_cname = self.mangle(entry.pyfunc_cname)
def mangle(self, cname):
if '.' in cname:
# remove __pyx_base from func_cname
cname = cname.split('.')[-1]
return '%s_%s' % (self.cname, cname)
def analyse_expressions(self, env):
self.node = self.node.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
"Ensure a prototype for every @cname method in the right place"
if self.is_function and env.is_c_class_scope:
# method in cdef class, generate a prototype in the header
h_code = code.globalstate['utility_code_proto']
if isinstance(self.node, DefNode):
self.node.generate_function_header(
h_code, with_pymethdef=False, proto_only=True)
else:
from . import ModuleNode
entry = self.node.entry
cname = entry.cname
entry.cname = entry.func_cname
ModuleNode.generate_cfunction_declaration(
entry,
env.global_scope(),
h_code,
definition=True)
entry.cname = cname
self.node.generate_function_definitions(env, code)
def generate_execution_code(self, code):
self.node.generate_execution_code(code)
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
if Options.gcc_branch_hints:
branch_prediction_macros = """
/* Test for GCC > 2.95 */
#if defined(__GNUC__) \
&& (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
"""
else:
branch_prediction_macros = """
#define likely(x) (x)
#define unlikely(x) (x)
"""
#------------------------------------------------------------------------------------
printing_utility_code = UtilityCode.load_cached("Print", "Printing.c")
printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c")
#------------------------------------------------------------------------------------
# Exception raising code
#
# Exceptions are raised by __Pyx_Raise() and stored as plain
# type/value/tb in PyThreadState->curexc_*. When being caught by an
# 'except' statement, curexc_* is moved over to exc_* by
# __Pyx_GetException()
restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")
raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c")
get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c")
swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c")
reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c")
traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c")
#------------------------------------------------------------------------------------
get_exception_tuple_utility_code = UtilityCode(proto="""
static PyObject *__Pyx_GetExceptionTuple(void); /*proto*/
""",
# I doubt that calling __Pyx_GetException() here is correct as it moves
# the exception from tstate->curexc_* to tstate->exc_*, which prevents
# exception handlers later on from receiving it.
impl = """
static PyObject *__Pyx_GetExceptionTuple(void) {
PyObject *type = NULL, *value = NULL, *tb = NULL;
if (__Pyx_GetException(&type, &value, &tb) == 0) {
PyObject* exc_info = PyTuple_New(3);
if (exc_info) {
Py_INCREF(type);
Py_INCREF(value);
Py_INCREF(tb);
PyTuple_SET_ITEM(exc_info, 0, type);
PyTuple_SET_ITEM(exc_info, 1, value);
PyTuple_SET_ITEM(exc_info, 2, tb);
return exc_info;
}
}
return NULL;
}
""",
requires=[get_exception_utility_code])
|
JelleZijlstra/cython
|
Cython/Compiler/Nodes.py
|
Python
|
apache-2.0
| 357,409 | 0.002781 |
# pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
class OptimizeForInferenceTest(test.TestCase):
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype, shape=None):
node = self.create_node_def("Const", name, [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
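  # Example (values are illustrative): create_constant_node_def(
  #     "a_constant", value=1, dtype=dtypes.float32, shape=[])
  # returns a Const NodeDef with its "dtype" and "value" attrs populated,
  # ready to be appended via graph_def.node.extend([...]).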
def testOptimizeForInference(self):
self.maxDiff = 1000
unused_constant_name = "unused_constant"
unconnected_add_name = "unconnected_add"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
unused_output_add_name = "unused_output_add"
graph_def = graph_pb2.GraphDef()
unused_constant = self.create_constant_node_def(
unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
graph_def.node.extend([unused_constant])
unconnected_add_node = self.create_node_def(
"Add", unconnected_add_name,
[unused_constant_name, unused_constant_name])
self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
graph_def.node.extend([unconnected_add_node])
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
[add_name, b_constant_name])
self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
graph_def.node.extend([unused_output_add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = optimize_for_inference_lib.optimize_for_inference(
graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
self.assertProtoEquals(expected_output, output)
def testFoldBatchNorms(self):
with self.test_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
conv_op = nn_ops.conv2d(
input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
mean_op = constant_op.constant(
np.array([10, 20]), shape=[2], dtype=dtypes.float32)
variance_op = constant_op.constant(
np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
beta_op = constant_op.constant(
np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
gamma_op = constant_op.constant(
np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
test_util.set_producer_version(ops.get_default_graph(), 8)
gen_nn_ops._batch_norm_with_global_normalization(
conv_op,
mean_op,
variance_op,
beta_op,
gamma_op,
0.00001,
False,
name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
original_graph_def)
with self.test_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)
def testFoldFusedBatchNorms(self):
for data_format, use_gpu in [("NHWC", False), ("NCHW", True)]:
with self.test_session(use_gpu=use_gpu) as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs),
shape=[1, 1, 6, 2] if data_format == "NHWC" else [1, 2, 1, 6],
dtype=dtypes.float32)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
conv_op = nn_ops.conv2d(
input_op,
weights_op, [1, 1, 1, 1],
padding="SAME",
data_format=data_format,
name="conv_op")
mean_op = constant_op.constant(
np.array([10, 20]), shape=[2], dtype=dtypes.float32)
variance_op = constant_op.constant(
np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
beta_op = constant_op.constant(
np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
gamma_op = constant_op.constant(
np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
ops.get_default_graph().graph_def_versions.producer = 9
gen_nn_ops._fused_batch_norm(
conv_op,
gamma_op,
beta_op,
mean_op,
variance_op,
0.00001,
is_training=False,
data_format=data_format,
name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
original_graph_def)
with self.test_session(use_gpu=use_gpu) as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(
original_result, optimized_result, rtol=1e-04, atol=1e-06)
for node in optimized_graph_def.node:
self.assertNotEqual("FusedBatchNorm", node.op)
def testFuseResizePadAndConv(self):
with self.test_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
resize_op = image_ops.resize_bilinear(
input_op, [12, 4], align_corners=False)
pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
mode="REFLECT")
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
nn_ops.conv2d(
pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
original_graph_def, ["output"])
with self.test_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("Conv2D", node.op)
self.assertNotEqual("MirrorPad", node.op)
self.assertNotEqual("ResizeBilinear", node.op)
def testFuseResizeAndConv(self):
with self.test_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
resize_op = image_ops.resize_bilinear(
input_op, [12, 4], align_corners=False)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
nn_ops.conv2d(
resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
original_graph_def, ["output"])
with self.test_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("Conv2D", node.op)
self.assertNotEqual("MirrorPad", node.op)
def testFusePadAndConv(self):
with self.test_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
mode="REFLECT")
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
nn_ops.conv2d(
pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
original_graph_def, ["output"])
with self.test_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("Conv2D", node.op)
self.assertNotEqual("ResizeBilinear", node.op)
if __name__ == "__main__":
test.main()
|
nburn42/tensorflow
|
tensorflow/python/tools/optimize_for_inference_test.py
|
Python
|
apache-2.0
| 13,432 | 0.005435 |
import datetime
from backend import db
from cruds.crud_user_type_destinations.models import UserTypeDestinations
from cruds.crud_users.models import Users
from cruds import format_urls_in_text
class WallMessages(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
date = db.Column(db.Integer)
sender = db.Column(db.Integer, db.ForeignKey("users.id"))
destination = db.Column(db.Integer, db.ForeignKey("user_type_destinations.id"))
param_value = db.Column(db.Integer())
message = db.Column(db.Text())
def set_fields(self, fields):
self.date = fields['date']
self.sender = fields['sender']
self.destination = fields['user_type_destination_id']
self.param_value = fields['parameter']
self.message = format_urls_in_text(fields['message'])
def get_sender(self):
return Users.query.filter_by(id=self.sender).all()
def get_destinations(self):
_dict = {}
query = UserTypeDestinations.query.filter_by(id=self.destination).first().users_query
query = str(query).replace('$', str(self.param_value))
exec(query, _dict)
return _dict['users']
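    # Illustrative stored query (hypothetical row contents): a
    # UserTypeDestinations record whose users_query is
    #     "users = Users.query.filter_by(type_id=$).all()"
    # has '$' replaced by param_value before exec() fills _dict['users'].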
|
sandroandrade/emile-server
|
cruds/crud_wall_messages/models.py
|
Python
|
gpl-3.0
| 1,182 | 0.001692 |
#
# Copyright 2003-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" Conversion tools between stream tags and Python objects """
import pmt
try:
    from gnuradio.gr import tag_t
except ImportError:
    from runtime_swig import tag_t
class PythonTag(object):
" Python container for tags "
def __init__(self):
self.offset = None
self.key = None
self.value = None
self.srcid = None
def tag_to_python(tag):
""" Convert a stream tag to a Python-readable object """
newtag = PythonTag()
newtag.offset = tag.offset
newtag.key = pmt.to_python(tag.key)
newtag.value = pmt.to_python(tag.value)
newtag.srcid = pmt.to_python(tag.srcid)
return newtag
def tag_to_pmt(tag):
""" Convert a Python-readable object to a stream tag """
newtag = tag_t()
newtag.offset = tag.offset
    newtag.key = pmt.from_python(tag.key)
newtag.value = pmt.from_python(tag.value)
newtag.srcid = pmt.from_python(tag.srcid)
return newtag
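# Minimal round-trip sketch (assuming `t` is a stream tag obtained from a
# block, e.g. via get_tags_in_range()):
#     py_tag = tag_to_python(t)    # plain Python offset/key/value/srcid
#     t2 = tag_to_pmt(py_tag)      # back to a stream tag, e.g. for add_item_tag()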
|
Gabotero/GNURadioNext
|
gnuradio-runtime/python/gnuradio/gr/tag_utils.py
|
Python
|
gpl-3.0
| 1,719 | 0.004072 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0006_auto_20151115_0609'),
]
operations = [
migrations.AlterModelOptions(
name='job',
options={'ordering': ['order'], 'verbose_name': 'Job Details', 'verbose_name_plural': 'Job Postings'},
),
migrations.AddField(
model_name='job',
name='location',
field=models.CharField(default=b'', max_length=100),
),
migrations.AlterField(
model_name='job',
name='level',
field=models.IntegerField(default=0, choices=[(3500, b'Platinum'), (2000, b'Diamond'), (1500, b'Gold'), (1000, b'Silver'), (500, b'Bronze'), (0, b'None')]),
),
]
|
simeonf/sfpython
|
sfpython/jobs/migrations/0007_auto_20151115_0614.py
|
Python
|
apache-2.0
| 865 | 0.002312 |
MAJOR = 1
MINOR = 0
PATCH = 0
__version__ = "{0}.{1}.{2}".format(MAJOR, MINOR, PATCH)
|
Rdbaker/Rank
|
rank/__init__.py
|
Python
|
mit
| 87 | 0 |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import datetime
import functools
import inspect
import json
from oslo_utils import strutils
from oslo_utils import timeutils
import pecan
import six
import wsme
from wsme import types as wtypes
from ceilometer.i18n import _
operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt')
operation_kind_enum = wtypes.Enum(str, *operation_kind)
class ClientSideError(wsme.exc.ClientSideError):
def __init__(self, error, status_code=400):
pecan.response.translatable_error = error
super(ClientSideError, self).__init__(error, status_code)
class EntityNotFound(ClientSideError):
def __init__(self, entity, id):
super(EntityNotFound, self).__init__(
_("%(entity)s %(id)s Not Found") % {'entity': entity,
'id': id},
status_code=404)
class ProjectNotAuthorized(ClientSideError):
def __init__(self, id, aspect='project'):
params = dict(aspect=aspect, id=id)
super(ProjectNotAuthorized, self).__init__(
_("Not Authorized to access %(aspect)s %(id)s") % params,
status_code=401)
class AdvEnum(wtypes.wsproperty):
"""Handle default and mandatory for wtypes.Enum."""
def __init__(self, name, *args, **kwargs):
self._name = '_advenum_%s' % name
self._default = kwargs.pop('default', None)
mandatory = kwargs.pop('mandatory', False)
enum = wtypes.Enum(*args, **kwargs)
super(AdvEnum, self).__init__(datatype=enum, fget=self._get,
fset=self._set, mandatory=mandatory)
def _get(self, parent):
if hasattr(parent, self._name):
value = getattr(parent, self._name)
return value or self._default
return self._default
def _set(self, parent, value):
try:
if self.datatype.validate(value):
setattr(parent, self._name, value)
except ValueError as e:
raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1),
value, e)
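    # Usage sketch (hypothetical attribute declaration):
    #     state = AdvEnum('state', 'ok', 'insufficient data', 'alarm',
    #                     default='ok')
    # declares an Enum-typed wsproperty that falls back to 'ok' whenever the
    # backing attribute is unset.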
class Base(wtypes.DynamicBase):
@classmethod
def from_db_model(cls, m):
return cls(**(m.as_dict()))
@classmethod
def from_db_and_links(cls, m, links):
return cls(links=links, **(m.as_dict()))
def as_dict(self, db_model):
valid_keys = inspect.getargspec(db_model.__init__)[0]
if 'self' in valid_keys:
valid_keys.remove('self')
return self.as_dict_from_keys(valid_keys)
def as_dict_from_keys(self, keys):
return dict((k, getattr(self, k))
for k in keys
if hasattr(self, k) and
getattr(self, k) != wsme.Unset)
class Link(Base):
"""A link representation."""
href = wtypes.text
"The url of a link"
rel = wtypes.text
"The name of a link"
@classmethod
def sample(cls):
return cls(href=('http://localhost:8777/v2/meters/volume?'
'q.field=resource_id&'
'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
rel='volume'
)
class Query(Base):
"""Query filter."""
# The data types supported by the query.
_supported_types = ['integer', 'float', 'string', 'boolean', 'datetime']
# Functions to convert the data field to the correct type.
_type_converters = {'integer': int,
'float': float,
'boolean': functools.partial(
strutils.bool_from_string, strict=True),
'string': six.text_type,
'datetime': timeutils.parse_isotime}
_op = None # provide a default
def get_op(self):
return self._op or 'eq'
def set_op(self, value):
self._op = value
field = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the field to test"
# op = wsme.wsattr(operation_kind, default='eq')
# this ^ doesn't seem to work.
op = wsme.wsproperty(operation_kind_enum, get_op, set_op)
"The comparison operator. Defaults to 'eq'."
value = wsme.wsattr(wtypes.text, mandatory=True)
"The value to compare against the stored data"
type = wtypes.text
"The data type of value to compare against the stored data"
def __repr__(self):
# for logging calls
return '<Query %r %s %r %s>' % (self.field,
self.op,
self.value,
self.type)
@classmethod
def sample(cls):
return cls(field='resource_id',
op='eq',
value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
type='string'
)
def as_dict(self):
return self.as_dict_from_keys(['field', 'op', 'type', 'value'])
def _get_value_as_type(self, forced_type=None):
"""Convert metadata value to the specified data type.
This method is called during metadata query to help convert the
querying metadata to the data type specified by user. If there is no
data type given, the metadata will be parsed by ast.literal_eval to
try to do a smart converting.
NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised
        from wsmeext/sphinxext.py. It's OK to call it outside the Query class,
        because the "public" side of that class is actually the outside of the
API, and the "private" side is the API implementation. The method is
only used in the API implementation, so it's OK.
:returns: metadata value converted with the specified data type.
"""
type = forced_type or self.type
try:
converted_value = self.value
if not type:
try:
converted_value = ast.literal_eval(self.value)
except (ValueError, SyntaxError):
# Unable to convert the metadata value automatically
# let it default to self.value
pass
else:
if type not in self._supported_types:
# Types must be explicitly declared so the
# correct type converter may be used. Subclasses
# of Query may define _supported_types and
# _type_converters to define their own types.
raise TypeError()
converted_value = self._type_converters[type](self.value)
if isinstance(converted_value, datetime.datetime):
converted_value = timeutils.normalize_time(converted_value)
except ValueError:
msg = (_('Unable to convert the value %(value)s'
' to the expected data type %(type)s.') %
{'value': self.value, 'type': type})
raise ClientSideError(msg)
except TypeError:
msg = (_('The data type %(type)s is not supported. The supported'
' data type list is: %(supported)s') %
{'type': type, 'supported': self._supported_types})
raise ClientSideError(msg)
except Exception:
msg = (_('Unexpected exception converting %(value)s to'
' the expected data type %(type)s.') %
{'value': self.value, 'type': type})
raise ClientSideError(msg)
return converted_value
class JsonType(wtypes.UserType):
"""A simple JSON type."""
basetype = wtypes.text
name = 'json'
@staticmethod
def validate(value):
# check that value can be serialised
json.dumps(value)
return value
|
idegtiarov/ceilometer
|
ceilometer/api/controllers/v2/base.py
|
Python
|
apache-2.0
| 8,636 | 0 |
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
for i in range(len(digits) - 1, -1, -1):
if digits[i] < 9:
digits[i] += 1
return digits
digits[i] = 0
new_digits = [1]
new_digits.extend([0] * len(digits))
return new_digits
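# Worked examples: plusOne([1, 2, 9]) -> [1, 3, 0], and plusOne([9, 9]) ->
# [1, 0, 0], where the final carry produces the extended leading 1.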
|
yehzhang/RapidTest
|
examples/solutions/plus_one.py
|
Python
|
mit
| 400 | 0 |
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
#with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='jupyterdrive',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version='1.1.0',
description='Integration of IPython/Jupyter with Google drive',
long_description='',
# The project's main homepage.
url='https://github.com/jupyter/jupyter-drive',
# Author details
author='Matthias Bussonnier, Kester Tong, Kyle Kelley, Thomas Kluyver, The IPython team',
author_email='ipython-dev@scipy.org',
# Choose your license
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: IPython',
],
# What does your project relate to?
keywords='ipython jupyter google drive notebook',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['notebook'],
# have to be included in MANIFEST.in as well.
package_data={
'jupyterdrive': [ '*.json',
'*.py',
'gdrive/*.js',
],
},
)
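# Local install sketch (assuming a checkout of this repository):
#     pip install -e .
# Activating the Google Drive contents manager is a separate step
# (historically `python -m jupyterdrive`; see the project README).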
|
jupyter/jupyter-drive
|
setup.py
|
Python
|
bsd-2-clause
| 2,752 | 0.002544 |
__author__ = 'asifj'
import logging
from kafka import KafkaConsumer
import json
import traceback
from bson.json_util import dumps
from kafka import SimpleProducer, KafkaClient
from utils import Utils
logging.basicConfig(
format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
level=logging.INFO
)
inputs = []
consumer = KafkaConsumer("SAPEvent", bootstrap_servers=['172.22.147.242:9092', '172.22.147.232:9092', '172.22.147.243:9092'], auto_commit_enable=False, auto_offset_reset="smallest")
message_no = 1
inputs = consumer.fetch_messages()
'''for message in consumer:
topic = message.topic
partition = message.partition
offset = message.offset
key = message.key
message = message.value
print "================================================================================================================="
if message is not None:
try:
document = json.loads(message)
collection = document.keys()[0]
if collection == "customerMaster":
print "customerMaster"
elif collection == "srAttachements":
#print dumps(document, sort_keys=True)
inputs.append(document)
except Exception, err:
print "CustomException"
print "Kafka Message: "+str(message)
print(traceback.format_exc())
print "================================================================================================================="
print "\n"
message_no += 1
'''
# To send messages synchronously
kafka = KafkaClient('172.22.147.232:9092,172.22.147.242:9092,172.22.147.243:9092')
producer = SimpleProducer(kafka)
for i in inputs:
try:
#producer.send_messages(b'SAPEvent', json.dumps(input))
document = json.loads(str(i.value))
type = document.keys()[0]
if type == "srDetails":
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
row = []
utils = Utils()
            row = utils.validate_sr_details(document['srDetails'], row)
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print "\n\n"
    except Exception, err:
        print "Kafka: " + str(i.value)
        print str(err)
        print(traceback.format_exc())
|
asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase
|
KafkaCP.py
|
Python
|
apache-2.0
| 2,510 | 0.003586 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RegulatoryComplianceControlsOperations(object):
"""RegulatoryComplianceControlsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
regulatory_compliance_standard_name, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RegulatoryComplianceControlList"]
"""All supported regulatory compliance controls details and state for selected standard.
:param regulatory_compliance_standard_name: Name of the regulatory compliance standard object.
:type regulatory_compliance_standard_name: str
:param filter: OData filter. Optional.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegulatoryComplianceControlList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.security.models.RegulatoryComplianceControlList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceControlList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'regulatoryComplianceStandardName': self._serialize.url("regulatory_compliance_standard_name", regulatory_compliance_standard_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RegulatoryComplianceControlList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards/{regulatoryComplianceStandardName}/regulatoryComplianceControls'} # type: ignore
def get(
self,
regulatory_compliance_standard_name, # type: str
regulatory_compliance_control_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RegulatoryComplianceControl"
"""Selected regulatory compliance control details and state.
:param regulatory_compliance_standard_name: Name of the regulatory compliance standard object.
:type regulatory_compliance_standard_name: str
:param regulatory_compliance_control_name: Name of the regulatory compliance control object.
:type regulatory_compliance_control_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegulatoryComplianceControl, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RegulatoryComplianceControl
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceControl"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'regulatoryComplianceStandardName': self._serialize.url("regulatory_compliance_standard_name", regulatory_compliance_standard_name, 'str'),
'regulatoryComplianceControlName': self._serialize.url("regulatory_compliance_control_name", regulatory_compliance_control_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegulatoryComplianceControl', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards/{regulatoryComplianceStandardName}/regulatoryComplianceControls/{regulatoryComplianceControlName}'} # type: ignore
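    # Usage sketch (hypothetical wiring; credential, subscription and
    # location values are placeholders, and the client signature may differ
    # across SDK versions):
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.security import SecurityCenter
    #     client = SecurityCenter(DefaultAzureCredential(), "<subscription-id>", "<asc-location>")
    #     for ctrl in client.regulatory_compliance_controls.list("<standard-name>"):
    #         print(ctrl.name, ctrl.state)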
|
Azure/azure-sdk-for-python
|
sdk/security/azure-mgmt-security/azure/mgmt/security/operations/_regulatory_compliance_controls_operations.py
|
Python
|
mit
| 9,157 | 0.004477 |
# -*- coding: utf-8 -*-
from __future__ import division
from openfisca_core import reforms
from openfisca_france.model.base import FloatCol, Individus, Variable
# Build function
def build_reform(tax_benefit_system):
Reform = reforms.make_reform(
key = 'revenu_de_base_cotisations',
name = u"Réforme des cotisations pour un Revenu de base",
reference = tax_benefit_system,
)
class cotisations_contributives(Variable):
column = FloatCol
entity_class = Individus
label = u"Nouvelles cotisations contributives"
def function(self, simulation, period):
ags = simulation.calculate('ags', period)
agff_tranche_a_employeur = simulation.calculate('agff_tranche_a_employeur', period)
apec_employeur = simulation.calculate('apec_employeur', period)
arrco_tranche_a_employeur = simulation.calculate('arrco_tranche_a_employeur', period)
assedic_employeur = simulation.calculate('assedic_employeur', period)
cotisation_exceptionnelle_temporaire_employeur = simulation.calculate(
'cotisation_exceptionnelle_temporaire_employeur', period)
fonds_emploi_hospitalier = simulation.calculate('fonds_emploi_hospitalier', period)
ircantec_employeur = simulation.calculate('ircantec_employeur', period)
pension_civile_employeur = simulation.calculate('pension_civile_employeur', period)
prevoyance_obligatoire_cadre = simulation.calculate('prevoyance_obligatoire_cadre', period)
rafp_employeur = simulation.calculate('rafp_employeur', period)
vieillesse_deplafonnee_employeur = simulation.calculate('vieillesse_deplafonnee_employeur', period)
vieillesse_plafonnee_employeur = simulation.calculate('vieillesse_plafonnee_employeur', period)
allocations_temporaires_invalidite = simulation.calculate('allocations_temporaires_invalidite', period)
accident_du_travail = simulation.calculate('accident_du_travail', period)
agff_tranche_a_employe = simulation.calculate('agff_tranche_a_employe', period)
agirc_tranche_b_employe = simulation.calculate('agirc_tranche_b_employe', period)
apec_employe = simulation.calculate('apec_employe', period)
arrco_tranche_a_employe = simulation.calculate('arrco_tranche_a_employe', period)
assedic_employe = simulation.calculate('assedic_employe', period)
cotisation_exceptionnelle_temporaire_employe = simulation.calculate(
'cotisation_exceptionnelle_temporaire_employe', period)
ircantec_employe = simulation.calculate('ircantec_employe', period)
pension_civile_employe = simulation.calculate('pension_civile_employe', period)
rafp_employe = simulation.calculate('rafp_employe', period)
vieillesse_deplafonnee_employe = simulation.calculate('vieillesse_deplafonnee_employe', period)
vieillesse_plafonnee_employe = simulation.calculate('vieillesse_plafonnee_employe', period)
cotisations_contributives = (
                # contributory employer contributions in the private sector
ags +
agff_tranche_a_employeur +
apec_employeur +
arrco_tranche_a_employeur +
assedic_employeur +
cotisation_exceptionnelle_temporaire_employeur +
                prevoyance_obligatoire_cadre +  # TODO: contributory or not?
vieillesse_deplafonnee_employeur +
vieillesse_plafonnee_employeur +
                # contributory employer contributions in the public sector
fonds_emploi_hospitalier +
ircantec_employeur +
pension_civile_employeur +
rafp_employeur +
                # former non-contributory employer contributions, classified
                # here as contributory
allocations_temporaires_invalidite +
accident_du_travail +
                # former contributory employee contributions in the private sector
agff_tranche_a_employe +
agirc_tranche_b_employe +
apec_employe +
arrco_tranche_a_employe +
assedic_employe +
cotisation_exceptionnelle_temporaire_employe +
vieillesse_deplafonnee_employe +
vieillesse_plafonnee_employe +
                # former contributory employee contributions in the public sector
ircantec_employe +
pension_civile_employe +
rafp_employe
)
return period, cotisations_contributives
class nouv_salaire_de_base(Variable):
reference = tax_benefit_system.column_by_name['salaire_de_base']
        # In this reform, the gross salary is defined as the "super-gross"
        # salary minus the contributory social contributions
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
salsuperbrut = simulation.calculate('salsuperbrut', period)
cotisations_contributives = simulation.calculate('cotisations_contributives', period)
nouv_salaire_de_base = (
salsuperbrut -
cotisations_contributives
)
return period, nouv_salaire_de_base
class nouv_csg(Variable):
reference = tax_benefit_system.column_by_name['csg_imposable_salaire']
    # A single CSG at 22.5% is applied, financing all non-contributive benefits
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
nouv_salaire_de_base = simulation.calculate('nouv_salaire_de_base', period)
nouv_csg = (
-0.225 * nouv_salaire_de_base
)
return period, nouv_csg
class salaire_net(Variable):
reference = tax_benefit_system.column_by_name['salaire_net']
    # The new CSG (not the one financing the basic income) is deducted to obtain the new net salary
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
nouv_salaire_de_base = simulation.calculate('nouv_salaire_de_base', period)
nouv_csg = simulation.calculate('nouv_csg', period)
salaire_net = (
nouv_salaire_de_base +
nouv_csg
)
return period, salaire_net
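# Worked example (illustrative figures only): with a super-gross salary of
# 3000 and contributive contributions of 1000, the new base salary is
# 3000 - 1000 = 2000; the single 22.5% CSG is -0.225 * 2000 = -450, so the
# new net salary is 2000 - 450 = 1550.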
class salaire_imposable(Variable):
reference = tax_benefit_system.column_by_name['salaire_imposable']
    # Starting from the new net salary, and compared with the current taxable
    # salary, we removed: overtime hours and the deductibility of the CSG
def function(self, simulation, period):
hsup = simulation.calculate('hsup', period)
salaire_net = simulation.calculate('salaire_net', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
supp_familial_traitement = simulation.calculate('supp_familial_traitement', period)
rev_microsocial_declarant1 = simulation.calculate('rev_microsocial_declarant1', period)
return period, (
salaire_net +
primes_fonction_publique +
indemnite_residence +
supp_familial_traitement +
hsup +
rev_microsocial_declarant1
)
return Reform()
|
openfisca/openfisca-france-extension-revenu-de-base
|
openfisca_france_extension_revenu_de_base/cotisations.py
|
Python
|
agpl-3.0
| 7,784 | 0.004758 |
# -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{"name": "Base Comments Templates",
"summary": "Comments templates on documents",
"version": "8.0.1.0.0",
"depends": ["base"],
"author": "Camptocamp,Odoo Community Association (OCA)",
"data": ["comment_view.xml",
'security/ir.model.access.csv',
],
"category": "Sale",
"installable": True,
"active": False, }
|
Endika/account-invoice-reporting
|
base_comment_template/__openerp__.py
|
Python
|
agpl-3.0
| 1,121 | 0 |
import contextlib
import gc
import multiprocessing
import os
from memsql_loader.util.apsw_storage import APSWStorage
from memsql_loader.util import paths
MEMSQL_LOADER_DB = 'memsql_loader.db'
def get_loader_db_path():
return os.path.join(paths.get_data_dir(), MEMSQL_LOADER_DB)
# IMPORTANT NOTE: This class cannot be shared across forked processes unless
# you use fork_wrapper.
class LoaderStorage(APSWStorage):
_instance = None
_initialized = False
_instance_lock = multiprocessing.RLock()
# We use LoaderStorage as a singleton.
def __new__(cls, *args, **kwargs):
with cls._instance_lock:
if cls._instance is None:
cls._instance = super(LoaderStorage, cls).__new__(
cls, *args, **kwargs)
cls._initialized = False
return cls._instance
@classmethod
def drop_database(cls):
with cls._instance_lock:
if os.path.isfile(get_loader_db_path()):
os.remove(get_loader_db_path())
if os.path.isfile(get_loader_db_path() + '-shm'):
os.remove(get_loader_db_path() + '-shm')
if os.path.isfile(get_loader_db_path() + '-wal'):
os.remove(get_loader_db_path() + '-wal')
cls._instance = None
@classmethod
@contextlib.contextmanager
def fork_wrapper(cls):
# This context manager should be used around any code that forks new
# processes that will use a LoaderStorage object (e.g. Worker objects).
# This ensures that we don't share SQLite connections across forked
# processes.
with cls._instance_lock:
if cls._instance is not None:
cls._instance.close_connections()
# We garbage collect here to clean up any SQLite objects we
# may have missed; this is important because any surviving
# objects post-fork will mess up SQLite connections in the
# child process. We use generation=2 to collect as many
# objects as possible.
gc.collect(2)
yield
with cls._instance_lock:
if cls._instance is not None:
cls._instance.setup_connections()
def __init__(self):
with LoaderStorage._instance_lock:
# Since this is a singleton object, we don't want to call the
# parent object's __init__ if we've already instantiated this
# object in __new__. However, we may have closed this object's
# connections in fork_wrapper above; in that case, we want to set
# up new database connections.
if not LoaderStorage._initialized:
super(LoaderStorage, self).__init__(get_loader_db_path())
LoaderStorage._initialized = True
return
elif not self._db or not self._db_t:
self.setup_connections()
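
# A minimal usage sketch of the fork-safety contract above (run_worker is a
# hypothetical stand-in for the real Worker objects in memsql-loader).
# fork_wrapper() closes the shared SQLite connections before the fork and
# reopens them afterwards, so the child never inherits a live connection:
#
#     def run_worker():
#         storage = LoaderStorage()  # same singleton, fresh connections
#         ...
#
#     with LoaderStorage.fork_wrapper():
#         p = multiprocessing.Process(target=run_worker)
#         p.start()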
|
memsql/memsql-loader
|
memsql_loader/loader_db/storage.py
|
Python
|
apache-2.0
| 2,959 | 0.000676 |
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Exiern'
language = 'en'
url = 'http://www.exiern.com/'
start_date = '2005-09-06'
rights = 'Dan Standing'
class Crawler(CrawlerBase):
history_capable_days = 30
schedule = 'Tu,Th'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
feed = self.parse_feed('http://www.exiern.com/?feed=rss2')
for entry in feed.for_date(pub_date):
url = entry.summary.src('img', allow_multiple=True)
if url:
url = url[0]
url = url.replace('comics-rss', 'comics')
title = entry.title
return CrawlerImage(url, title)
|
datagutten/comics
|
comics/comics/exiern.py
|
Python
|
agpl-3.0
| 784 | 0 |
"""
Created on 04.07.2017
:author: Humbert Moreaux
Tuleap REST API Client for Python
Copyright (c) Humbert Moreaux, All rights reserved.
This Python module is free software; you can redistribute it and/or modify it under the terms of the
GNU Lesser General Public License as published by the Free Software Foundation; either version 3.0
of the License, or (at your option) any later version.
This Python module is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library. If
not, see <http://www.gnu.org/licenses/>.
"""
import json
# Public -------------------------------------------------------------------------------------------
class PullRequests(object):
"""
Handles "/pull_requests" methods of the Tuleap REST API.
Fields type information:
:type _connection: Tuleap.RestClient.Connection.Connection
:type _data: dict | list[dict]
"""
def __init__(self, connection):
"""
Constructor
:param connection: connection object (must already be logged in)
:type connection: Tuleap.RestClient.Connection.Connection
"""
self._connection = connection
self._data = None
def get_data(self):
"""
Get data received in the last response message.
:return: Response data
:rtype: dict | list[dict]
        :note: One of the request methods should be successfully executed before
               this method is called!
"""
return self._data
def request_pull_request(self, pull_request_id):
"""
Request pull request data from the server using the "/pull_requests" method of the Tuleap REST
API.
:param int pull_request_id: Pull request ID
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request
relative_url = "/pull_requests/{:}".format(pull_request_id)
success = self._connection.call_get_method(relative_url)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def request_comments(self, pull_request_id, limit=10, offset=None):
"""
Request pull request comments using the "/pull_requests" method of the Tuleap REST API.
:param pull_request_id: Pull request ID
:param int limit: Optional parameter for maximum limit of returned projects
:param int offset: Optional parameter for start index for returned projects
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request comments
relative_url = "/pull_requests/{:}/comments".format(pull_request_id)
parameters = dict()
if limit is not None:
parameters["limit"] = limit
if offset is not None:
parameters["offset"] = offset
success = self._connection.call_get_method(relative_url, parameters)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def request_file_diff(self, pull_request_id, path):
"""
Request pull request diff of a given file using the "/pull_requests" method of the Tuleap REST API.
:param pull_request_id: Pull request ID
:param path: File path
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request diff of a given file
relative_url = "/pull_requests/{:}/file_diff".format(pull_request_id)
parameters = dict()
parameters["path"] = path
success = self._connection.call_get_method(relative_url, parameters)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def request_files(self, pull_request_id):
"""
Request pull request files using the "/pull_requests" method of the Tuleap REST API.
:param pull_request_id: Pull request ID
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request files
relative_url = "/pull_requests/{:}/files".format(pull_request_id)
success = self._connection.call_get_method(relative_url)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def create_pull_request(self, repository_id, branch_src, repository_dest_id, branch_dest):
"""
        Create a pull request on the server using the "/pull_requests" method of the Tuleap REST API.
        :param int repository_id: Source repository ID
        :param string branch_src: Source branch name
        :param int repository_dest_id: Destination repository ID
        :param string branch_dest: Destination branch name
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Create a pull request
relative_url = "/pull_requests"
parameters = dict()
if repository_id and branch_src and repository_dest_id and branch_dest:
parameters["content"] = {
"repository_id": repository_id,
"branch_src": branch_src,
"repository_dest_id": repository_dest_id,
"branch_dest": branch_dest,
}
else:
raise Exception("Error: invalid content values")
success = self._connection.call_post_method(relative_url, parameters)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def get_last_response_message(self):
"""
Get last response message.
:return: Last response message
:rtype: requests.Response
:note: This is just a proxy to the connection's method.
"""
        return self._connection.get_last_response_message()
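
# A minimal usage sketch (how the Connection is constructed and logged in is
# assumed here and may differ; only is_logged_in()/call_get_method() are
# exercised by this class):
#
#     from Tuleap.RestClient.Connection import Connection
#
#     connection = Connection()
#     # ... log in to your Tuleap server with your credentials, then:
#     pull_requests = PullRequests(connection)
#     if pull_requests.request_pull_request(42):
#         print(pull_requests.get_data())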
|
djurodrljaca/tuleap-rest-api-client
|
Tuleap/RestClient/PullRequests.py
|
Python
|
lgpl-3.0
| 6,833 | 0.002927 |
# Add any code that updates the current probability
# values of any of the nodes here.
# For example, here is a method that updates the probability of
# a single node, where this node is assumed to have a single parent.
def update_node_with_one_parent(n):
'''
For all possible values pv of the current node,
For all possible values ppv of the parent,
Look up the conditional probability of pv given ppv.
and multiply it by the current prob. of that parent state (ppv)
and accumulate these to get the current probability of pv.
'''
    if len(n.parents) != 1:
print "The function update_node_with_one_parent cannot handle node "+n.name
print "It does not have exactly one parent."
return
parent = n.parents[0]
for pv in n.possible_values:
n.current_prob[pv] = 0.0
for ppv in n.parents[0].possible_values:
conditional = n.name+'='+str(pv)+'|'+parent.name+'='+str(ppv)
n.current_prob[pv] += n.p[conditional] * parent.current_prob[ppv]
def gen_cartesian_product(sets):
'''Return the cartesian product of a list of sets.
For example: [['a','b'],[0,1],[7,8,9]] should give a 12 element set of triples.'''
    if len(sets) == 1:
        return map(lambda elt: [elt], sets[0])
subproduct = gen_cartesian_product(sets[1:])
prod = []
for elt in sets[0]:
new_tuples = map(lambda tup: [elt]+tup, subproduct)
prod = prod + new_tuples
return prod
def update_node_with_k_parents(n):
'''
For all possible values pv of the current node,
For all possible values ppv of each of the parents,
Look up the conditional probability of pv given ppv.
and multiply it by the current prob. of that parent state (ppv)
and accumulate these to get the current probability of pv.
'''
print "Updating node: "+n.name
if len(n.parents) < 1:
print "The function update_node_with_k_parents cannot handle node "+n.name
print "It does not have any parents."
return
cartesian_prod = gen_cartesian_product(map(lambda p: p.possible_values, n.parents))
parent_names = map(lambda p: p.name, n.parents)
for pv in n.possible_values:
n.current_prob[pv] = 0.0
print " Updating current prob. of "+pv
for ppv_tuple in cartesian_prod:
print " Adding the contribution for "+str(ppv_tuple)
            conditional = n.name + '=' + pv + '|' + str(parent_names) + '=' + str(ppv_tuple)
parent_vector_prob = reduce(lambda a,b:a*b, map(lambda p, pv:p.current_prob[pv], n.parents, ppv_tuple))
n.current_prob[pv] += n.p[conditional] * parent_vector_prob
#update_node_with_one_parent(nodeB)
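
# A minimal sketch of the node interface assumed by the functions above (the
# real node class lives elsewhere in this project; the attribute names below
# are inferred from the code):
#
#     class Node:
#         def __init__(self, name, possible_values):
#             self.name = name
#             self.possible_values = possible_values
#             self.parents = []
#             self.current_prob = {}  # value -> current probability
#             self.p = {}             # conditional probability table
#
# For a node B with a single parent A, update_node_with_one_parent(B)
# computes P(B=b) = sum over a of P(B=b | A=a) * P(A=a), looking up each
# conditional under the key "B=b|A=a" in B.p.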
|
uraplutonium/adtree-py
|
src/BayesUpdating.py
|
Python
|
gpl-2.0
| 2,726 | 0.005869 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test setting the JAVAC variable.
"""
import os
import os.path
import string
import sys
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myjavac.py', r"""
import sys
args = sys.argv[1:]
while args:
a = args[0]
if a == '-d':
args = args[1:]
elif a == '-sourcepath':
args = args[1:]
else:
break
args = args[1:]
for file in args:
infile = open(file, 'rb')
outfile = open(file[:-5] + '.class', 'wb')
for l in infile.readlines():
if l[:9] != '/*javac*/':
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(tools = ['javac'],
JAVAC = r'%(_python_)s myjavac.py')
env.Java(target = '.', source = '.')
""" % locals())
test.write('test1.java', """\
test1.java
/*javac*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match('test1.class', "test1.java\nline 3\n")
if os.path.normcase('.java') == os.path.normcase('.JAVA'):
test.write('SConstruct', """\
env = Environment(tools = ['javac'],
JAVAC = r'%(_python_)s myjavac.py')
env.Java(target = '.', source = '.')
""" % locals())
test.write('test2.JAVA', """\
test2.JAVA
/*javac*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match('test2.class', "test2.JAVA\nline 3\n")
test.pass_test()
|
datalogics/scons
|
test/Java/JAVAC.py
|
Python
|
mit
| 2,542 | 0.003934 |
#!/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Billy Olsen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from email.mime.text import MIMEText
from jinja2 import Environment, FileSystemLoader
from datetime import datetime as dt
import os
import six
import smtplib
# Get the directory for this file.
SECRET_SANTA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'templates')
j2env = Environment(loader=FileSystemLoader(SECRET_SANTA_DIR),
trim_blocks=False)
class SantaMail(object):
"""
The SantaMail object is used to send email. This class will load email
    templates that should be sent out (the master list email and the email
    for each Secret Santa).
    Templates are loaded from the template directory and are configurable
    via the template_master and template_santa configuration variables.
"""
REQUIRED_PARAMS = ['author', 'email', 'smtp', 'username', 'password']
def __init__(self, author, email, smtp, username, password,
template_master="master.tmpl", template_santa="santa.tmpl"):
self.author = author
self.email = email
self.smtp = smtp
self.username = username
self.password = password
self.template_master = template_master
self.template_santa = template_santa
def send(self, pairings):
"""
Sends the emails out to the secret santa participants.
The secret santa host (the user configured to send the email from)
will receive a copy of the master list.
Each Secret Santa will receive an email with the contents of the
template_santa template.
"""
for pair in pairings:
self._send_to_secret_santa(pair)
self._send_master_list(pairings)
def _do_send(self, toaddr, body, subject):
try:
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = self.email
msg['To'] = toaddr
server = smtplib.SMTP(self.smtp)
server.starttls()
server.login(self.username, self.password)
server.sendmail(self.email, [toaddr], msg.as_string())
server.quit()
        except Exception:
print("Error sending email to %s!" % toaddr)
def _send_to_secret_santa(self, pair):
"""
Sends an email to the secret santa pairing.
"""
(giver, receiver) = pair
template = j2env.get_template(self.template_santa)
body = template.render(giver=giver, receiver=receiver)
year = dt.utcnow().year
subject = ('Your %s Farmer Family Secret Santa Match' % year)
self._do_send(giver.email, body, subject)
def _send_master_list(self, pairings):
"""
Sends an email to the game master.
"""
pair_list = []
for pair in pairings:
(giver, recipient) = pair
pair_list.append("%s -> %s" % (giver.name, recipient.name))
template = j2env.get_template(self.template_master)
body = template.render(pairs=pair_list)
year = dt.utcnow().year
subject = ('%s Farmer Family Secret Santa Master List' % year)
self._do_send(self.email, body, subject)
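
# A minimal usage sketch (participant objects are assumed to expose .name and
# .email, as used by _send_to_secret_santa/_send_master_list above; the SMTP
# values are placeholders):
#
#     santa = SantaMail(author='Host', email='host@example.com',
#                       smtp='smtp.example.com:587',
#                       username='host@example.com', password='secret')
#     santa.send(pairings)  # pairings: list of (giver, receiver) tuples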
|
wolsen/secret-santa
|
secretsanta/mail.py
|
Python
|
mit
| 4,308 | 0.001161 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0013_auto_20160210_0400'),
]
operations = [
migrations.AlterField(
model_name='category',
name='order',
field=models.PositiveIntegerField(default=1),
),
migrations.AlterField(
model_name='question',
name='order',
field=models.PositiveIntegerField(default=1),
),
]
|
moshthepitt/answers
|
questions/migrations/0014_auto_20160210_0406.py
|
Python
|
mit
| 573 | 0 |
import pytest
from amoco.config import conf
conf.UI.formatter = 'Null'
conf.Cas.unicode = False
conf.UI.unicode = False
from amoco.arch.tricore import cpu
def test_decoder_START():
c = b'\x91\x00\x00\xf8'
i = cpu.disassemble(c)
assert i.mnemonic=='MOVH_A'
assert i.operands[0] is cpu.A[15]
assert i.operands[1]==0x8000
c = b'\xd9\xff\x14\x02'
i = cpu.disassemble(c)
assert i.mnemonic=="LEA"
assert i.mode=="Long-offset"
assert i.operands[2]==0x2014
c = b'\xdc\x0f'
i = cpu.disassemble(c)
assert i.mnemonic=="JI"
assert i.operands[0]==cpu.A[15]
c = b'\x00\x90'
i = cpu.disassemble(c)
assert i.mnemonic=="RET"
c = b'\x00\x00'
i = cpu.disassemble(c)
assert i.mnemonic=="NOP"
def test_decoder_ldw():
c = b'\x19\xf0\x10\x16'
i = cpu.disassemble(c)
assert str(i)=="ld.w d0 , a15, 0x6050"
def test_movh():
c = b'\x7b\xd0\x38\xf1'
i = cpu.disassemble(c)
|
bdcht/amoco
|
tests/test_arch_tricore.py
|
Python
|
gpl-2.0
| 918 | 0.037037 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Various test reports and formatters are defined here. These are used for
unit test and test framework reporting.
Generally, you don't instantiate report classes directly. Instead, you call
the 'get_report' function in this module with a particular pattern of
parameters and it will return a report object built accordingly. Any
necessary report objects and modules are specified there, and imported as
necessary.
e.g.:
get_report( ("StandardReport", "reportfile", "text/plain") )
Note that the argument is a single tuple. A list of these may be supplied
for a "stacked" report.
The first argument is a report object name (plus module, if necessary).
Any remaining arguments in the tuple are passed to the specified report's
constructor.
"""
__all__ = ['ANSI', 'Eventlog', 'Curses', 'Html', 'Email']
import sys, os
from pycopia import UserFile
from pycopia import timelib
NO_MESSAGE = "no message"
# map mime type to formatter class name and file extension
_FORMATTERS = {
None: ("StandardFormatter", "txt"), # default
"text/plain": ("StandardFormatter", "txt"), # plain text
"text/ascii": ("StandardFormatter", "asc"), # plain text
"text/html": ("pycopia.reports.Html.XHTMLFormatter", "html"), # HTML
"text/ansi": ("pycopia.reports.ANSI.ANSIFormatter", "ansi"), # text with ANSI-term color escapes
"text/ansi; charset=utf8": ("pycopia.reports.utf8ANSI.UTF8Formatter", "ansi"),
}
# register another formatter object that adheres to the NullFormatter
# interface.
def register_formatter(mimetype, classpath, fileextension):
global _FORMATTERS
_FORMATTERS[mimetype] = (classpath, fileextension)
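# Example (hypothetical formatter; any dotted path to a class implementing
# the NullFormatter interface below will work):
#
#     register_formatter("text/x-wiki", "mypkg.WikiFormatter", "wiki")
#     rpt = get_report(("StandardReport", "outfile", "text/x-wiki"))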
class ReportError(Exception):
pass
class ReportFindError(ReportError):
pass
class BadReportError(ReportError):
pass
class NullFormatter(object):
def title(self, title):
return ""
def heading(self, text, level=1):
return ""
def paragraph(self, text, level=1):
return ""
def summaryline(self, line):
return ""
def message(self, msgtype, msg, level=1):
return ""
def passed(self, msg=NO_MESSAGE, level=1):
return self.message("PASSED", msg, level)
def failed(self, msg=NO_MESSAGE, level=1):
return self.message("FAILED", msg, level)
def expectedfail(self, msg=NO_MESSAGE, level=1):
return self.message("EXPECTED_FAIL", msg, level)
def incomplete(self, msg=NO_MESSAGE, level=1):
return self.message("INCOMPLETE", msg, level)
def abort(self, msg=NO_MESSAGE, level=1):
return self.message("ABORT", msg, level)
def info(self, msg, level=1):
return self.message("INFO", msg, level)
def diagnostic(self, msg, level=1):
return self.message("DIAGNOSTIC", msg, level)
def text(self, text):
return text
def analysis(self, text):
return text
def url(self, text, url):
return ""
def page(self):
return ""
def endpage(self):
return ""
def section(self):
return ""
def endsection(self):
return ""
def initialize(self, *args):
return ""
def finalize(self):
return ""
class NullReport(object):
"""NullReport defines the interface for report objects. It is the base
class for all Report objects."""
# overrideable methods
def write(self, text):
raise NotImplementedError, "override me!"
def writeline(self, text=""):
raise NotImplementedError, "override me!"
def writelines(self, lines):
raise NotImplementedError, "override me!"
filename = property(lambda s: None)
filenames = property(lambda s: [])
def initialize(self, config=None): pass
def logfile(self, filename): pass
def finalize(self): pass
def add_title(self, title): pass
def add_heading(self, text, level=1): pass
def add_message(self, msgtype, msg, level=1): pass
def add_summary(self, entries): pass
def add_text(self, text): pass
def add_analysis(self, text): pass
def add_data(self, data, datatype, note=None): pass
def add_url(self, text, url): pass
def passed(self, msg=NO_MESSAGE, level=1): pass
def failed(self, msg=NO_MESSAGE, level=1): pass
def expectedfail(self, msg=NO_MESSAGE, level=1): pass
def incomplete(self, msg=NO_MESSAGE, level=1): pass
def abort(self, msg=NO_MESSAGE, level=1): pass
def info(self, msg, level=1): pass
def diagnostic(self, msg, level=1): pass
def newpage(self): pass
def newsection(self): pass
class DebugReport(NullReport):
"""Used for debugging tests and reports. Just emits plain messages.
"""
# overrideable methods
def write(self, text):
raise NotImplementedError, "override me!"
def writeline(self, text=""):
raise NotImplementedError, "override me!"
def writelines(self, lines):
raise NotImplementedError, "override me!"
filename = property(lambda s: "")
filenames = property(lambda s: [])
def initialize(self, config=None):
print "initialize: %r" % (config,)
def logfile(self, filename):
print "logfile:", filename
def finalize(self):
print "finalize"
def add_title(self, title):
print "add_title:", title
def add_heading(self, text, level=1):
print "add_heading:", repr(text), level
def add_message(self, msgtype, msg, level=1):
print "add_message:", msgtype, repr(msg), level
def add_summary(self, entries):
print "add_summary"
def add_text(self, text):
print "add_text"
def add_analysis(self, text):
print "add_analysis"
def add_data(self, data, datatype, note=None):
print "add_data type: %s note: %s" % (datatype, note)
def add_url(self, text, url):
print "add_url:", repr(text), repr(url)
def passed(self, msg=NO_MESSAGE, level=1):
print "passed:", repr(msg), level
def failed(self, msg=NO_MESSAGE, level=1):
print "failed:", repr(msg), level
def expectedfail(self, msg=NO_MESSAGE, level=1):
print "expected fail:",repr(msg), level
def incomplete(self, msg=NO_MESSAGE, level=1):
print "incomplete:", repr(msg), level
def abort(self, msg=NO_MESSAGE, level=1):
print "abort:", repr(msg), level
def info(self, msg, level=1):
print "info:", repr(msg), level
def diagnostic(self, msg, level=1):
print "diagnostic:", repr(msg), level
def newpage(self):
print "newpage"
def newsection(self):
print "newsection"
class StandardReport(UserFile.FileWrapper, NullReport):
"""StandardReport writes to a file or file-like object, such as stdout. If
the filename specified is "-" then use stdout. """
def __init__(self, name=None, formatter=None):
self._do_close = 0
self._formatter, self.fileext = get_formatter(formatter)
if type(name) is str:
if name == "-":
fo = sys.stdout
else:
name = "%s.%s" % (name, self.fileext)
fo = open(os.path.expanduser(os.path.expandvars(name)), "w")
self._do_close = 1
elif name is None:
fo = sys.stdout
else:
fo = name # better be a file object
UserFile.FileWrapper.__init__(self, fo)
filename = property(lambda s: s._fo.name)
filenames = property(lambda s: [s._fo.name])
def initialize(self, config=None):
self.write(self._formatter.initialize())
def finalize(self):
self.write(self._formatter.finalize())
self.flush()
if self._do_close:
self.close()
def add_title(self, title):
self.write(self._formatter.title(title))
def add_heading(self, text, level=1):
self.write(self._formatter.heading(text, level))
def add_message(self, msgtype, msg, level=1):
self.write(self._formatter.message(msgtype, msg, level))
def passed(self, msg=NO_MESSAGE, level=1):
self.write(self._formatter.passed(msg, level))
def failed(self, msg=NO_MESSAGE, level=1):
self.write(self._formatter.failed(msg, level))
def expectedfail(self, msg=NO_MESSAGE, level=1):
self.write(self._formatter.expectedfail(msg, level))
def incomplete(self, msg=NO_MESSAGE, level=1):
self.write(self._formatter.incomplete(msg, level))
def abort(self, msg=NO_MESSAGE, level=1):
self.write(self._formatter.abort(msg, level))
def info(self, msg, level=1):
self.write(self._formatter.info(msg, level))
def diagnostic(self, msg, level=1):
self.write(self._formatter.diagnostic(msg, level))
def add_text(self, text):
self.write(self._formatter.text(text))
def add_analysis(self, text):
self.write(self._formatter.analysis(text))
def add_data(self, data, datatype, note=None):
self.write(self._formatter.text(
" DATA type: %s note: %s\n" % (datatype, note)))
def add_url(self, text, url):
self.write(self._formatter.url(text, url))
def add_summary(self, entries):
lines = map(self._formatter.summaryline, entries)
self.write("\n".join(lines))
self.write("\n")
def newpage(self):
self.write(self._formatter.page())
def newsection(self):
self.write(self._formatter.section())
class StandardFormatter(NullFormatter):
"""The Standard formatter just emits plain ASCII text."""
MIMETYPE = "text/plain"
def title(self, title):
s = ["="*len(title)]
s.append("%s" % title)
s.append("="*len(title))
s.append("\n")
return "\n".join(s)
def heading(self, text, level=1):
s = ["\n"]
s.append("%s%s" % (" "*(level-1), text))
s.append("%s%s" % (" "*(level-1), "-"*len(text)))
s.append("\n")
return "\n".join(s)
def message(self, msgtype, msg, level=1):
if msgtype.find("TIME") >= 0:
msg = timelib.localtimestamp(msg)
return "%s%s: %s\n" % (" "*(level-1), msgtype, msg)
def text(self, text):
return text
def analysis(self, text):
return "ANALYSIS:\n" + text
def url(self, text, url):
return "%s: <%s>\n" % (text, url)
def summaryline(self, s):
s = str(s)
if len(s) <= 66:
return s
halflen = (min(66, len(s))/2)-2
return s[:halflen]+"[..]"+s[-halflen:]
def page(self):
return "\n\n\n"
def section(self):
return "\n"
def paragraph(self, text, level=1):
return text+"\n"
class FailureReport(StandardReport):
"FailureReport() A Report type that only prints failures and diagnostics."
def __init__(self, name=None, formatter=None):
StandardReport.__init__(self, name, formatter)
self.state = 0
def add_message(self, msgtype, msg, level=1):
if msgtype == "FAILED":
self.state = -1
self.write(self._formatter.message(msgtype, msg, level))
else:
if self.state == -1 and msgtype == "DIAGNOSTIC":
self.write(self._formatter.message(msgtype, msg, level))
else:
self.state = 0
class TerseReport(StandardReport):
"TerseReport() A Report type that only prints results."
def __init__(self, name=None, formatter=None):
StandardReport.__init__(self, name, formatter)
def add_message(self, msgtype, msg, level=1):
if msgtype in ("PASSED", "FAILED"):
self.write(self._formatter.message(msgtype, msg, level))
#def add_title(self, title): pass
def add_heading(self, text, level=1): pass
def add_summary(self, entries): pass
def add_text(self, text): pass
def add_url(self, text, url): pass
class StackedReport(object):
"""StackedReport allows stacking of reports, which creates multiple
reports simultaneously. It adds a new method, add_report() that is
used to add on a new report object. """
def __init__(self, rpt=None):
self._reports = []
if rpt:
self.add_report(rpt)
def add_report(self, rpt_or_name, *args):
"""adds a new report to the stack."""
if type(rpt_or_name) is str:
            rpt = get_report((rpt_or_name,) + args)
self._reports.append(rpt)
elif isinstance(rpt_or_name, NullReport):
self._reports.append(rpt_or_name)
else:
raise BadReportError, "StackedReport: report must be name of report or report object."
def _get_names(self):
rv = []
for rpt in self._reports:
fn = rpt.filename
if fn:
rv.append(fn)
return rv
filenames = property(_get_names)
def add_title(self, title):
map(lambda rpt: rpt.add_title(title), self._reports)
def write(self, text):
map(lambda rpt: rpt.write(text), self._reports)
def writeline(self, text):
map(lambda rpt: rpt.writeline(text), self._reports)
def writelines(self, text):
map(lambda rpt: rpt.writelines(text), self._reports)
def add_heading(self, text, level=1):
map(lambda rpt: rpt.add_heading(text, level), self._reports)
def add_message(self, msgtype, msg, level=1):
map(lambda rpt: rpt.add_message(msgtype, msg, level), self._reports)
def passed(self, msg=NO_MESSAGE, level=1):
map(lambda rpt: rpt.passed(msg, level), self._reports)
def failed(self, msg=NO_MESSAGE, level=1):
map(lambda rpt: rpt.failed(msg, level), self._reports)
def expectedfail(self, msg=NO_MESSAGE, level=1):
map(lambda rpt: rpt.expectedfail(msg, level), self._reports)
def incomplete(self, msg=NO_MESSAGE, level=1):
map(lambda rpt: rpt.incomplete(msg, level), self._reports)
def abort(self, msg=NO_MESSAGE, level=1):
map(lambda rpt: rpt.abort(msg, level), self._reports)
def info(self, msg, level=1):
map(lambda rpt: rpt.info(msg, level), self._reports)
def diagnostic(self, msg, level=1):
map(lambda rpt: rpt.diagnostic(msg, level), self._reports)
def add_text(self, text):
map(lambda rpt: rpt.add_text(text), self._reports)
def add_analysis(self, text):
map(lambda rpt: rpt.add_analysis(text), self._reports)
def add_data(self, data, datatype, note=None):
map(lambda rpt: rpt.add_data(data, datatype, note), self._reports)
def add_url(self, text, url):
map(lambda rpt: rpt.add_url(text, url), self._reports)
def add_summary(self, entries):
map(lambda rpt: rpt.add_summary(entries), self._reports)
def newpage(self):
map(lambda rpt: rpt.newpage(), self._reports)
def logfile(self, fn):
map(lambda rpt: rpt.logfile(fn), self._reports)
def initialize(self, config=None):
map(lambda rpt: rpt.initialize(config), self._reports)
def finalize(self):
map(lambda rpt: rpt.finalize(), self._reports)
# Try to return an object from this module. If that fails, import and
# return the object given by the pathname (dot-delimited path to class).
def _get_object(name):
try:
return getattr(sys.modules[__name__], name)
except AttributeError:
i = name.rfind(".")
if i >= 0:
modname = name[:i]
try:
mod = sys.modules[modname]
except KeyError:
try:
mod = __import__(modname, globals(), locals(), ["*"])
except ImportError, err:
raise ReportFindError, \
"Could not find report module %s: %s" % (modname, err)
try:
return getattr(mod, name[i+1:])
except AttributeError:
raise ReportFindError, \
"Could not find report object: %s" % (name,)
else:
raise ReportFindError, "%s is not a valid object path." % (name,)
def get_report(args):
"""
If args is a list, it should contain argument-tuples that specify a series
of reports. A StackedReport object will be generated in that case.
Otherwise, args should be a tuple, with first arg the name of a report or
None (for StandardReport), and remaining args get passed to report
initializer.
"""
if type(args) is list:
rpt = StackedReport()
for subargs in args:
n = get_report(subargs)
rpt.add_report(n)
return rpt
name = args[0]
if name is None:
return apply(StandardReport, args[1:])
robj = _get_object(name)
if not hasattr(robj, "info"):
raise ReportFindError, "%s is not a valid report object." % (name,)
return apply(robj, args[1:])
def get_formatter(name, *args):
objname, ext = _FORMATTERS.get(name, (name, "txt"))
fobj = _get_object(objname)
form = apply(fobj, args)
return form, ext
if __name__ == "__main__":
rpt = get_report( ("StandardReport", "-", "text/plain") )
rpt.initialize()
rpt.add_title("The Title")
rpt.add_heading("Some heading")
rpt.info("some info")
rpt.passed("A message for a passed condition.")
rpt.finalize()
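    # A stacked report fans the same events out to several sub-reports; a
    # minimal sketch reusing the plain-text report type from above:
    stacked = StackedReport()
    stacked.add_report(get_report(("StandardReport", "-", "text/plain")))
    stacked.initialize()
    stacked.info("broadcast to every stacked report")
    stacked.finalize()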
|
xiangke/pycopia
|
QA/pycopia/reports/__init__.py
|
Python
|
lgpl-2.1
| 17,925 | 0.004965 |
# vim: tabstop=8 shiftwidth=4 softtabstop=4 expandtab smarttab autoindent
# Altai API Service
# Copyright (C) 2012-2013 Grid Dynamics Consulting Services, Inc
# All Rights Reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import traceback
class AltaiApiException(Exception):
def __init__(self, message, status_code, reason=None, exc_type=None):
super(AltaiApiException, self).__init__(message)
self.status_code = status_code
self.reason = reason
if exc_type is not None:
self.exc_type = exc_type
else:
self.exc_type = self.__class__.__name__
def get_response_object(self):
lines = traceback.format_exception_only(type(self), self)
result = {
'message': '\n'.join(lines),
'error-type': self.exc_type
}
if self.reason:
result['reason'] = self.reason
return result
class InvalidRequest(AltaiApiException):
"""Exception raised on invalid requests"""
def __init__(self, message, reason=None):
super(InvalidRequest, self).__init__(message, 400, reason)
class InvalidElement(InvalidRequest):
def __init__(self, message, name, reason=None):
super(InvalidElement, self).__init__(message, reason)
self.name = name
def get_response_object(self):
rv = super(InvalidElement, self).get_response_object()
rv['element-name'] = self.name
return rv
class UnknownElement(InvalidElement):
"""Exception raised when unknown elements are present in response"""
def __init__(self, name, reason=None):
super(UnknownElement, self).__init__(
'Unknown resource element: %r' % name, name, reason)
class MissingElement(InvalidElement):
"""Exception raised when required request elements are missing"""
def __init__(self, name, reason=None):
super(MissingElement, self).__init__(
'Required element is missing: %r' % name, name, reason)
class InvalidElementValue(InvalidElement):
"""Exception raised when request element has illegal value"""
def __init__(self, name, typename, value, reason=None):
msg = 'Invalid value for element %s of type %s: %r' \
% (name, typename, value)
super(InvalidElementValue, self).__init__(msg, name, reason)
self.typename = typename
self.value = value
def get_response_object(self):
rv = super(InvalidElementValue, self).get_response_object()
rv['element-value'] = self.value
rv['element-type'] = self.typename
return rv
class InvalidArgument(InvalidRequest):
"""Exception raised when invalid argument is supplied for request"""
def __init__(self, message, name, reason=None):
super(InvalidArgument, self).__init__(message, reason)
self.name = name
def get_response_object(self):
rv = super(InvalidArgument, self).get_response_object()
rv['argument-name'] = self.name
return rv
class UnknownArgument(InvalidArgument):
"""Exception raised when unknown arguments are present in request"""
def __init__(self, name, reason=None):
super(UnknownArgument, self).__init__(
'Unknown request argument: %r' % name, name, reason)
class InvalidArgumentValue(InvalidArgument):
"""Exception raised when some client input has illegal value"""
def __init__(self, name, typename, value, reason=None):
msg = 'Invalid value for argument %s of type %s: %r' \
% (name, typename, value)
super(InvalidArgumentValue, self).__init__(msg, name, reason)
self.typename = typename
self.value = value
def get_response_object(self):
rv = super(InvalidArgumentValue, self).get_response_object()
rv['argument-value'] = self.value
rv['argument-type'] = self.typename
return rv
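
# A minimal usage sketch (illustrative values): a request handler might raise
#
#     raise InvalidArgumentValue('limit', 'int', 'abc',
#                                reason='limit must be an integer')
#
# and a generic error handler can serialize the caught exception `exc` for
# the client:
#
#     body = exc.get_response_object()
#     # {'message': ..., 'error-type': 'InvalidArgumentValue',
#     #  'reason': 'limit must be an integer', 'argument-name': 'limit',
#     #  'argument-value': 'abc', 'argument-type': 'int'}
#     status = exc.status_code  # 400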
|
altai/altai-api
|
altai_api/exceptions.py
|
Python
|
lgpl-2.1
| 4,527 | 0.000221 |
# Test inplace special methods enabled by MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS
class A:
def __imul__(self, other):
print("__imul__")
return self
def __imatmul__(self, other):
print("__imatmul__")
return self
def __ifloordiv__(self, other):
print("__ifloordiv__")
return self
def __itruediv__(self, other):
print("__itruediv__")
return self
def __imod__(self, other):
print("__imod__")
return self
def __ipow__(self, other):
print("__ipow__")
return self
def __ior__(self, other):
print("__ior__")
return self
def __ixor__(self, other):
print("__ixor__")
return self
def __iand__(self, other):
print("__iand__")
return self
def __ilshift__(self, other):
print("__ilshift__")
return self
def __irshift__(self, other):
print("__irshift__")
return self
a = A()
try:
a *= None
except TypeError:
print("SKIP")
raise SystemExit
a @= None
a //= None
a /= None
a %= None
a **= None
a |= None
a ^= None
a &= None
a <<= None
a >>= None
# Normal operator should not fall back to the inplace operator
try:
a * None
except TypeError:
print("TypeError")
|
MrSurly/micropython
|
tests/basics/class_inplace_op2.py
|
Python
|
mit
| 1,293 | 0.000773 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
class AvailableResult:
def __init__(self):
self.__dict__['available'] = False
def __getattr__(self, name):
if name == 'available':
return self.__dict__['available']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'available':
self.__dict__['available'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddBool(MSG_KEY_RESULT_AVAILABLE_AVAILABLE, self.__dict__['available'])
mmsg.AddMessage(MSG_KEY_RESULT_AVAILABLE, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_AVAILABLE, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['available'] = submsg.FindBool(MSG_KEY_RESULT_AVAILABLE_AVAILABLE)
class StatusResult:
def __init__(self):
self.__dict__['major'] = 0
self.__dict__['minor'] = 0
self.__dict__['fix'] = 0
self.__dict__['build'] = 0
self.__dict__['available'] = False
def __getattr__(self, name):
if name == 'major':
return self.__dict__['major']
if name == 'minor':
return self.__dict__['minor']
if name == 'fix':
return self.__dict__['fix']
if name == 'build':
return self.__dict__['build']
if name == 'available':
return self.__dict__['available']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'major':
self.__dict__['major'] = value
elif name == 'minor':
self.__dict__['minor'] = value
elif name == 'fix':
self.__dict__['fix'] = value
elif name == 'build':
self.__dict__['build'] = value
elif name == 'available':
self.__dict__['available'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_STATUS_MAJOR, self.__dict__['major'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_MINOR, self.__dict__['minor'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_FIX, self.__dict__['fix'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_BUILD, self.__dict__['build'])
submsg.AddBool(MSG_KEY_RESULT_STATUS_AVAILABLE, self.__dict__['available'])
mmsg.AddMessage(MSG_KEY_RESULT_STATUS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_STATUS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['major'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_MAJOR)
self.__dict__['minor'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_MINOR)
self.__dict__['fix'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_FIX)
self.__dict__['build'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_BUILD)
self.__dict__['available'] = submsg.FindBool(MSG_KEY_RESULT_STATUS_AVAILABLE)
class StringResult:
def __init__(self):
self.__dict__['str'] = ''
def __getattr__(self, name):
if name == 'str':
return self.__dict__['str']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'str':
self.__dict__['str'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddStringUtf8(MSG_KEY_RESULT_STRING_VALUE, self.__dict__['str'])
mmsg.AddMessage(MSG_KEY_RESULT_STRING, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_STRING, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['str'] = submsg.FindString(MSG_KEY_RESULT_STRING_VALUE)
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/FlAv/PyScripts/Lib/flav/flav/cmd/flavcontrol/type_Result.py
|
Python
|
unlicense
| 4,626 | 0.002162 |
# -*- coding:utf-8 -*-
#html_doc = '''<div><a href="http://www.weblio.jp/content/%E5%BD%A2%E5%AE%B9%E5%8B%95%E8%A9%9E" title="形容動詞の意味" class=crosslink>形容動詞</a>「<a href="http://www.weblio.jp/content/%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%83%95%E3%82%A3%E3%82%B7%E3%83%A3%E3%83%AB" title="アーティフィシャルの意味" class=crosslink>アーティフィシャル</a>だ」が、<a href="http://www.weblio.jp/content/%E6%8E%A5%E5%B0%BE%E8%AA%9E" title="接尾語の意味" class=crosslink>接尾語</a>「さ」により<a href="http://www.weblio.jp/content/%E4%BD%93%E8%A8%80" title="体言の意味" class=crosslink>体言</a>化した形。<br><br class=nhgktD><div><!--AVOID_CROSSLINK--><p class=nhgktL>終止形</p><p class=nhgktR>アーティフィシャルだ <a href="http://www.weblio.jp/content/%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%83%95%E3%82%A3%E3%82%B7%E3%83%A3%E3%83%AB" title="アーティフィシャル">» 「アーティフィシャル」の意味を調べる</a></p><!--/AVOID_CROSSLINK--><br class=clr></div>'''
#from bs4 import BeautifulSoup
#soup = BeautifulSoup(html_doc, 'html.parser')
#a = [text for text in soup.stripped_strings]
#print ''.join(a[:-1])
import socket
import urllib2
import traceback
import re
#import MySQLdb
import time
from bs4 import BeautifulSoup
#from complainDetail import *
timeout = 10
socket.setdefaulttimeout(timeout)
def fetchDetail(link, word):
tryNum = 3
tn = 0
while tn < tryNum:
details = []
try:
f = urllib2.urlopen(link)
content = f.read()
soup = BeautifulSoup(content, 'html.parser')
main = soup.find(attrs={'class':'Nhgkt'})
left = soup.find_all(attrs={'class':'nhgktL'})
right = soup.find_all(attrs={'class':'nhgktR'})
            if left:
                for text in main.stripped_strings:
                    if re.match(u'終止形$', text) is not None:
                        break
                    details.append(text)
                print '#'.join(details).encode('utf8'),
                print '%', left[0].string.encode('utf8'), ':',
                aList = right[0].find_all('a')
                for a in aList:
                    print a['title'].encode('utf8'),
                print
            else:
                for text in main.stripped_strings:
                    if u'»' in text:
                        break
                    details.append(text)
                print '#'.join(details).encode('utf8')
break
except Exception,e:
print e
tn = tn + 1
#print url, " access error!"
#print "try ", tn, "time"
time.sleep(5)
    if tn == tryNum:
#print "Cannot fetch page!"
return -1
return 0
if __name__ == "__main__":
wordsUrlList = open('verb_ok.txt')
for line in wordsUrlList.readlines():
l = line.split(' ')
link = l[0]
word = l[1].strip('\n')
print word, '%', link, '%',
        if fetchDetail(link, word) == -1:
print link, word, "ERROR."
print "Finished"
#indexUrl = "http://www.weblio.jp/category/dictionary/nhgkt/aa"
#f = urllib2.urlopen(indexUrl)
#content = f.read()
#soup = BeautifulSoup(content, 'html.parser')
#urlTable = soup.find(attrs={'class':'kanaAlpha'})
#aList = urlTable.find_all('a')
#for a in aList:
# print '"'+a['href']+'",'
|
pprivulet/DataScience
|
Dic/getDetail.py
|
Python
|
apache-2.0
| 3,537 | 0.015653 |
"""
Gateways connect to Announcer daemons, sending zlib compressed JSON
representations of market data. From here, the Announcer PUBs the messages
out to anyone SUBscribing. This could be Relays, or end-users.
"""
import logging
logger = logging.getLogger(__name__)
import gevent
import zmq.green as zmq
from emdr.conf import default_settings as settings
def run():
"""
Fires up the announcer process.
"""
context = zmq.Context()
receiver = context.socket(zmq.SUB)
receiver.setsockopt(zmq.SUBSCRIBE, '')
for binding in settings.ANNOUNCER_RECEIVER_BINDINGS:
# Gateways connect to the Announcer to PUB messages.
receiver.bind(binding)
sender = context.socket(zmq.PUB)
for binding in settings.ANNOUNCER_SENDER_BINDINGS:
# Announcers offer up the data via PUB.
sender.bind(binding)
def relay_worker(message):
"""
This is the worker function that re-sends the incoming messages out
to any subscribers.
:param str message: A JSON string to re-broadcast.
"""
sender.send(message)
logger.debug('Message announced.')
logger.info("Announcer is now listening for order data.")
while True:
gevent.spawn(relay_worker, receiver.recv())
|
gtaylor/EVE-Market-Data-Relay
|
emdr/daemons/announcer/main.py
|
Python
|
mit
| 1,274 | 0.00314 |
import pprint
import difflib
_MAX_LENGTH = 80
def pretty_diff(d1, d2):
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
def sequence_diff(seq1, seq2, seq_type=None):
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
return 'First sequence is not a %s: %s' % \
(seq_type_name, safe_repr(seq1))
if not isinstance(seq2, seq_type):
return 'Second sequence is not a %s: %s' % \
(seq_type_name, safe_repr(seq2))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
return standardMsg + '\n' + diffMsg
def set_diff(set1, set2):
try:
difference1 = set1.difference(set2)
except TypeError, e:
return 'invalid type when attempting set difference: %s' % e
except AttributeError, e:
return 'first argument does not support set difference: %s' % e
try:
difference2 = set2.difference(set1)
except TypeError, e:
return 'invalid type when attempting set difference: %s' % e
except AttributeError, e:
return 'second argument does not support set difference: %s' % e
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
return '\n'.join(lines)
diff = None
if not isinstance(d1, type(d2)):
return diff
if d1 == d2:
return diff
if isinstance(d1, dict):
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
elif isinstance(d1, list):
diff = sequence_diff(d1, d2, seq_type=list)
elif isinstance(d1, tuple):
diff = sequence_diff(d1, d2, seq_type=tuple)
elif isinstance(d1, set):
diff = set_diff(d1, d2)
elif isinstance(d1, frozenset):
diff = set_diff(d1, d2)
return diff
class ItemDiff(object):
ITEM_NAME = "Item"
def __init__(self):
self.differences = []
def __nonzero__(self):
return bool(len(self.differences))
def __repr__(self):
return pprint.pformat(self.__dict__)
def add_difference(self, name, val_a, val_b, item_type=None, desc=None):
self.differences.append((name, val_a, val_b, item_type, desc))
def pprint(self):
msg = ""
for difference in self.differences:
name, a, b, item_type, desc = difference
msg += " %s" % name
if item_type:
msg += " [%s]" % item_type
if desc:
msg += " - %s" % desc
msg += "\n"
nice_diff = pretty_diff(a, b)
if isinstance(a, set):
tmp_a = a - b
b = b - a
a = tmp_a
msg += " [The difference is set -> Only extra items are shown]\n"
else:
msg += "\n"
msg += " 1. %s:\n" % self.ITEM_NAME
msg += " %s\n" % pprint.pformat(a, indent=8)
msg += " 2. %s:\n" % self.ITEM_NAME
msg += " %s\n" % pprint.pformat(b, indent=8)
if nice_diff:
msg += " Diff:\n"
msg += " %s\n" % "\n ".join(nice_diff.split('\n'))
return msg
class PackageDiff(ItemDiff):
ITEM_NAME = "Package"
class RepomdItemDiff(ItemDiff):
ITEM_NAME = "Value"
class MetadataDiff(object):
def __init__(self):
self.missing_items = set() # set of checksums
self.added_items = set() # set of checksums
self.changed_items = set() # set of checksums
self.items_diffs = {}
        # self.items_diffs keys are checksums from self.changed_items
        # and values are ItemDiff objects (e.g. PackageDiff).
def __nonzero__(self):
return bool(len(self.missing_items) or \
len(self.added_items) or \
len(self.changed_items))
def __repr__(self):
return pprint.pformat(self.__dict__)
def pprint(self, chksum_to_name_dict=None):
def translate(chksum):
if chksum_to_name_dict and chksum in chksum_to_name_dict:
return chksum_to_name_dict[chksum]
return None
msg = ""
if self.missing_items:
msg += " Missing items (items from A that are not available in B):\n"
for pkg in self.missing_items:
if translate(pkg):
msg += " %s (%s)\n" % (translate(pkg), pkg)
else:
msg += " %s\n" % pkg
if self.added_items:
msg += " Added items (items from B that are not available in A):\n"
for pkg in self.added_items:
if translate(pkg):
msg += " %s (%s)\n" % (translate(pkg), pkg)
else:
msg += " %s\n" % pkg
if self.changed_items:
msg += " Changed items:\n"
for pkg in self.changed_items:
if translate(pkg):
msg += " %s (%s)\n" % (translate(pkg), pkg)
else:
msg += " %s\n" % pkg
msg += self.items_diffs[pkg].pprint()
msg += " ----------------------------------------\n"
return msg
class OneRepoDiff(object):
def __init__(self, chksum_to_name_dict=None):
self.chksum_to_name_dict = chksum_to_name_dict
self.pri_diff = None
self.fil_diff = None
self.oth_diff = None
def __nonzero__(self):
return bool(self.pri_diff or self.fil_diff or self.oth_diff)
def __repr__(self):
return pprint.pformat(self.__dict__)
def pprint(self):
msg = ""
if self.pri_diff:
msg += "PRIMARY repodata are different:\n"
msg += self.pri_diff.pprint(chksum_to_name_dict=self.chksum_to_name_dict)
if self.fil_diff:
msg += "FILELISTS repodata are different:\n"
msg += self.fil_diff.pprint(chksum_to_name_dict=self.chksum_to_name_dict)
if self.oth_diff:
msg += "OTHER repodata are different:\n"
msg += self.oth_diff.pprint(chksum_to_name_dict=self.chksum_to_name_dict)
return msg
class CompleteRepoDiff(object):
def __init__(self):
self.xml_repo_diff = None
self.sql_repo_diff = None
self.md_diff = None
def __nonzero__(self):
return bool(self.xml_repo_diff or self.sql_repo_diff or self.md_diff)
def __repr__(self):
return pprint.pformat(self.__dict__)
def pprint(self):
msg = ""
if self.xml_repo_diff:
msg += "=== XML REPO DIFF: ===\n"
msg += self.xml_repo_diff.pprint()
if self.sql_repo_diff:
msg += "=== SQLITE REPO DIFF: ===\n"
msg += self.sql_repo_diff.pprint()
if self.md_diff:
msg += "=== REPOMD.XML DIFF: ===\n"
msg += self.md_diff.pprint()
return msg
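# A minimal usage sketch (an illustration, not part of the original module):
# it builds the nesting ItemDiff -> MetadataDiff -> OneRepoDiff with
# hypothetical checksum/package values and pretty-prints the result.
if __name__ == "__main__":
    pkg_diff = PackageDiff()
    pkg_diff.add_difference("version", "1.0", "1.1", item_type="str")
    md_diff = MetadataDiff()
    md_diff.changed_items.add("abc123")
    md_diff.items_diffs["abc123"] = pkg_diff
    repo_diff = OneRepoDiff(chksum_to_name_dict={"abc123": "foo.rpm"})
    repo_diff.pri_diff = md_diff
    print repo_diff.pprint()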
|
Tojaj/yum-metadata-diff
|
yum_metadata_diff/diff_objects.py
|
Python
|
lgpl-2.1
| 10,928 | 0.001647 |
"""
WSGI config for sitefinder_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sitefinder_project.settings.production")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Wrap werkzeug debugger if DEBUG is on
from django.conf import settings
if settings.DEBUG:
try:
import django.views.debug
import six
from werkzeug.debug import DebuggedApplication
def null_technical_500_response(request, exc_type, exc_value, tb):
six.reraise(exc_type, exc_value, tb)
django.views.debug.technical_500_response = null_technical_500_response
application = DebuggedApplication(application, evalex=True)
except ImportError:
pass
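# Optional local-run sketch (an assumption, not part of the deployment
# config): serve the WSGI callable with werkzeug's development server
# instead of a production container such as gunicorn or uWSGI.
if __name__ == "__main__":
    from werkzeug.serving import run_simple
    run_simple("localhost", 8000, application, use_reloader=settings.DEBUG)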
|
robjordan/sitefinder
|
src/sitefinder_project/wsgi.py
|
Python
|
mit
| 942 | 0.003185 |
# -*- coding: utf-8 -*-
# MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems
# Copyright (C) 2015-2017, Thomas Duriez (thomas.duriez@gmail.com)
# Copyright (C) 2015, Adrian Durán (adrianmdu@gmail.com)
# Copyright (C) 2015-2017, Ezequiel Torres Feyuk (ezequiel.torresfeyuk@gmail.com)
# Copyright (C) 2016-2017, Marco Germano Zbrun (marco.germano@intraway.com)
# Copyright (C) 2016-2017, Raúl Lopez Skuba (raulopez0@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from BaseCreation import BaseCreation
from MLC.db.mlc_repository import MLCRepository
class IndividualSelection(BaseCreation):
"""
Fill a Population with fixed Individuals.
selected_individuals: dictionary containing {Individual: positions inside
the first population}
fill_creator: creator used to fill empty positions.
    Positions not covered by selected_individuals will be completed with the
    individuals produced by fill_creator.
"""
def __init__(self, selected_individuals, fill_creator):
BaseCreation.__init__(self)
self.__fill_creator = fill_creator
self.__selected_individuals = selected_individuals
self.__individuals = []
def create(self, gen_size):
self.__fill_creator.create(gen_size)
self.__individuals = self.__fill_creator.individuals()
# Add Individuals
for individual, positions in self.__selected_individuals.items():
for position in positions:
if position < gen_size:
individual_id, _ = MLCRepository.get_instance().add_individual(individual)
self.__individuals[position] = (position, individual_id)
def individuals(self):
return self.__individuals
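# Illustrative usage sketch (the `individual` and `base_creator` objects and
# the configured MLCRepository are hypothetical, supplied by the caller):
#
#     creation = IndividualSelection({individual: [0, 2]}, base_creator)
#     creation.create(5)       # positions 0 and 2 fixed, the rest filled
#     creation.individuals()   # -> list of (position, individual_id) tuples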
|
MachineLearningControl/OpenMLC-Python
|
MLC/Population/Creation/IndividualSelection.py
|
Python
|
gpl-3.0
| 2,329 | 0.001719 |
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
from biggraphite.drivers import _utils
class CountDownTest(unittest.TestCase):
_COUNT = 42
def setUp(self):
self.on_zero = mock.Mock()
self.count_down = _utils.CountDown(self._COUNT, self.on_zero)
def test_on_failure(self):
exc = Exception()
self.count_down.on_failure(exc)
self.on_zero.assert_called_once()
# Failing again should not call the callback again.
self.count_down.on_failure(exc)
self.on_zero.assert_called_once()
def test_on_result(self):
result = "whatever this is not used"
for _ in xrange(self._COUNT - 1):
self.count_down.on_result(result)
self.on_zero.assert_not_called()
self.count_down.on_result(result)
self.on_zero.assert_called_with(None)
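# For orientation only: a minimal stand-in consistent with the behaviour the
# tests above assert. The real implementation lives in
# biggraphite.drivers._utils; this sketch is an assumption, not its code.
class _CountDownSketch(object):
    def __init__(self, count, on_zero):
        self._count = count
        self._on_zero = on_zero
    def on_result(self, _result):
        # Each success decrements the counter; reaching zero fires the
        # callback exactly once with None.
        self._count -= 1
        if self._count == 0:
            self._on_zero(None)
    def on_failure(self, error):
        # The first failure short-circuits the countdown and fires the
        # callback immediately; subsequent failures are ignored.
        if self._count > 0:
            self._count = 0
            self._on_zero(error)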
if __name__ == "__main__":
unittest.main()
|
natbraun/biggraphite
|
tests/test_drivers_utils.py
|
Python
|
apache-2.0
| 1,554 | 0 |
#!/usr/bin/python3
import cgi
import cgitb
import datetime
import json
import os
import re
import requests
import subprocess
import sys
import time
from bmdjson import check_address
print("Content-Type: text/plain\n")
print("testing keybase")
print()
print("PASS:")
signature = "BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdyH7rvq v5weRa0zkSjiJmm 8dzt8BnSF7QPfAy AmWtlYORgWXP5hk aXmzZHPBPoIRpYD qsXcl0JX7RT65NS KLnnW8kwG9ujBNt r2bd6GNLnp4xVMr btCVAG2TMDpNhVf yXSbZmzQDnE6mIM Y4oS4YGVbw244Je Bc7lmO6225Gu6tj HgIwRnLz975GBZU Bc3GLDyRpvTEGXr AzRtx0gMk2FzHxf 2oimZKG. END KEYBASE SALTPACK SIGNED MESSAGE."
sig_result = check_address(signature)
for k, v in sorted(sig_result.items(), key=lambda x: x[0]):
    # sort on the key -- the leftmost of each (k, v) pair -- so the output
    # order is deterministic: sig_addr, sig_by, then sig_good
print("[" + str(k) + "] = ", v)
print()
print("FAIL: Bad String")
signature2 = "BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdy27rvq v5weRa0zkDL3e9k D1e7HgTLY1WFWdi UfZI1s56lquWUJu lBvdIblMbFGwTGa M9oYSI9cU7KjGW9 2JOGghIjQX3Fqw5 xsvEpPo9pEuA25J Ut0J0Fur0C3F8oZ n50PAvVWVmb0iEP 5MNUBEMHMo5DTtF OhK66v3FFwu0qJe 8R35q5A5ycevVsR pdaOBQQ1VGcNIlF 9YU6a0Wi5kd85JH rjSupUZ. END KEYBASE SALTPACK SIGNED MESSAGE."
sig_result = check_address(signature2)
for k, v in sorted(sig_result.items(), key=lambda x: x[0]):
    # sort on the key -- the leftmost of each (k, v) pair -- so the output
    # order is deterministic: sig_addr, sig_by, then sig_good
print("[" + str(k) + "] = ", v)
print()
print("end.")
|
joezippy/paywall
|
test-keybase.py
|
Python
|
apache-2.0
| 1,556 | 0.001928 |
# -*- coding: utf-8 -*-
# Copyright 2015-2017 Rooms For (Hong Kong) Limited T/A OSCG
# Copyright 2017 eHanse
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import models
|
rfhk/awo-custom
|
account_invoice_line_view_oaw/__init__.py
|
Python
|
lgpl-3.0
| 199 | 0 |
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Emits a C++ file to be compiled and linked into libv8 to support postmortem
# debugging tools. Most importantly, this tool emits constants describing V8
# internals:
#
# v8dbg_type_CLASS__TYPE = VALUE Describes class type values
# v8dbg_class_CLASS__FIELD__TYPE = OFFSET Describes class fields
# v8dbg_parent_CLASS__PARENT Describes class hierarchy
# v8dbg_frametype_NAME = VALUE Describes stack frame values
# v8dbg_off_fp_NAME = OFFSET Frame pointer offsets
# v8dbg_prop_NAME = OFFSET Object property offsets
# v8dbg_NAME = VALUE Miscellaneous values
#
# These constants are declared as global integers so that they'll be present in
# the generated libv8 binary.
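#
# For instance, the generated file will contain global definitions along the
# lines of (illustrative names and values only):
#
#   int v8dbg_type_JSFunction__JS_FUNCTION_TYPE = 1057;
#   int v8dbg_class_JSFunction__context__Context = 31;
#   int v8dbg_parent_JSFunction__JSObject = 0;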
#
import re
import sys
#
# Miscellaneous constants such as tags and masks used for object identification,
# enumeration values used as indexes in internal tables, etc..
#
consts_misc = [
{ 'name': 'FirstNonstringType', 'value': 'FIRST_NONSTRING_TYPE' },
{ 'name': 'APIObjectType', 'value': 'JS_API_OBJECT_TYPE' },
{ 'name': 'SpecialAPIObjectType', 'value': 'JS_SPECIAL_API_OBJECT_TYPE' },
{ 'name': 'IsNotStringMask', 'value': 'kIsNotStringMask' },
{ 'name': 'StringTag', 'value': 'kStringTag' },
{ 'name': 'NotStringTag', 'value': 'kNotStringTag' },
{ 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
{ 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
{ 'name': 'OneByteStringTag', 'value': 'kOneByteStringTag' },
{ 'name': 'StringRepresentationMask',
'value': 'kStringRepresentationMask' },
{ 'name': 'SeqStringTag', 'value': 'kSeqStringTag' },
{ 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
{ 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
{ 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
{ 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
{ 'name': 'SmiTag', 'value': 'kSmiTag' },
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
{ 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
{ 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
{ 'name': 'OddballNull', 'value': 'Oddball::kNull' },
{ 'name': 'OddballArgumentsMarker', 'value': 'Oddball::kArgumentsMarker' },
{ 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' },
{ 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' },
{ 'name': 'OddballOther', 'value': 'Oddball::kOther' },
{ 'name': 'OddballException', 'value': 'Oddball::kException' },
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'DATA' },
{ 'name': 'prop_type_const_field',
'value': 'DATA_CONSTANT' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',
'value': 'PropertyDetails::FieldIndexField::kMask' },
{ 'name': 'prop_index_shift',
'value': 'PropertyDetails::FieldIndexField::kShift' },
{ 'name': 'prop_representation_mask',
'value': 'PropertyDetails::RepresentationField::kMask' },
{ 'name': 'prop_representation_shift',
'value': 'PropertyDetails::RepresentationField::kShift' },
{ 'name': 'prop_representation_integer8',
'value': 'Representation::Kind::kInteger8' },
{ 'name': 'prop_representation_uinteger8',
'value': 'Representation::Kind::kUInteger8' },
{ 'name': 'prop_representation_integer16',
'value': 'Representation::Kind::kInteger16' },
{ 'name': 'prop_representation_uinteger16',
'value': 'Representation::Kind::kUInteger16' },
{ 'name': 'prop_representation_smi',
'value': 'Representation::Kind::kSmi' },
{ 'name': 'prop_representation_integer32',
'value': 'Representation::Kind::kInteger32' },
{ 'name': 'prop_representation_double',
'value': 'Representation::Kind::kDouble' },
{ 'name': 'prop_representation_heapobject',
'value': 'Representation::Kind::kHeapObject' },
{ 'name': 'prop_representation_tagged',
'value': 'Representation::Kind::kTagged' },
{ 'name': 'prop_representation_external',
'value': 'Representation::Kind::kExternal' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kDescriptorKey' },
{ 'name': 'prop_desc_details',
'value': 'DescriptorArray::kDescriptorDetails' },
{ 'name': 'prop_desc_value',
'value': 'DescriptorArray::kDescriptorValue' },
{ 'name': 'prop_desc_size',
'value': 'DescriptorArray::kDescriptorSize' },
{ 'name': 'elements_fast_holey_elements',
'value': 'FAST_HOLEY_ELEMENTS' },
{ 'name': 'elements_fast_elements',
'value': 'FAST_ELEMENTS' },
{ 'name': 'elements_dictionary_elements',
'value': 'DICTIONARY_ELEMENTS' },
{ 'name': 'bit_field2_elements_kind_mask',
'value': 'Map::ElementsKindBits::kMask' },
{ 'name': 'bit_field2_elements_kind_shift',
'value': 'Map::ElementsKindBits::kShift' },
{ 'name': 'bit_field3_dictionary_map_shift',
'value': 'Map::DictionaryMap::kShift' },
{ 'name': 'bit_field3_number_of_own_descriptors_mask',
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
{ 'name': 'bit_field3_number_of_own_descriptors_shift',
'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
{ 'name': 'off_fp_constant_pool',
'value': 'StandardFrameConstants::kConstantPoolOffset' },
{ 'name': 'off_fp_function',
'value': 'JavaScriptFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
'value': 'JavaScriptFrameConstants::kLastParameterOffset' },
{ 'name': 'scopeinfo_idx_nparams',
'value': 'ScopeInfo::kParameterCount' },
{ 'name': 'scopeinfo_idx_nstacklocals',
'value': 'ScopeInfo::kStackLocalCount' },
{ 'name': 'scopeinfo_idx_ncontextlocals',
'value': 'ScopeInfo::kContextLocalCount' },
{ 'name': 'scopeinfo_idx_first_vars',
'value': 'ScopeInfo::kVariablePartIndex' },
{ 'name': 'sharedfunctioninfo_start_position_mask',
'value': 'SharedFunctionInfo::kStartPositionMask' },
{ 'name': 'sharedfunctioninfo_start_position_shift',
'value': 'SharedFunctionInfo::kStartPositionShift' },
{ 'name': 'jsarray_buffer_was_neutered_mask',
'value': 'JSArrayBuffer::WasNeutered::kMask' },
{ 'name': 'jsarray_buffer_was_neutered_shift',
'value': 'JSArrayBuffer::WasNeutered::kShift' },
{ 'name': 'context_idx_closure',
'value': 'Context::CLOSURE_INDEX' },
{ 'name': 'context_idx_native',
'value': 'Context::NATIVE_CONTEXT_INDEX' },
{ 'name': 'context_idx_prev',
'value': 'Context::PREVIOUS_INDEX' },
{ 'name': 'context_idx_ext',
'value': 'Context::EXTENSION_INDEX' },
{ 'name': 'context_min_slots',
'value': 'Context::MIN_CONTEXT_SLOTS' },
{ 'name': 'namedictionaryshape_prefix_size',
'value': 'NameDictionaryShape::kPrefixSize' },
{ 'name': 'namedictionaryshape_entry_size',
'value': 'NameDictionaryShape::kEntrySize' },
{ 'name': 'globaldictionaryshape_entry_size',
'value': 'GlobalDictionaryShape::kEntrySize' },
{ 'name': 'namedictionary_prefix_start_index',
'value': 'NameDictionary::kPrefixStartIndex' },
{ 'name': 'seedednumberdictionaryshape_prefix_size',
'value': 'SeededNumberDictionaryShape::kPrefixSize' },
{ 'name': 'seedednumberdictionaryshape_entry_size',
'value': 'SeededNumberDictionaryShape::kEntrySize' },
{ 'name': 'unseedednumberdictionaryshape_prefix_size',
'value': 'UnseededNumberDictionaryShape::kPrefixSize' },
{ 'name': 'unseedednumberdictionaryshape_entry_size',
'value': 'UnseededNumberDictionaryShape::kEntrySize' }
];
#
# The following useful fields are missing accessors, so we define fake ones.
# Please note that extra accessors should _only_ be added to expose offsets that
# can be used to access actual V8 objects' properties. They should not be added
# for exposing other values. For instance, enumeration values or class'
# constants should be exposed by adding an entry in the "consts_misc" table, not
# in this "extras_accessors" table.
#
extras_accessors = [
'JSFunction, context, Context, kContextOffset',
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'JSObject, internal_fields, uintptr_t, kHeaderSize',
'FixedArray, data, uintptr_t, kHeaderSize',
'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_attributes, int, kInstanceAttributesOffset',
'Map, inobject_properties_or_constructor_function_index, int, kInObjectPropertiesOrConstructorFunctionIndexOffset',
'Map, instance_size, int, kInstanceSizeOffset',
'Map, bit_field, char, kBitFieldOffset',
'Map, bit_field2, char, kBitField2Offset',
'Map, bit_field3, int, kBitField3Offset',
'Map, prototype, Object, kPrototypeOffset',
'Oddball, kind_offset, int, kKindOffset',
'HeapNumber, value, double, kValueOffset',
'ConsString, first, String, kFirstOffset',
'ConsString, second, String, kSecondOffset',
'ExternalString, resource, Object, kResourceOffset',
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
'SharedFunctionInfo, scope_info, ScopeInfo, kScopeInfoOffset',
'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
];
#
# The following is a whitelist of classes we expect to find when scanning the
# source code. This list is not exhaustive, but it's still useful to identify
# when this script gets out of sync with the source. See load_objects().
#
expected_classes = [
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
'SeqOneByteString', 'SharedFunctionInfo'
];
#
# The following structures store high-level representations of the structures
# for which we're going to emit descriptive constants.
#
types = {}; # set of all type names
typeclasses = {}; # maps type names to corresponding class names
klasses = {}; # known classes, including parents
fields = []; # field declarations
header = '''
/*
* This file is generated by %s. Do not edit directly.
*/
#include "src/v8.h"
#include "src/frames.h"
#include "src/frames-inl.h" /* for architecture-specific frame constants */
#include "src/contexts.h"
using namespace v8::internal;
extern "C" {
/* stack frame constants */
#define FRAME_CONST(value, klass) \
int v8dbg_frametype_##klass = StackFrame::value;
STACK_FRAME_TYPE_LIST(FRAME_CONST)
#undef FRAME_CONST
''' % sys.argv[0];
footer = '''
}
'''
#
# Get the base class
#
def get_base_class(klass):
if (klass == 'Object'):
return klass;
if (not (klass in klasses)):
return None;
k = klasses[klass];
return get_base_class(k['parent']);
#
# Loads class hierarchy and type information from "objects.h".
#
def load_objects():
objfilename = sys.argv[2];
objfile = open(objfilename, 'r');
in_insttype = False;
typestr = '';
#
# Construct a dictionary for the classes we're sure should be present.
#
checktypes = {};
for klass in expected_classes:
checktypes[klass] = True;
#
# Iterate objects.h line-by-line to collect type and class information.
# For types, we accumulate a string representing the entire InstanceType
# enum definition and parse it later because it's easier to do so
# without the embedded newlines.
#
for line in objfile:
if (line.startswith('enum InstanceType {')):
in_insttype = True;
continue;
if (in_insttype and line.startswith('};')):
in_insttype = False;
continue;
line = re.sub('//.*', '', line.strip());
if (in_insttype):
typestr += line;
continue;
match = re.match('class (\w[^:]*)(: public (\w[^{]*))?\s*{\s*',
line);
if (match):
klass = match.group(1).strip();
pklass = match.group(3);
if (pklass):
pklass = pklass.strip();
klasses[klass] = { 'parent': pklass };
#
# Process the instance type declaration.
#
entries = typestr.split(',');
for entry in entries:
types[re.sub('\s*=.*', '', entry).lstrip()] = True;
#
# Infer class names for each type based on a systematic transformation.
# For example, "JS_FUNCTION_TYPE" becomes "JSFunction". We find the
# class for each type rather than the other way around because there are
# fewer cases where one type maps to more than one class than the other
# way around.
#
for type in types:
#
# Symbols and Strings are implemented using the same classes.
#
usetype = re.sub('SYMBOL_', 'STRING_', type);
#
# REGEXP behaves like REG_EXP, as in JS_REGEXP_TYPE => JSRegExp.
#
usetype = re.sub('_REGEXP_', '_REG_EXP_', usetype);
#
# Remove the "_TYPE" suffix and then convert to camel case,
# except that a "JS" prefix remains uppercase (as in
# "JS_FUNCTION_TYPE" => "JSFunction").
#
if (not usetype.endswith('_TYPE')):
continue;
usetype = usetype[0:len(usetype) - len('_TYPE')];
parts = usetype.split('_');
cctype = '';
if (parts[0] == 'JS'):
cctype = 'JS';
start = 1;
else:
cctype = '';
start = 0;
for ii in range(start, len(parts)):
part = parts[ii];
cctype += part[0].upper() + part[1:].lower();
#
# Mapping string types is more complicated. Both types and
# class names for Strings specify a representation (e.g., Seq,
# Cons, External, or Sliced) and an encoding (TwoByte/OneByte),
# In the simplest case, both of these are explicit in both
# names, as in:
#
# EXTERNAL_ONE_BYTE_STRING_TYPE => ExternalOneByteString
#
# However, either the representation or encoding can be omitted
# from the type name, in which case "Seq" and "TwoByte" are
# assumed, as in:
#
# STRING_TYPE => SeqTwoByteString
#
# Additionally, sometimes the type name has more information
# than the class, as in:
#
# CONS_ONE_BYTE_STRING_TYPE => ConsString
#
# To figure this out dynamically, we first check for a
# representation and encoding and add them if they're not
# present. If that doesn't yield a valid class name, then we
# strip out the representation.
#
if (cctype.endswith('String')):
if (cctype.find('Cons') == -1 and
cctype.find('External') == -1 and
cctype.find('Sliced') == -1):
if (cctype.find('OneByte') != -1):
cctype = re.sub('OneByteString$',
'SeqOneByteString', cctype);
else:
cctype = re.sub('String$',
'SeqString', cctype);
if (cctype.find('OneByte') == -1):
cctype = re.sub('String$', 'TwoByteString',
cctype);
if (not (cctype in klasses)):
cctype = re.sub('OneByte', '', cctype);
cctype = re.sub('TwoByte', '', cctype);
#
# Despite all that, some types have no corresponding class.
#
if (cctype in klasses):
typeclasses[type] = cctype;
if (cctype in checktypes):
del checktypes[cctype];
if (len(checktypes) > 0):
for klass in checktypes:
print('error: expected class \"%s\" not found' % klass);
sys.exit(1);
#
# For a given macro call, pick apart the arguments and return an object
# describing the corresponding output constant. See load_fields().
#
def parse_field(call):
    # Replace newlines with spaces (strings are immutable in Python, so
    # build a new value rather than assigning into the old one).
    call = call.replace('\n', ' ');
idx = call.find('(');
kind = call[0:idx];
rest = call[idx + 1: len(call) - 1];
args = re.split('\s*,\s*', rest);
consts = [];
if (kind == 'ACCESSORS' or kind == 'ACCESSORS_GCSAFE'):
klass = args[0];
field = args[1];
dtype = args[2];
offset = args[3];
return ({
'name': 'class_%s__%s__%s' % (klass, field, dtype),
'value': '%s::%s' % (klass, offset)
});
assert(kind == 'SMI_ACCESSORS' or kind == 'ACCESSORS_TO_SMI');
klass = args[0];
field = args[1];
offset = args[2];
return ({
'name': 'class_%s__%s__%s' % (klass, field, 'SMI'),
'value': '%s::%s' % (klass, offset)
});
#
# Load field offset information from objects-inl.h.
#
def load_fields():
inlfilename = sys.argv[3];
inlfile = open(inlfilename, 'r');
#
# Each class's fields and the corresponding offsets are described in the
# source by calls to macros like "ACCESSORS" (and friends). All we do
# here is extract these macro invocations, taking into account that they
# may span multiple lines and may contain nested parentheses. We also
# call parse_field() to pick apart the invocation.
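    #
    # For example, a (possibly multi-line) declaration such as
    #
    #     ACCESSORS(JSFunction, context, Context, kContextOffset)
    #
    # is turned by parse_field() into the constant
    # v8dbg_class_JSFunction__context__Context = JSFunction::kContextOffset.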
#
prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE',
'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ];
current = '';
opens = 0;
for line in inlfile:
if (opens > 0):
# Continuation line
for ii in range(0, len(line)):
if (line[ii] == '('):
opens += 1;
elif (line[ii] == ')'):
opens -= 1;
if (opens == 0):
break;
current += line[0:ii + 1];
continue;
for prefix in prefixes:
if (not line.startswith(prefix + '(')):
continue;
if (len(current) > 0):
fields.append(parse_field(current));
current = '';
for ii in range(len(prefix), len(line)):
if (line[ii] == '('):
opens += 1;
elif (line[ii] == ')'):
opens -= 1;
if (opens == 0):
break;
current += line[0:ii + 1];
if (len(current) > 0):
fields.append(parse_field(current));
current = '';
for body in extras_accessors:
fields.append(parse_field('ACCESSORS(%s)' % body));
#
# Emit a block of constants.
#
def emit_set(out, consts):
# Fix up overzealous parses. This could be done inside the
# parsers but as there are several, it's easiest to do it here.
ws = re.compile('\s+')
for const in consts:
name = ws.sub('', const['name'])
value = ws.sub('', str(const['value'])) # Can be a number.
out.write('int v8dbg_%s = %s;\n' % (name, value))
out.write('\n');
#
# Emit the whole output file.
#
def emit_config():
    out = open(sys.argv[1], 'w');
out.write(header);
out.write('/* miscellaneous constants */\n');
emit_set(out, consts_misc);
out.write('/* class type information */\n');
consts = [];
keys = typeclasses.keys();
keys.sort();
for typename in keys:
klass = typeclasses[typename];
consts.append({
'name': 'type_%s__%s' % (klass, typename),
'value': typename
});
emit_set(out, consts);
out.write('/* class hierarchy information */\n');
consts = [];
keys = klasses.keys();
keys.sort();
for klassname in keys:
pklass = klasses[klassname]['parent'];
bklass = get_base_class(klassname);
if (bklass != 'Object'):
continue;
if (pklass == None):
continue;
consts.append({
'name': 'parent_%s__%s' % (klassname, pklass),
'value': 0
});
emit_set(out, consts);
out.write('/* field information */\n');
emit_set(out, fields);
out.write(footer);
if (len(sys.argv) < 4):
print('usage: %s output.cc objects.h objects-inl.h' % sys.argv[0]);
sys.exit(2);
load_objects();
load_fields();
emit_config();
|
joerg84/arangodb
|
3rdParty/V8/v5.7.0.0/tools/gen-postmortem-metadata.py
|
Python
|
apache-2.0
| 25,114 | 0.01537 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLImportDataOperator,
AutoMLTrainModelOperator,
)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_SENTIMENT_BUCKET = os.environ.get("GCP_AUTOML_SENTIMENT_BUCKET", "gs://INVALID BUCKET NAME")
# Example values
DATASET_ID = ""
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_sentiment_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_text_sentiment_dataset",
"text_sentiment_dataset_metadata": {"sentiment_max": 10},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_SENTIMENT_BUCKET]}}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Natural Language Text Sentiment
with models.DAG(
"example_automl_text_sentiment",
schedule_interval=None, # Override to match your needs
start_date=datetime(2021, 1, 1),
catchup=False,
user_defined_macros={"extract_object_id": extract_object_id},
tags=['example'],
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
dataset_id = create_dataset_task.output['dataset_id']
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION)
model_id = create_model.output['model_id']
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
import_dataset_task >> create_model
delete_model_task >> delete_datasets_task
# Task dependencies created via `XComArgs`:
# create_dataset_task >> import_dataset_task
# create_dataset_task >> create_model
# create_model >> delete_model_task
# create_dataset_task >> delete_datasets_task
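# A sketch of the equivalent explicit wiring (redundant with the XComArg
# edges listed above, shown only for orientation):
#   create_dataset_task >> [import_dataset_task, create_model, delete_datasets_task]
#   create_model >> delete_model_task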
|
Acehaidrey/incubator-airflow
|
airflow/providers/google/cloud/example_dags/example_automl_nl_text_sentiment.py
|
Python
|
apache-2.0
| 3,589 | 0.001115 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import (get_location_type,
get_enum_type,
tags_type,
get_three_state_flag)
from azure.cli.command_modules.ams._completers import (get_role_definition_name_completion_list,
get_cdn_provider_completion_list,
get_default_streaming_policies_completion_list,
get_presets_definition_name_completion_list,
get_allowed_languages_for_preset_completion_list,
get_protocols_completion_list,
get_token_type_completion_list,
get_fairplay_rentalandlease_completion_list,
get_token_completion_list,
get_mru_type_completion_list,
get_encoding_types_list,
get_allowed_resolutions_completion_list,
get_allowed_transcription_languages,
get_allowed_analysis_modes,
get_stretch_mode_types_list,
get_storage_authentication_allowed_values_list)
from azure.cli.command_modules.ams._validators import (validate_storage_account_id,
datetime_format,
validate_correlation_data,
validate_token_claim,
validate_output_assets,
validate_archive_window_length,
validate_key_frame_interval_duration)
from azure.mgmt.media.models import (Priority, AssetContainerPermission, LiveEventInputProtocol, StreamOptionsFlag, OnErrorType, InsightsType)
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], id_part='name', help='The name of the Azure Media Services account.', metavar='NAME')
account_name_arg_type = CLIArgumentType(options_list=['--account-name', '-a'], id_part='name', help='The name of the Azure Media Services account.', metavar='ACCOUNT_NAME')
storage_account_arg_type = CLIArgumentType(options_list=['--storage-account'], validator=validate_storage_account_id, metavar='STORAGE_NAME')
password_arg_type = CLIArgumentType(options_list=['--password', '-p'], metavar='PASSWORD_NAME')
transform_name_arg_type = CLIArgumentType(options_list=['--transform-name', '-t'], metavar='TRANSFORM_NAME')
expiry_arg_type = CLIArgumentType(options_list=['--expiry'], type=datetime_format, metavar='EXPIRY_TIME')
default_policy_name_arg_type = CLIArgumentType(options_list=['--content-key-policy-name'], help='The default content key policy name used by the streaming locator.', metavar='DEFAULT_CONTENT_KEY_POLICY_NAME')
archive_window_length_arg_type = CLIArgumentType(options_list=['--archive-window-length'], validator=validate_archive_window_length, metavar='ARCHIVE_WINDOW_LENGTH')
    key_frame_interval_duration_arg_type = CLIArgumentType(options_list=['--key-frame-interval-duration'], validator=validate_key_frame_interval_duration, metavar='KEY_FRAME_INTERVAL_DURATION')
correlation_data_type = CLIArgumentType(validator=validate_correlation_data, help="Space-separated correlation data in 'key[=value]' format. This customer provided data will be returned in Job and JobOutput state events.", nargs='*', metavar='CORRELATION_DATA')
token_claim_type = CLIArgumentType(validator=validate_token_claim, help="Space-separated required token claims in '[key=value]' format.", nargs='*', metavar='ASYMMETRIC TOKEN CLAIMS')
output_assets_type = CLIArgumentType(validator=validate_output_assets, nargs='*', help="Space-separated assets in 'assetName=label' format. An asset without label can be sent like this: 'assetName='", metavar='OUTPUT_ASSETS')
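    # Illustrative command-line shapes for the formats above (hypothetical
    # values; pairs are space-separated on the real command line):
    #   --correlation-data env=prod owner=media-team      (key[=value] pairs)
    #   --token-claims urn:microsoft:claim=value          ([key=value] claims)
    #   --output-assets encoded=MyLabel thumbnails=       (assetName=label)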
with self.argument_context('ams') as c:
c.argument('account_name', name_arg_type)
with self.argument_context('ams account') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group, required=False)
c.argument('tags', arg_type=tags_type)
with self.argument_context('ams account create') as c:
c.argument('storage_account', storage_account_arg_type,
help='The name or resource ID of the primary storage account to attach to the Azure Media Services account. The storage account MUST be in the same Azure subscription as the Media Services account. It is strongly recommended that the storage account be in the same resource group as the Media Services account. Blob only accounts are not allowed as primary.')
c.argument('assign_identity', options_list=['--mi-system-assigned'], action='store_true', help='Set the system managed identity on the media services account.')
with self.argument_context('ams account check-name') as c:
c.argument('account_name', options_list=['--name', '-n'], id_part=None,
help='The name of the Azure Media Services account.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
with self.argument_context('ams account mru') as c:
c.argument('type', help='Speed of reserved processing units. The cost of media encoding depends on the pricing tier you choose. See https://azure.microsoft.com/pricing/details/media-services/ for further details. Allowed values: {}.'.format(", ".join(get_mru_type_completion_list())))
c.argument('count', type=int, help='The number of the encoding reserved units that you want to be provisioned for this account for concurrent tasks (one unit equals one task).')
with self.argument_context('ams account storage') as c:
c.argument('account_name', account_name_arg_type)
c.argument('storage_account', name_arg_type,
help='The name or resource ID of the secondary storage account to detach from the Azure Media Services account.',
validator=validate_storage_account_id)
with self.argument_context('ams account storage sync-storage-keys') as c:
c.argument('storage_account_id', required=True, help="The storage account Id.")
with self.argument_context('ams account storage set-authentication') as c:
c.argument('storage_auth', arg_type=get_enum_type(get_storage_authentication_allowed_values_list()), help='The type of authentication for the storage account associated with the media services account.')
with self.argument_context('ams account sp') as c:
c.argument('account_name', account_name_arg_type)
c.argument('sp_name', name_arg_type,
help="The app name or app URI to associate the RBAC with. If not present, a default name like '{amsaccountname}-access-sp' will be generated.")
c.argument('new_sp_name', help="The new app name or app URI to update the RBAC with.")
c.argument('sp_password', password_arg_type,
help="The password used to log in. Also known as 'Client Secret'. If not present, a random secret will be generated.")
c.argument('role', help='The role of the service principal.', completer=get_role_definition_name_completion_list)
c.argument('xml', action='store_true', help='Enables xml output format.')
c.argument('years', help='Number of years for which the secret will be valid. Default: 1 year.', type=int, default=None)
with self.argument_context('ams account encryption') as c:
c.argument('account_name', account_name_arg_type)
c.argument('key_type', help='The encryption key source (provider). Allowed values: SystemKey, CustomerKey.', required=True)
c.argument('key_identifier', help='The URL of the Key Vault key used to encrypt the account. The key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key without a version (for example https://vault/keys/mykey).')
c.argument('current_key_id', help='The current key used to encrypt the Media Services account, including the key version.')
with self.argument_context('ams transform') as c:
c.argument('account_name', account_name_arg_type)
c.argument('transform_name', name_arg_type, id_part='child_name_1',
help='The name of the transform.')
c.argument('preset', help='Preset that describes the operations that will be used to modify, transcode, or extract insights from the source file to generate the transform output. Allowed values: {}. In addition to the allowed values, you can also pass a path to a custom Standard Encoder preset JSON file. See https://docs.microsoft.com/rest/api/media/transforms/createorupdate#standardencoderpreset for further details on the settings to use to build a custom preset.'
.format(", ".join(get_presets_definition_name_completion_list())))
        c.argument('insights_to_extract', arg_group='Video Analyzer', arg_type=get_enum_type(InsightsType), help='The type of insights to be extracted. If not set, the type will be selected based on the content type: if the content is audio only, only audio insights will be extracted; if it is video only, only video insights will be extracted.')
c.argument('video_analysis_mode', arg_group='Video Analyzer', help='Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen. Allowed values: {}'.format(", ".join(get_allowed_analysis_modes())))
c.argument('audio_language', arg_group='Audio/Video Analyzer', help='The language for the audio payload in the input using the BCP-47 format of \"language tag-region\" (e.g: en-US). If not specified, automatic language detection would be employed. This feature currently supports English, Chinese, French, German, Italian, Japanese, Spanish, Russian, and Portuguese. The automatic detection works best with audio recordings with clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to English. Allowed values: {}.'
.format(", ".join(get_allowed_languages_for_preset_completion_list())))
c.argument('audio_analysis_mode', arg_group='Audio/Video Analyzer', help='Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen. Allowed values: {}.'.format(", ".join(get_allowed_analysis_modes())))
c.argument('resolution', arg_group='Face Detector', help='Specifies the maximum resolution at which your video is analyzed. The default behavior is "SourceResolution," which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to "StandardDefinition" will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Allowed values: {}.'
.format(", ".join(get_allowed_resolutions_completion_list())))
c.argument('relative_priority', arg_type=get_enum_type(Priority), help='Sets the relative priority of the transform outputs within a transform. This sets the priority that the service uses for processing TransformOutputs. The default priority is Normal.')
c.argument('on_error', arg_type=get_enum_type(OnErrorType), help="A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with 'ContinueJob'. The default is 'StopProcessingJob'.")
c.argument('description', help='The description of the transform.')
with self.argument_context('ams transform output remove') as c:
c.argument('output_index', help='The element index of the output to remove.',
type=int, default=None)
with self.argument_context('ams transform list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams asset') as c:
c.argument('account_name', account_name_arg_type)
c.argument('asset_name', name_arg_type, id_part='child_name_1',
help='The name of the asset.')
with self.argument_context('ams asset list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams asset create') as c:
c.argument('alternate_id', help='The alternate id of the asset.')
c.argument('description', help='The asset description.')
c.argument('asset_name', name_arg_type, help='The name of the asset.')
c.argument('storage_account', help='The name of the storage account.')
c.argument('container', help='The name of the asset blob container.')
with self.argument_context('ams asset update') as c:
c.argument('alternate_id', help='The alternate id of the asset.')
c.argument('description', help='The asset description.')
with self.argument_context('ams asset get-sas-urls') as c:
c.argument('permissions', arg_type=get_enum_type(AssetContainerPermission),
help='The permissions to set on the SAS URL.')
c.argument('expiry_time', expiry_arg_type, help="Specifies the UTC datetime (Y-m-d'T'H:M:S'Z') at which the SAS becomes invalid. This must be less than 24 hours from the current time.")
with self.argument_context('ams asset-filter') as c:
c.argument('account_name', account_name_arg_type)
c.argument('asset_name', help='The name of the asset.', id_part='child_name_1')
c.argument('filter_name', name_arg_type, id_part='child_name_2', help='The name of the asset filter.')
c.argument('start_timestamp', arg_group='Presentation Time Range',
                   help='Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is the timescale, so a startTimestamp of 150000000 would be for 15 seconds. Use startTimestamp and endTimestamp to trim the fragments that will be in the playlist (manifest). For example, startTimestamp=40000000 and endTimestamp=100000000 using the default timescale will generate a playlist that contains fragments from between 4 seconds and 10 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest.')
c.argument('end_timestamp', arg_group='Presentation Time Range',
                   help='Applies to Video on Demand (VoD). For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is the timescale, so an endTimestamp of 1800000000 would be for 3 minutes. Use startTimestamp and endTimestamp to trim the fragments that will be in the playlist (manifest). For example, startTimestamp=40000000 and endTimestamp=100000000 using the default timescale will generate a playlist that contains fragments from between 4 seconds and 10 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest.')
c.argument('presentation_window_duration', arg_group='Presentation Time Range',
                   help='Applies to Live Streaming only. Use presentationWindowDuration to apply a sliding window of fragments to include in a playlist. The unit for this property is timescale (see below). For example, set presentationWindowDuration=1200000000 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds.')
c.argument('live_backoff_duration', arg_group='Presentation Time Range',
help='Applies to Live Streaming only. This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit for this property is timescale (see below). The maximum live back off duration is 300 seconds (3000000000). For example, a value of 2000000000 means that the latest available content is 20 seconds delayed from the real live edge.')
c.argument('timescale', arg_group='Presentation Time Range',
                   help='Applies to all timestamps and durations in a Presentation Time Range, specified as the number of increments in one second. Default is 10000000 - ten million increments in one second, where each increment would be 100 nanoseconds long. For example, if you want to set a startTimestamp at 30 seconds, you would use a value of 300000000 when using the default timescale.')
c.argument('force_end_timestamp', arg_group='Presentation Time Range', arg_type=get_three_state_flag(),
help='Applies to Live Streaming only. Indicates whether the endTimestamp property must be present. If true, endTimestamp must be specified or a bad request code is returned. Allowed values: false, true.')
c.argument('bitrate', help='The first quality bitrate.', deprecate_info=c.deprecate(target='--bitrate', redirect='--first-quality', hide=True))
c.argument('first_quality', help='The first quality (lowest) bitrate to include in the manifest.')
c.argument('tracks', help='The JSON representing the track selections. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/assetfilters/createorupdate#filtertrackselection')
with self.argument_context('ams asset-filter list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams job') as c:
c.argument('account_name', account_name_arg_type)
c.argument('transform_name', transform_name_arg_type, id_part='child_name_1',
help='The name of the transform.')
c.argument('job_name', name_arg_type, id_part='child_name_2',
help='The name of the job.')
c.argument('description', help='The job description.')
c.argument('priority', arg_type=get_enum_type(Priority),
help='The priority with which the job should be processed.')
with self.argument_context('ams job list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams job start') as c:
c.argument('correlation_data', arg_type=correlation_data_type)
c.argument('input_asset_name',
arg_group='Asset Job Input',
help='The name of the input asset.')
c.argument('output_assets', arg_type=output_assets_type)
c.argument('base_uri',
arg_group='Http Job Input',
help='Base uri for http job input. It will be concatenated with provided file names. If no base uri is given, then the provided file list is assumed to be fully qualified uris.')
c.argument('files',
nargs='+',
help='Space-separated list of files. It can be used to tell the service to only use the files specified from the input asset.')
c.argument('label', help="A label that is assigned to a Job Input that is used to satisfy a reference used in the Transform. For example, a Transform can be authored to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'.")
with self.argument_context('ams job cancel') as c:
c.argument('delete', action='store_true', help='Delete the job being cancelled.')
with self.argument_context('ams content-key-policy') as c:
c.argument('account_name', account_name_arg_type)
c.argument('content_key_policy_name', name_arg_type, id_part='child_name_1',
help='The content key policy name.')
c.argument('description', help='The content key policy description.')
c.argument('clear_key_configuration',
action='store_true',
arg_group='Clear Key Configuration (AES Encryption)',
help='Use Clear Key configuration, a.k.a AES encryption. It\'s intended for non-DRM keys.')
c.argument('open_restriction',
action='store_true',
arg_group='Open Restriction',
help='Use open restriction. License or key will be delivered on every request. Not recommended for production environments.')
c.argument('policy_option_name', help='The content key policy option name.')
c.argument('policy_option_id', help='The content key policy option identifier. This value can be obtained from "policyOptionId" property by running a show operation on a content key policy resource.')
c.argument('issuer', arg_group='Token Restriction', help='The token issuer.')
c.argument('audience', arg_group='Token Restriction', help='The audience for the token.')
c.argument('token_key', arg_group='Token Restriction', help='Either a string (for symmetric key) or a filepath to a certificate (x509) or public key (rsa). Must be used in conjunction with --token-key-type.')
c.argument('token_key_type', arg_group='Token Restriction', help='The type of the token key to be used for the primary verification key. Allowed values: {}'.format(", ".join(get_token_completion_list())))
c.argument('add_alt_token_key', arg_group='Token Restriction', help='Creates an alternate token key with either a string (for symmetric key) or a filepath to a certificate (x509) or public key (rsa). Must be used in conjunction with --add-alt-token-key-type.')
c.argument('add_alt_token_key_type', arg_group='Token Restriction', help='The type of the token key to be used for the alternate verification key. Allowed values: {}'.format(", ".join(get_token_completion_list())))
c.argument('alt_symmetric_token_keys', nargs='+', arg_group='Token Restriction', help='Space-separated list of alternate symmetric token keys.')
c.argument('alt_rsa_token_keys', nargs='+', arg_group='Token Restriction', help='Space-separated list of alternate rsa token keys.')
c.argument('alt_x509_token_keys', nargs='+', arg_group='Token Restriction', help='Space-separated list of alternate x509 certificate token keys.')
c.argument('token_claims', arg_group='Token Restriction', arg_type=token_claim_type)
c.argument('token_type', arg_group='Token Restriction',
help='The type of token. Allowed values: {}.'.format(", ".join(get_token_type_completion_list())))
c.argument('open_id_connect_discovery_document', arg_group='Token Restriction', help='The OpenID connect discovery document.')
c.argument('widevine_template', arg_group='Widevine Configuration', help='JSON Widevine license template. Use @{file} to load from a file.')
c.argument('fp_playback_duration_seconds', arg_group='FairPlay Configuration', help='Playback duration')
c.argument('fp_storage_duration_seconds', arg_group='FairPlay Configuration', help='Storage duration')
c.argument('ask', arg_group='FairPlay Configuration', help='The key that must be used as FairPlay Application Secret Key, which is a 32 character hex string.')
c.argument('fair_play_pfx_password', arg_group='FairPlay Configuration', help='The password encrypting FairPlay certificate in PKCS 12 (pfx) format.')
c.argument('fair_play_pfx', arg_group='FairPlay Configuration', help='The filepath to a FairPlay certificate file in PKCS 12 (pfx) format (including private key).')
        c.argument('rental_and_lease_key_type', arg_group='FairPlay Configuration', help='The rental and lease key type. Allowed values: {}.'.format(", ".join(get_fairplay_rentalandlease_completion_list())))
c.argument('rental_duration', arg_group='FairPlay Configuration', help='The rental duration. Must be greater than or equal to 0.')
c.argument('play_ready_template', arg_group='PlayReady Configuration', help='JSON PlayReady license template. Use @{file} to load from a file.')
with self.argument_context('ams content-key-policy list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams content-key-policy show') as c:
c.argument('with_secrets',
action='store_true',
help='Include secret values of the content key policy.')
with self.argument_context('ams streaming-locator') as c:
c.argument('account_name', account_name_arg_type)
c.argument('default_content_key_policy_name', default_policy_name_arg_type)
c.argument('streaming_locator_name', name_arg_type, id_part='child_name_1',
help='The name of the streaming locator.')
c.argument('asset_name',
help='The name of the asset used by the streaming locator.')
c.argument('streaming_policy_name',
                   help='The name of the streaming policy used by the streaming locator. You can either create one with `az ams streaming-policy create` or use any of the predefined policies: {}'.format(", ".join(get_default_streaming_policies_completion_list())))
c.argument('start_time', type=datetime_format,
help="The ISO 8601 DateTime start time (Y-m-d'T'H:M:S'Z') of the streaming locator.")
c.argument('end_time', type=datetime_format,
help="The ISO 8601 DateTime end time (Y-m-d'T'H:M:S'Z') of the streaming locator.")
c.argument('streaming_locator_id', help='The identifier of the streaming locator.')
c.argument('alternative_media_id', help='An alternative media identifier associated with the streaming locator.')
c.argument('content_keys', help='JSON string with the content keys to be used by the streaming locator. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/streaminglocators/create#streaminglocatorcontentkey')
c.argument('filters', nargs='+', help='A space-separated list of asset filter names and/or account filter names.')
with self.argument_context('ams streaming-locator list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams streaming-policy') as c:
c.argument('account_name', account_name_arg_type)
c.argument('streaming_policy_name', name_arg_type, id_part='child_name_1', help='The name of the streaming policy.')
c.argument('default_content_key_policy_name', help='Default Content Key used by current streaming policy.')
c.argument('no_encryption_protocols', nargs='+', help='Space-separated list of enabled protocols for NoEncryption. Allowed values: {}.'.format(", ".join(get_protocols_completion_list())))
c.argument('envelope_protocols', nargs='+', arg_group='Envelope Encryption', help='Space-separated list of enabled protocols for Envelope Encryption. Allowed values: {}.'.format(", ".join(get_protocols_completion_list())))
c.argument('envelope_clear_tracks', arg_group='Envelope Encryption', help='The JSON representing which tracks should not be encrypted. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/streamingpolicies/create#trackselection')
c.argument('envelope_key_to_track_mappings', arg_group='Envelope Encryption', help='The JSON representing a list of StreamingPolicyContentKey. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/streamingpolicies/create#streamingpolicycontentkey')
c.argument('envelope_default_key_label', arg_group='Envelope Encryption', help='Label used to specify Content Key when creating a streaming locator.')
c.argument('envelope_default_key_policy_name', arg_group='Envelope Encryption', help='Policy used by Default Key.')
        c.argument('envelope_template', arg_group='Envelope Encryption', help='The KeyAcquisitionUrlTemplate is used to point to a user-specified service that delivers content keys.')
c.argument('cenc_protocols', nargs='+', arg_group='Common Encryption CENC', help='Space-separated list of enabled protocols for Common Encryption CENC. Allowed values: {}.'.format(", ".join(get_protocols_completion_list())))
c.argument('cenc_default_key_label', arg_group='Common Encryption CENC', help='Label to specify Default Content Key for an encryption scheme.')
c.argument('cenc_default_key_policy_name', arg_group='Common Encryption CENC', help='Policy used by Default Content Key.')
c.argument('cenc_clear_tracks', arg_group='Common Encryption CENC', help='The JSON representing which tracks should not be encrypted. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/streamingpolicies/create#trackselection')
c.argument('cenc_key_to_track_mappings', arg_group='Common Encryption CENC', help='The JSON representing a list of StreamingPolicyContentKey. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/streamingpolicies/create#streamingpolicycontentkey')
c.argument('cenc_play_ready_attributes', arg_group='Common Encryption CENC', help='Custom attributes for PlayReady.')
c.argument('cenc_widevine_template', arg_group='Common Encryption CENC', help='The custom license acquisition URL template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.')
c.argument('cenc_play_ready_template', arg_group='Common Encryption CENC', help='The custom license acquisition URL template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.')
c.argument('cenc_disable_widevine', arg_group='Common Encryption CENC', arg_type=get_three_state_flag(), help='If specified, no Widevine cenc DRM will be configured. If --cenc-disable-widevine is set, --cenc-disable-play-ready cannot also be set.')
c.argument('cenc_disable_play_ready', arg_group='Common Encryption CENC', arg_type=get_three_state_flag(), help='If specified, no PlayReady cenc DRM will be configured. If --cenc-disable-play-ready is set, --cenc-disable-widevine cannot also be set.')
c.argument('cbcs_protocols', nargs='+', arg_group='Common Encryption CBCS', help='Space-separated list of enabled protocols for Common Encryption CBCS. Allowed values: {}.'.format(", ".join(get_protocols_completion_list())))
c.argument('cbcs_default_key_label', arg_group='Common Encryption CBCS', help='Label to specify Default Content Key for an encryption scheme.')
c.argument('cbcs_default_key_policy_name', arg_group='Common Encryption CBCS', help='Policy used by Default Content Key.')
c.argument('cbcs_clear_tracks', arg_group='Common Encryption CBCS', help='The JSON representing which tracks should not be encrypted. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/streamingpolicies/create#trackselection')
c.argument('cbcs_key_to_track_mappings', arg_group='Common Encryption CBCS', help='The JSON representing a list of StreamingPolicyContentKey. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/streamingpolicies/create#streamingpolicycontentkey')
c.argument('cbcs_play_ready_attributes', arg_group='Common Encryption CBCS', help='Custom attributes for PlayReady.', deprecate_info=c.deprecate(hide=True))
c.argument('cbcs_play_ready_template', arg_group='Common Encryption CBCS', help='The custom license acquisition URL template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.', deprecate_info=c.deprecate(hide=True))
c.argument('cbcs_widevine_template', arg_group='Common Encryption CBCS', help='The custom license acquisition URL template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.', deprecate_info=c.deprecate(hide=True))
c.argument('cbcs_fair_play_template', arg_group='Common Encryption CBCS', help='The custom license acquisition URL template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.')
c.argument('cbcs_fair_play_allow_persistent_license', arg_group='Common Encryption CBCS', arg_type=get_three_state_flag(), help='Allows the license to be persistent or not.')
with self.argument_context('ams streaming-policy list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams streaming-endpoint') as c:
c.argument('streaming_endpoint_name', name_arg_type, id_part='child_name_1',
help='The name of the streaming endpoint.')
c.argument('account_name', account_name_arg_type)
c.argument('tags', arg_type=tags_type)
c.argument('description', help='The streaming endpoint description.')
c.argument('scale_units', help='The number of scale units for Premium StreamingEndpoints. For Standard StreamingEndpoints, set this value to 0. Use the Scale operation to adjust this value for Premium StreamingEndpoints.')
c.argument('availability_set_name', help='The name of the AvailabilitySet used with this StreamingEndpoint for high availability streaming. This value can only be set at creation time.')
c.argument('max_cache_age', help='Max cache age.')
c.argument('custom_host_names', nargs='+', help='Space-separated list of custom host names for the streaming endpoint. Use "" to clear existing list.')
c.argument('cdn_provider', arg_group='CDN Support', help='The CDN provider name. Allowed values: {}.'.format(", ".join(get_cdn_provider_completion_list())))
c.argument('cdn_profile', arg_group='CDN Support', help='The CDN profile name.')
c.argument('client_access_policy', arg_group='Cross Site Access Policies',
help='The XML representing the clientaccesspolicy data used by Microsoft Silverlight and Adobe Flash. Use @{file} to load from a file. For further information about the XML structure please refer to documentation on https://docs.microsoft.com/rest/api/media/operations/crosssiteaccesspolicies')
c.argument('cross_domain_policy', arg_group='Cross Site Access Policies',
help='The XML representing the crossdomain data used by Silverlight. Use @{file} to load from a file. For further information about the XML structure please refer to documentation on https://docs.microsoft.com/rest/api/media/operations/crosssiteaccesspolicies')
c.argument('auto_start', action='store_true', help='The flag indicates if the resource should be automatically started on creation.')
c.argument('ips', nargs='+', arg_group='Access Control Support', help='Space-separated IP addresses for access control. Allowed IP addresses can be specified as either a single IP address (e.g. "10.0.0.1") or as an IP range using an IP address and a CIDR subnet mask (e.g. "10.0.0.1/22"). Use "" to clear existing list. If no IP addresses are specified any IP address will be allowed.')
c.argument('disable_cdn', arg_group='CDN Support', action='store_true', help='Use this flag to disable CDN for the streaming endpoint.')
with self.argument_context('ams streaming-endpoint list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams streaming-endpoint scale') as c:
c.argument('scale_unit', options_list=['--scale-units'], help='The number of scale units for Premium StreamingEndpoints.')
with self.argument_context('ams streaming-endpoint akamai') as c:
c.argument('identifier', help='The identifier for the authentication key. This is the nonce provided by Akamai.')
c.argument('base64_key', help='Base64-encoded authentication key that will be used by the CDN. The authentication key provided by Akamai is an ASCII encoded string, and must be converted to bytes and then base64 encoded.')
c.argument('expiration', type=datetime_format,
help='The ISO 8601 DateTime value that specifies when the Akamai authentication expires.')
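    # Illustrative (unverified) usage of the Akamai authentication-key arguments
    # above; all values are placeholders:
    #   az ams streaming-endpoint akamai add -g myGroup -a myAmsAccount \
    #       -n myEndpoint --identifier myKeyId --base64-key dGVzdA== \
    #       --expiration 2030-01-01T00:00:00Z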
with self.argument_context('ams streaming-endpoint list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams live-event') as c:
c.argument('account_name', account_name_arg_type)
c.argument('live_event_name', name_arg_type, id_part='child_name_1',
help='The name of the live event.')
c.argument('streaming_protocol', arg_type=get_enum_type(LiveEventInputProtocol),
arg_group='Input', help='The streaming protocol for the live event. This value is specified at creation time and cannot be updated.')
c.argument('auto_start', action='store_true', help='The flag indicates if the resource should be automatically started on creation.')
c.argument('encoding_type', arg_group='Encoding', help='The encoding type for live event. This value is specified at creation time and cannot be updated. Allowed values: {}.'.format(", ".join(get_encoding_types_list())))
c.argument('preset_name', arg_group='Encoding', help='The encoding preset name. This value is specified at creation time and cannot be updated.')
c.argument('stretch_mode', arg_group='Encoding', help='Specifies how the input video will be resized to fit the desired output resolution(s). Default is None. Allowed values: {}.'.format(", ".join(get_stretch_mode_types_list())))
        c.argument('key_frame_interval', arg_group='Encoding', help='Use an ISO 8601 time value between 0.5 and 20 seconds to specify the output fragment length for the video and audio tracks of an encoding live event. For example, use PT2S to indicate 2 seconds. For the video track it also defines the key frame interval, or the length of a GOP (group of pictures). If this value is not set for an encoding live event, the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live events.')
c.argument('tags', arg_type=tags_type)
c.argument('key_frame_interval_duration', key_frame_interval_duration_arg_type, arg_group='Input', validator=validate_key_frame_interval_duration,
                   help='ISO 8601 timespan duration of the key frame interval in seconds. The value should be an integer in the range of 1 (PT1S or 00:00:01) to 30 (PT30S or 00:00:30) seconds.')
c.argument('access_token', arg_group='Input', help='A unique identifier for a stream. This can be specified at creation time but cannot be updated. If omitted, the service will generate a unique value.')
c.argument('description', help='The live event description.')
c.argument('ips', nargs='+', arg_group='Input', help='Space-separated IP addresses for access control. Allowed IP addresses can be specified as either a single IP address (e.g. "10.0.0.1") or as an IP range using an IP address and a CIDR subnet mask (e.g. "10.0.0.1/22"). Use "" to clear existing list. Use "AllowAll" to allow all IP addresses. Allowing all IPs is not recommended for production environments.')
c.argument('preview_ips', nargs='+', arg_group='Preview', help='Space-separated IP addresses for access control. Allowed IP addresses can be specified as either a single IP address (e.g. "10.0.0.1") or as an IP range using an IP address and a CIDR subnet mask (e.g. "10.0.0.1/22"). Use "" to clear existing list. Use "AllowAll" to allow all IP addresses. Allowing all IPs is not recommended for production environments.')
c.argument('preview_locator', arg_group='Preview', help='The identifier of the preview locator in Guid format. Specifying this at creation time allows the caller to know the preview locator url before the event is created. If omitted, the service will generate a random identifier. This value cannot be updated once the live event is created.')
c.argument('streaming_policy_name', arg_group='Preview', help='The name of streaming policy used for the live event preview. This can be specified at creation time but cannot be updated.')
c.argument('alternative_media_id', arg_group='Preview', help='An Alternative Media Identifier associated with the StreamingLocator created for the preview. This value is specified at creation time and cannot be updated. The identifier can be used in the CustomLicenseAcquisitionUrlTemplate or the CustomKeyAcquisitionUrlTemplate of the StreamingPolicy specified in the StreamingPolicyName field.')
c.argument('client_access_policy', arg_group='Cross Site Access Policies', help='Filepath to the clientaccesspolicy.xml used by Microsoft Silverlight and Adobe Flash. Use @{file} to load from a file.')
c.argument('cross_domain_policy', arg_group='Cross Site Access Policies', help='Filepath to the crossdomain.xml used by Microsoft Silverlight and Adobe Flash. Use @{file} to load from a file.')
c.argument('stream_options', nargs='+', arg_type=get_enum_type(StreamOptionsFlag), help='The options to use for the LiveEvent. This value is specified at creation time and cannot be updated.')
        c.argument('transcription_lang', help='Live transcription language for the live event. Allowed values: {}. See https://go.microsoft.com/fwlink/?linkid=2133742 for more information about the live transcription feature.'.format(", ".join(get_allowed_transcription_languages())))
c.argument('use_static_hostname', help='Specifies whether a static hostname would be assigned to the live event preview and ingest endpoints. This value can only be updated if the live event is in Standby state. If hostname_prefix is not specified, the live event name will be used as the hostname prefix.')
c.argument('hostname_prefix', help='When useStaticHostname is set to true, hostname_prefix specifies the first part of the hostname assigned to the live event preview and ingest endpoints. The final hostname would be a combination of this prefix, the media service account name and a short code for the Azure Media Services data center.')
c.argument('remove_outputs_on_stop', action='store_true', help='Remove live outputs on stop.')
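    # Illustrative (unverified) creation call exercising a subset of the live-event
    # arguments above; names are placeholders and flag spellings are inferred:
    #   az ams live-event create -g myGroup -a myAmsAccount -n myLiveEvent \
    #       --streaming-protocol RTMP --ips AllowAll --auto-start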
with self.argument_context('ams live-event list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams live-output') as c:
c.argument('account_name', account_name_arg_type)
c.argument('live_event_name', id_part='child_name_1',
help='The name of the live event.')
c.argument('live_output_name', name_arg_type, id_part='child_name_2',
help='The name of the live output.')
with self.argument_context('ams live-output list') as c:
c.argument('account_name', id_part=None)
with self.argument_context('ams live-output create') as c:
c.argument('asset_name', help='The name of the asset.')
c.argument('archive_window_length', archive_window_length_arg_type, validator=validate_archive_window_length,
help="ISO 8601 timespan duration of the archive window length. This is the duration that customer want to retain the recorded content. Minimum window is 5 minutes (PT5M or 00:05:00). Maximum window is 25 hours (PT25H or 25:00:00). For example, to retain the output for 10 minutes, use PT10M or 00:10:00")
c.argument('manifest_name', help='The manifest file name. If not provided, the service will generate one automatically.')
c.argument('description', help='The live output description.')
c.argument('fragments_per_ts_segment', help='The number of fragments per HLS segment.')
c.argument('output_snap_time', help='The output snapshot time.')
with self.argument_context('ams account-filter') as c:
c.argument('account_name', account_name_arg_type)
c.argument('filter_name', name_arg_type, id_part='child_name_1', help='The name of the account filter.')
c.argument('start_timestamp', arg_group='Presentation Time Range',
                   help='Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is the timescale, so a startTimestamp of 150000000 would be for 15 seconds. Use startTimestamp and endTimestamp to trim the fragments that will be in the playlist (manifest). For example, startTimestamp=40000000 and endTimestamp=100000000 using the default timescale will generate a playlist that contains fragments from between 4 seconds and 10 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest.')
c.argument('end_timestamp', arg_group='Presentation Time Range',
help='Applies to Video on Demand (VoD). For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is the timescale, so an endTimestamp of 1800000000 would be for 3 minutes. Use startTimestamp and endTimestamp to trim the fragments that will be in the playlist (manifest). For example, startTimestamp=40000000 and endTimestamp=100000000 using the default timescale will generate a playlist that contains fragments from between 4 seconds and 10 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest.')
c.argument('presentation_window_duration', arg_group='Presentation Time Range',
help='Applies to Live Streaming only. Use presentationWindowDuration to apply a sliding window of fragments to include in a playlist. The unit for this property is timescale (see below). For example, set presentationWindowDuration=1200000000 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds.')
c.argument('live_backoff_duration', arg_group='Presentation Time Range',
                   help='Applies to Live Streaming only. This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit for this property is timescale (see below). The maximum live backoff duration is 300 seconds (3000000000). For example, a value of 2000000000 means that the latest available content is 20 seconds delayed from the real live edge.')
c.argument('timescale', arg_group='Presentation Time Range',
help='Applies to all timestamps and durations in a Presentation Time Range, specified as the number of increments in one second. Default is 10000000 - ten million increments in one second, where each increment would be 100 nanoseconds long. For example, if you want to set a startTimestamp at 30 seconds, you would use a value of 300000000 when using the default timescale.')
c.argument('force_end_timestamp', arg_group='Presentation Time Range', arg_type=get_three_state_flag(),
help='Applies to Live Streaming only. Indicates whether the endTimestamp property must be present. If true, endTimestamp must be specified or a bad request code is returned. Allowed values: false, true.')
c.argument('bitrate', help='The first quality bitrate.', deprecate_info=c.deprecate(target='--bitrate', redirect='--first-quality', hide=True))
c.argument('first_quality', help='The first quality (lowest) bitrate to include in the manifest.')
c.argument('tracks', help='The JSON representing the track selections. Use @{file} to load from a file. For further information about the JSON structure please refer to swagger documentation on https://docs.microsoft.com/rest/api/media/accountfilters/createorupdate#filtertrackselection')
with self.argument_context('ams account-filter list') as c:
c.argument('account_name', id_part=None)
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/ams/_params.py
|
Python
|
mit
| 51,064 | 0.004015 |
import os
os.chdir('C:/Users/Ramaneek/SkyDrive/Documents/University/Third Year/CSC320/project 2/')
###########################################################################
## Handout painting code.
###########################################################################
from PIL import Image
from pylab import *
from canny import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import random
import time
import matplotlib.image as mpimg
import scipy as sci
import scipy.misc
from scipy.signal import convolve2d as conv
np.set_printoptions(threshold = np.nan)
def colorImSave(filename, array):
imArray = sci.misc.imresize(array, 3., 'nearest')
if (len(imArray.shape) == 2):
sci.misc.imsave(filename, cm.jet(imArray))
else:
sci.misc.imsave(filename, imArray)
def markStroke(mrkd, p0, p1, rad, val):
# Mark the pixels that will be painted by
# a stroke from pixel p0 = (x0, y0) to pixel p1 = (x1, y1).
# These pixels are set to val in the ny x nx double array mrkd.
# The paintbrush is circular with radius rad>0
sizeIm = mrkd.shape
sizeIm = sizeIm[0:2];
nx = sizeIm[1]
ny = sizeIm[0]
p0 = p0.flatten('F')
p1 = p1.flatten('F')
rad = max(rad,1)
# Bounding box
concat = np.vstack([p0,p1])
bb0 = np.floor(np.amin(concat, axis=0))-rad
bb1 = np.ceil(np.amax(concat, axis=0))+rad
# Check for intersection of bounding box with image.
intersect = 1
if ((bb0[0] > nx) or (bb0[1] > ny) or (bb1[0] < 1) or (bb1[1] < 1)):
intersect = 0
if intersect:
# Crop bounding box.
bb0 = np.amax(np.vstack([np.array([bb0[0], 1]), np.array([bb0[1],1])]), axis=1)
bb0 = np.amin(np.vstack([np.array([bb0[0], nx]), np.array([bb0[1],ny])]), axis=1)
bb1 = np.amax(np.vstack([np.array([bb1[0], 1]), np.array([bb1[1],1])]), axis=1)
bb1 = np.amin(np.vstack([np.array([bb1[0], nx]), np.array([bb1[1],ny])]), axis=1)
# Compute distance d(j,i) to segment in bounding box
tmp = bb1 - bb0 + 1
szBB = [tmp[1], tmp[0]]
q0 = p0 - bb0 + 1
q1 = p1 - bb0 + 1
t = q1 - q0
nrmt = np.linalg.norm(t)
[x,y] = np.meshgrid(np.array([i+1 for i in range(int(szBB[1]))]), np.array([i+1 for i in range(int(szBB[0]))]))
d = np.zeros(szBB)
d.fill(float("inf"))
if nrmt == 0:
# Use distance to point q0
d = np.sqrt( (x - q0[0])**2 +(y - q0[1])**2)
idx = (d <= rad)
else:
# Use distance to segment q0, q1
t = t/nrmt
n = [t[1], -t[0]]
tmp = t[0] * (x - q0[0]) + t[1] * (y - q0[1])
idx = (tmp >= 0) & (tmp <= nrmt)
if np.any(idx.flatten('F')):
d[np.where(idx)] = abs(n[0] * (x[np.where(idx)] - q0[0]) + n[1] * (y[np.where(idx)] - q0[1]))
idx = (tmp < 0)
if np.any(idx.flatten('F')):
d[np.where(idx)] = np.sqrt( (x[np.where(idx)] - q0[0])**2 +(y[np.where(idx)] - q0[1])**2)
idx = (tmp > nrmt)
if np.any(idx.flatten('F')):
d[np.where(idx)] = np.sqrt( (x[np.where(idx)] - q1[0])**2 +(y[np.where(idx)] - q1[1])**2)
#Pixels within crop box to paint have distance <= rad
idx = (d <= rad)
#Mark the pixels
if np.any(idx.flatten('F')):
xy = (bb0[1]-1+y[np.where(idx)] + sizeIm[0] * (bb0[0]+x[np.where(idx)]-2)).astype(int)
sz = mrkd.shape
m = mrkd.flatten('F')
m[xy-1] = val
mrkd = m.reshape(mrkd.shape[0], mrkd.shape[1], order = 'F')
'''
row = 0
col = 0
for i in range(len(m)):
col = i//sz[0]
mrkd[row][col] = m[i]
row += 1
if row >= sz[0]:
row = 0
'''
return mrkd
def paintStroke(canvas, x, y, p0, p1, colour, rad):
# Paint a stroke from pixel p0 = (x0, y0) to pixel p1 = (x1, y1)
# on the canvas (ny x nx x 3 double array).
# The stroke has rgb values given by colour (a 3 x 1 vector, with
# values in [0, 1]. The paintbrush is circular with radius rad>0
sizeIm = canvas.shape
sizeIm = sizeIm[0:2]
idx = markStroke(np.zeros(sizeIm), p0, p1, rad, 1) > 0
# Paint
if np.any(idx.flatten('F')):
canvas = np.reshape(canvas, (np.prod(sizeIm),3), "F")
xy = y[idx] + sizeIm[0] * (x[idx]-1)
canvas[xy-1,:] = np.tile(np.transpose(colour[:]), (len(xy), 1))
canvas = np.reshape(canvas, sizeIm + (3,), "F")
return canvas
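# Minimal usage sketch for markStroke/paintStroke (hypothetical sizes; coordinates
# are 1-based, matching the meshgrid convention used in __main__ below):
#   cv = np.zeros((100, 100, 3)); cv.fill(-1)
#   xs, ys = np.meshgrid(np.arange(1, 101), np.arange(1, 101))
#   red = np.array([[1.0], [0.0], [0.0]])          # 3x1 colour in [0, 1]
#   cv = paintStroke(cv, xs, ys, np.array([10, 10]), np.array([40, 40]), red, 2)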
if __name__ == "__main__":
# Read image and convert it to double, and scale each R,G,B
# channel to range [0,1].
imRGB = array(Image.open('orchid.jpg'))
imRGB = double(imRGB) / 255.0
plt.clf()
plt.axis('off')
sizeIm = imRGB.shape
sizeIm = sizeIm[0:2]
# Set radius of paint brush and half length of drawn lines
rad = 1
halfLen = 5
# Set up x, y coordinate images, and canvas.
[x, y] = np.meshgrid(np.array([i+1 for i in range(int(sizeIm[1]))]), np.array([i+1 for i in range(int(sizeIm[0]))]))
canvas = np.zeros((sizeIm[0],sizeIm[1], 3))
canvas.fill(-1) ## Initially mark the canvas with a value out of range.
# Negative values will be used to denote pixels which are unpainted.
# Random number seed
np.random.seed(29645)
# Orientation of paint brush strokes
theta = 2 * pi * np.random.rand(1,1)[0][0]
# Set vector from center to one end of the stroke.
delta = np.array([cos(theta), sin(theta)])
time.time()
time.clock()
k=0
#####################################################################################
gray()
#imRGB_mono = np.zeros((sizeIm[0], sizeIm[1]))
#imRGB_mono = imRGB[:,:,0] * 0.30 + imRGB[:,:,1] * 0.59 + imRGB[:,:,2] * 0.11
#using canny edge detection on red filter
imRGB_mono = np.zeros((sizeIm[0], sizeIm[1], 3))
imRGB_mono = imRGB[:,:,0]
#orchid
high = 20; low = 7;
#myimg
#high = 15; low = 2;
canny_im = np.zeros((sizeIm[0],sizeIm[1], 3))
canny_im = canny(imRGB_mono, 2.0, high, low)
imshow(canny_im)
show()
### Part 5 code
imin = imRGB_mono.copy() * 255.0
wsize = 5
sigma = 4
gausskernel = gaussFilter(sigma, window = wsize)
# fx is the filter for vertical gradient
# fy is the filter for horizontal gradient
    # Please note the vertical direction is positive X
fx = createFilter([0, 1, 0,
0, 0, 0,
0, -1, 0])
fy = createFilter([ 0, 0, 0,
1, 0, -1,
0, 0, 0])
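    # Since conv2d flips the kernel, fx effectively computes the central difference
    # I[i+1, j] - I[i-1, j] along rows (the "positive X" vertical axis noted above),
    # and fy the analogous central difference along columns.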
imout = conv(imin, gausskernel, 'valid')
# print "imout:", imout.shape
gradxx = conv(imout, fx, 'valid')
gradyy = conv(imout, fy, 'valid')
gradx = np.zeros(imRGB_mono.shape)
grady = np.zeros(imRGB_mono.shape)
    # use integer padding so the slices below are valid integer indices
    padx = (imin.shape[0] - gradxx.shape[0]) // 2
    pady = (imin.shape[1] - gradxx.shape[1]) // 2
gradx[padx:-padx, pady:-pady] = gradxx
grady[padx:-padx, pady:-pady] = gradyy
# Net gradient is the square root of sum of square of the horizontal
# and vertical gradients
grad = hypot(gradx, grady)
theta = arctan2(grady, gradx)
theta = 180 + (180 / pi) * theta
# Only significant magnitudes are considered. All others are removed
xx, yy = where(grad < 0.33)
theta[xx, yy] = math.degrees(2 * pi * np.random.rand(1,1)[0][0])
#grad[xx, yy] = 0 not needed
imshow(theta)
show()
#colorImSave("flipped_fy_part5_theta.png", theta)
    normals = theta.copy() + 90  # add 90 degrees (pi/2) to theta for the stroke normals
#####################################################################################
#run while there isn still a pixel left to paint
while len(where(canvas < 0)[0]) != 0:
#tuple of pixels not painted
empty_canvas_pixels = where(canvas < 0)
#choose a random non-painted pixel from the tuple
index = randint(0, len(empty_canvas_pixels[0]))
#get the position for the original canvas to paint on in array form
cntr = array([empty_canvas_pixels[1][index], empty_canvas_pixels[0][index]]) + 1
# Grab colour from image at center position of the stroke.
colour = np.reshape(imRGB[cntr[1]-1, cntr[0]-1, :],(3,1))
        #perturb each r,g,b colour channel separately
        colour[0] = colour[0] - randint(-15, 15)*1.0/255
        colour[1] = colour[1] - randint(-15, 15)*1.0/255
        colour[2] = colour[2] - randint(-15, 15)*1.0/255
#scale colour from -15% to +15% and clamp to valid range [0,1]
colour = colour * (randint(-15,15) + 100)/100
colour = np.clip(colour, 0, 1)
        #perturb stroke orientation
        perturb_orientation = randint(-15, 15)
        #perturb stroke length, doing this for extra randomness for winning the bonus marks
#pass
# Add the stroke to the canvas
nx, ny = (sizeIm[1], sizeIm[0])
length1, length2 = (halfLen, halfLen)
if canny_im[cntr[1]-1, cntr[0]-1] > 0:
canvas = paintStroke(canvas, x, y, cntr, cntr, colour, rad)
else:
delta = np.array([
cos(math.radians(normals[cntr[1]-1][cntr[0]-1] + perturb_orientation)),
sin(math.radians(normals[cntr[1]-1][cntr[0]-1] + perturb_orientation))
])
i = 0
left = cntr - delta*i - 1
            #while we're still less than or equal to halfLen away from the center
            #and the corresponding pixel is not an edgel (edge pixel)
while i <= length1: #and canny_im[left[1], left[0]] == 0:
if canny_im[left[1], left[0]] != 0:
#print "hit edge breaking"
break
canvas = paintStroke(canvas, x, y, cntr - delta * i, cntr, colour, rad)
left = cntr - delta*i - 1
                if left[0] < 0 or left[1] < 0 or left[1] >= canny_im.shape[0] or left[0] >= canny_im.shape[1]: #then going out of bounds
break
i += 1
i = 0
right = cntr + delta*i - 1
#now do it for the opposite direction
            while i <= length2: #and canny_im[right[1], right[0]] == 0:
if canny_im[right[1], right[0]] != 0:
#print "hit edge breaking"
break
canvas = paintStroke(canvas, x, y, cntr, cntr + delta * i, colour, rad)
right = cntr + delta*i -1
                if right[0] < 0 or right[1] < 0 or right[1] >= canny_im.shape[0] or right[0] >= canny_im.shape[1]: #then going out of bounds
break
i += 1
#canvas = paintStroke(canvas, x, y, cntr - delta * length2, cntr + delta * length1, colour, rad)
#print imRGB[cntr[1]-1, cntr[0]-1, :], canvas[cntr[1]-1, cntr[0]-1, :]
print 'stroke', k
k += 1
print "done!"
time.time()
canvas[canvas < 0] = 0.0
plt.clf()
plt.axis('off')
figure(1)
plt.imshow(canvas)
#show()
##FOR THE BONUS MARKS
#going to blur each rgb channel a bit to mask some of the sharp edges
#this makes the image look less digitized and more natural since the colors
#blend in together
fi = zeros((sizeIm[0], sizeIm[1], 3))
#for dog image use sigma values 1, 1.2, 1.1 respectively for rgb channels
fi[:,:,0] = gaussian_filter(canvas[:,:,0], sigma = 0.4)
fi[:,:,1] = gaussian_filter(canvas[:,:,1], sigma = 0.6)
fi[:,:,2] = gaussian_filter(canvas[:,:,2], sigma = 0.5)
figure(2)
imshow(fi)
show()
#plt.pause(3)
#colorImSave('output.png', canvas)
|
RamaneekGill/CSC320-Winter-2014
|
project 2/p2.py
|
Python
|
gpl-2.0
| 12,095 | 0.015048 |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='salmon',
version='0.3.0-dev',
description="A simple metric collector with alerts.",
long_description=open('README.rst').read(),
author="Peter Baumgarter",
author_email='pete@lincolnloop.com',
url='https://github.com/lincolnloop/salmon',
license='BSD',
install_requires=[
'django==1.6.1',
'djangorestframework==2.3.9',
'South==0.8.3',
'logan==0.5.9.1',
'gunicorn==18.0',
'whisper==0.9.10',
'dj-static==0.0.5',
'pytz',
],
entry_points={
'console_scripts': [
'salmon = salmon.core.runner:main',
],
},
packages=find_packages(),
include_package_data=True,
zip_safe=False,
)
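# Hypothetical post-install usage: the console_scripts entry point above exposes
# a `salmon` command bound to salmon.core.runner:main, e.g.:
#   pip install . && salmon --help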
|
lincolnloop/salmon
|
setup.py
|
Python
|
bsd-3-clause
| 801 | 0 |