repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
---|---|---|---|---|---|---|---|---|
streamlink/streamlink
|
src/streamlink/plugins/teamliquid.py
|
Python
|
bsd-2-clause
| 1,128 | 0.000887 |
"""
$url teamliquid.net
$url tl.net
$type live
"""
import logging
import re
from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugins.afreeca import AfreecaTV
from streamlink.plu
|
gins.twitch import Twitch
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?(?:tl|teamliquid)\.ne
|
t/video/streams/"
))
class Teamliquid(Plugin):
def _get_streams(self):
res = self.session.http.get(self.url)
stream_address_re = re.compile(r'''href\s*=\s*"([^"]+)"\s*>\s*View on''')
stream_url_match = stream_address_re.search(res.text)
if stream_url_match:
stream_url = stream_url_match.group(1)
log.info("Attempting to play streams from {0}".format(stream_url))
p = urlparse(stream_url)
if p.netloc.endswith("afreecatv.com"):
self.stream_weight = AfreecaTV.stream_weight
elif p.netloc.endswith("twitch.tv"):
self.stream_weight = Twitch.stream_weight
return self.session.streams(stream_url)
__plugin__ = Teamliquid
|
koljanos/sga-lti
|
sga/backend/send_grades.py
|
Python
|
bsd-3-clause
| 4,039 | 0.00099 |
""""
This module handles sending grades back to edX
Most of this module is a python 3 port of pylti (github.com/mitodl/sga-lti)
and should be moved back into that library.
"""
import uuid
from xml.etree import ElementTree as etree
import oauth2
from django.conf import settings
class SendGradeFailure(Exception):
""" Exception class for failures sending grades to edX"""
def send_grade(consumer_key, edx_url, result_id, grade):
""" Sends a grade to edX """
if consumer_key not in settings.LTI_OAUTH_CREDENTIALS:
raise SendGradeFailure("Invalid consumer_key %s" % consumer_key)
body = generate_request_xml(str(uuid.uuid1()), "replaceResult", result_id, grade)
secret = settings.LTI_OAUTH_CREDENTIALS[consumer_key]
response, content = _post_patched_request(consumer_key, secret, body, edx_url, "POST", "application/xml")
if isinstance(content, bytes):
content = content.decode("utf8")
if "<imsx_codeMajor>success</imsx_codeMajor>" not in content:
raise SendGradeFailure("Send grades to edX returned %s" % response.status)
def _post_patched_request(lti_key, secret, body, url, method, content_type): # pylint: disable=too-many-arguments
"""
Authorization header needs to be capitalized for some LTI clients
this function ensures that header is capitalized
:param body: body of the call
|
:param client: OAuth Client
:param url: outc
|
ome url
:return: response
"""
consumer = oauth2.Consumer(key=lti_key, secret=secret)
client = oauth2.Client(consumer)
import httplib2
http = httplib2.Http
# pylint: disable=protected-access
normalize = http._normalize_headers
def my_normalize(self, headers):
""" This function patches Authorization header """
ret = normalize(self, headers)
if 'authorization' in ret:
ret['Authorization'] = ret.pop('authorization')
return ret
http._normalize_headers = my_normalize
monkey_patch_function = normalize
response, content = client.request(
url,
method,
body=body.encode("utf8"),
headers={'Content-Type': content_type})
http = httplib2.Http
# pylint: disable=protected-access
http._normalize_headers = monkey_patch_function
return response, content
def generate_request_xml(message_identifier_id, operation,
lis_result_sourcedid, score):
# pylint: disable=too-many-locals
"""
Generates LTI 1.1 XML for posting result to LTI consumer.
:param message_identifier_id:
:param operation:
:param lis_result_sourcedid:
:param score:
:return: XML string
"""
root = etree.Element('imsx_POXEnvelopeRequest',
xmlns='http://www.imsglobal.org/services/'
'ltiv1p1/xsd/imsoms_v1p0')
header = etree.SubElement(root, 'imsx_POXHeader')
header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
version = etree.SubElement(header_info, 'imsx_version')
version.text = 'V1.0'
message_identifier = etree.SubElement(header_info,
'imsx_messageIdentifier')
message_identifier.text = message_identifier_id
body = etree.SubElement(root, 'imsx_POXBody')
xml_request = etree.SubElement(body, '%s%s' % (operation, 'Request'))
record = etree.SubElement(xml_request, 'resultRecord')
guid = etree.SubElement(record, 'sourcedGUID')
sourcedid = etree.SubElement(guid, 'sourcedId')
sourcedid.text = lis_result_sourcedid
if score is not None:
result = etree.SubElement(record, 'result')
result_score = etree.SubElement(result, 'resultScore')
language = etree.SubElement(result_score, 'language')
language.text = 'en'
text_string = etree.SubElement(result_score, 'textString')
text_string.text = score.__str__()
ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
etree.tostring(root, encoding='unicode'))
return ret
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/contrib/gis/gdal/srs.py
|
Python
|
mit
| 11,540 | 0.00078 |
"""
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
destructor = capi.release_srs
def __init__(self, srs_input='', srs_type='user'):
"""
Create a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.impor
|
t_wkt(srs_input)
return
elif isinstan
|
ce(srs_input, str):
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, int):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __getitem__(self, target):
"""
Return the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
>>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbole.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"Use 'pretty' WKT."
return self.pretty_wkt
# #### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, str) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Return the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Return the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Return a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morph this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morph this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Check to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
# #### Name & SRID properties ####
@property
def name(self):
"Return the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Return the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
# #### Unit Properties ####
@property
def linear_name(self):
"Return the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Return the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Return the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Return the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Return a 2-tuple of the units value and the units name. Automatically
determine whether to return the linear or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
# #### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Return a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@propert
|
manqala/erpnext
|
erpnext/stock/doctype/stock_reconciliation/test_stock_reconciliation.py
|
Python
|
gpl-3.0
| 4,815 | 0.023261 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, unittest
from frappe.utils import flt, nowdate, nowtime
from erpnext.accounts.utils import get_stock_and_account_difference
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.stock.stock_ledger import get_previous_sle, update_entries_after
from erpnext.stock.doctype.stock_reconciliation.stock_reconciliation import EmptyStockReconciliationItemsError
class TestStockReconciliation(unittest.TestCase):
def setUp(self):
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
self.insert_existing_sle()
def test_reco_for_fifo(self):
self._test_reco_sle_gle("FIFO")
def test_reco_for_moving_average(self):
self._test_reco_sle_gle("Moving Average")
def _test_reco_sle_gle(self, valuation_method):
set_perpetual_inventory()
# [[qty, valuation_rate, posting_date,
# posting_time, expected_stock_value, bin_qty, bin_valuation]]
input_data = [
[50, 1000, "2012-12-26", "12:00"],
[25, 900, "2012-12-26", "12:00"],
["", 1000, "2012-12-20", "12:05"],
[20, "", "2012-12-26", "12:05"],
[0, "", "2012-12-31", "12:10"]
]
for d in input_data:
set_valuation_method("_Test Item", valuation_method)
last_sle = get_previous_sle({
"item_code": "_Test Item",
"warehouse": "_Test Warehouse - _TC",
"posting_date": d[2],
"posting_time": d[3]
})
# submit stock reconciliation
stock_reco = create_stock_reconciliation(qty=d[0], rate=d[1],
posting_date=d[2], posting_time=d[3])
# check stock value
sle = frappe.db.sql("""select * from `tabStock Ledger Entry`
where voucher_type='Stock Reconciliation' and voucher_no=%s""", stock_reco.name, as_dict=1)
qty_after_transaction = flt(d[0]) if d[0] != "" else flt(last_sle.get("qty_after_transaction"))
valuation_rate = flt(d[1]) if d[1] != "" else flt(last_sle.get("valuation_rate"))
if qty_after_transaction == last_sle.get("qty_after_transaction") \
and valuation_rate == last_sle.get("valuation_rate"):
self.assertFalse(sle)
else:
self.assertEqual(sle[0].qty_after_transaction, qty_after_transaction)
self.assertEqual(sle[0].stock_value, qty_after_transaction * valuation_rate)
# no gl entries
self.assertTrue(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Stock Reconciliation", "voucher_no": stock_reco.name}))
self.assertFalse(get_stock_and_account_difference(["_Test Account Stock In Hand - _TC"]))
stock_reco.cancel()
self.assertFalse(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Stock Reconciliation", "voucher_n
|
o": stock_reco.name}))
self.assertFalse(frappe.db.get_value("GL Entry",
{"voucher_type": "Stock Reconciliation", "voucher_no": stock_reco.name}))
set_perpetual_inventory(0)
def insert_existing_sle(self):
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
make_stock_entry(posting_date="2012-12-15", posting_time="02:00", item_code="_Test Item",
target="_Test Warehouse - _TC", qty=10, basic_rate=700)
make_stock_entry(pos
|
ting_date="2012-12-25", posting_time="03:00", item_code="_Test Item",
source="_Test Warehouse - _TC", qty=15)
make_stock_entry(posting_date="2013-01-05", posting_time="07:00", item_code="_Test Item",
target="_Test Warehouse - _TC", qty=15, basic_rate=1200)
def create_stock_reconciliation(**args):
args = frappe._dict(args)
sr = frappe.new_doc("Stock Reconciliation")
sr.posting_date = args.posting_date or nowdate()
sr.posting_time = args.posting_time or nowtime()
sr.set_posting_time = 1
sr.company = args.company or "_Test Company"
sr.expense_account = args.expense_account or \
("Stock Adjustment - _TC" if frappe.get_all("Stock Ledger Entry") else "Temporary Opening - _TC")
sr.cost_center = args.cost_center or "_Test Cost Center - _TC"
sr.append("items", {
"item_code": args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"qty": args.qty,
"valuation_rate": args.rate
})
try:
sr.submit()
except EmptyStockReconciliationItemsError:
pass
return sr
def set_valuation_method(item_code, valuation_method):
frappe.db.set_value("Item", item_code, "valuation_method", valuation_method)
for warehouse in frappe.get_all("Warehouse", filters={"company": "_Test Company"}, fields=["name", "is_group"]):
if not warehouse.is_group:
update_entries_after({
"item_code": item_code,
"warehouse": warehouse.name
}, allow_negative_stock=1)
test_dependencies = ["Item", "Warehouse"]
|
andersbogsnes/blog
|
app/forms.py
|
Python
|
mit
| 1,224 | 0.006536 |
from flask_wtf import Form
from flask_wtf.file import FileRequired, FileAllowed, FileField
from wtforms import StringField, BooleanField, PasswordField, TextAreaField
from wtforms.validators import DataRequired, Email, Length
class SignUpForm(Form):
username = StringField('username', validators=[DataRequired(), Length(max=64)])
# password = PasswordField('password', validators=[DataRequired(), Length(max=50)])
email = StringField('email', validators=[DataRequired(), Email(), Length(max=120)])
first_name = StringField('first_name', validators=[DataRequired(), Length(max=50)])
last_name = StringField('last_name', validators=[DataRequired(), Length(max=50)])
class LoginForm(Form):
username = StringField('username', validators=[DataRequired(), Length(max=50)])
password = PasswordField('password', validators=[DataRequired(), Length(max=
|
50)])
remember_me = BooleanField('remember_me', default=False)
class PostForm(Form):
content = TextAreaField('content', validators=[DataRequired()])
class UploadPostForm(Form):
file = FileField('post', validators=[FileRequired(), FileAllowed(['md'], 'Only Markdown files!')])
|
overwrite = BooleanField('overwrite', default=False)
|
imyeego/MLinPy
|
zh_cnn_text_classify/text_cnn.py
|
Python
|
mit
| 3,414 | 0.045694 |
import tensorflow as tf
import numpy as np
class TextCNN(object):
'''
A CNN for text classification
Uses and embedding layer, followed by a convolutional, max-pooling
|
and softmax layer.
'''
def __init__(
self, sequence_length, num_classes,
embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
# Placeholders for input, output, dropout
self.input_x = tf.placeholder(tf.float32, [None, sequence_length, embedding_size], name = "input_x")
self.input_y = tf.placeholder(tf.float32, [None,
|
num_classes], name = "input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name = "dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
# self.embedded_chars = [None(batch_size), sequence_size, embedding_size]
# self.embedded_chars = [None(batch_size), sequence_size, embedding_size, 1(num_channels)]
self.embedded_chars = self.input_x
self.embedded_chars_expended = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expended,
W,
strides=[1,1,1,1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name = "relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1,1,1,1],
padding="VALID",
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnomalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape = [num_filters_total, num_classes],
initializer = tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[num_classes], name = "b"))
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name = "scores")
self.predictions = tf.argmax(self.scores, 1, name = "predictions")
# Calculate Mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits = self.scores, labels = self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name = "accuracy")
|
sshwsfc/django-xadmin
|
xadmin/plugins/language.py
|
Python
|
bsd-3-clause
| 1,002 | 0.003992 |
from django.conf import settings
from django.template import loader
from django.views.i18n import set_language
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, CommAdminView, BaseAdminView
class SetLangNavPlugin(BaseAdminPlugin):
def block_top_navmenu(self, context, nodes):
context = get_context_dict(context)
context['redirect_to'] = self.request.get_full_path()
nodes.append(loader.render_to_string('xadmin/blocks/comm.top.setlang.html', context=context))
class SetLangView(BaseAdminView):
def post(self, request, *args, **kwargs):
if 'nav_menu' in request.session:
|
del request.session['nav_menu']
return set_language(request)
if settings.LANGUAGES and 'django.middleware.locale.LocaleMiddleware' in settings.MIDDLEWARE_CLASSES:
site.register_plugin(SetLangNavPlugin, CommAdminView)
site.register_view(r'^i18n/setlang/$', SetLangView, 'set_langua
|
ge')
|
CobwebOrg/cobweb-django
|
projects/search_indexes.py
|
Python
|
mit
| 4,143 | 0.003862 |
# """SearchIndex classes for Django-haystack."""
from typing import List
from django.utils.html import format_html, mark_safe
from haystack import indexes
from projects.models import Project, Nomination, Claim
class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
"""Django-haystack index of Project model."""
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=True, stored=False)
slug = indexes.CharField(model_attr='slug', indexed=True, stored=True)
title = indexes.CharField(model_attr='title', indexed=True, stored=True)
description = indexes.CharField(model_attr='description', indexed=True, stored=True)
administrators = indexes.MultiValueField(indexed=True, null=True, stored=True)
nomination_policy = indexes.CharField(model_attr='nomination_policy', indexed=True, stored=True)
# nominator_orgs
nominators = indexes.MultiValueField(indexed=True, null=True, stored=True)
# nominator_blacklist
status = indexes
|
.CharField(model_attr='status', indexed=True, stored=True)
impact_factor = indexes.IntegerField(model_attr='impact_factor', indexed=True, stored=True)
tags = indexes.MultiValueField(indexed=True, null=True, stored=True)
subject_headings = indexes.MultiValueField(indexed=True, null=True, stored=True)
|
# notes
unclaimed_nominations = indexes.IntegerField(model_attr='n_unclaimed', indexed=True, stored=True)
claimed_nominations = indexes.IntegerField(model_attr='n_claimed', indexed=True, stored=True)
held_nominations = indexes.IntegerField(model_attr='n_held', indexed=True, stored=True)
def get_model(self):
return Project
def index_queryset(self, using=None):
return self.get_model().objects.exclude(status='Deleted')
def prepare_administrators(self, obj: Project) -> List[str]:
return [user.get_absolute_url() for user in obj.administrators.all()]
def prepare_nominators(self, obj: Project) -> List[str]:
return [user.get_absolute_url for user in obj.nominators.all()]
def prepare_tags(self, obj: Project) -> List[str]:
return [tag.name for tag in obj.tags.all()]
def prepare_subject_headings(self, obj: Project) -> List[str]:
return [subj.name for subj in obj.subject_headings.all()]
class NominationIndex(indexes.SearchIndex, indexes.Indexable):
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=False)
project_pk = indexes.IntegerField(model_attr='project__pk', indexed=True, stored=True)
project_slug = indexes.CharField(model_attr='project__slug', indexed=True, stored=True)
url = indexes.CharField(model_attr='resource__url')
status = indexes.CharField(model_attr='status', indexed=True, stored=True)
# needs_claim = indexes.BooleanField(model_attr='needs_claim', indexed=True, stored=True)
# nominated_by = indexes.MultiValueField(model_attr='nominated_by', indexed=True, stored=True)
# rationale = indexes.(model_attr='rationale', indexed=True, stored=True)
# suggested_crawl_frequency = indexes.(model_attr='suggested_crawl_frequency', indexed=True, stored=True)
# suggested_crawl_end_date = indexes.(model_attr='suggested_crawl_end_date', indexed=True, stored=True)
# notes = indexes.(model_attr='notes', indexed=True, stored=True)
# impact_factor = indexes.IntegerField(model_attr='impact_factor', indexed=True, stored=True)
def get_model(self):
return Nomination
def index_queryset(self, using=None):
return self.get_model().objects.all()
class ClaimIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = Claim
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=False)
nomination_pk = indexes.IntegerField(model_attr='nomination_id',
indexed=True, stored=True)
def index_queryset(self, using=None):
return self.get_model().objects.all()
|
amrdraz/brython
|
www/src/Lib/encodings/iso8859_1.py
|
Python
|
bsd-3-clause
| 13,483 | 0.021064 |
""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSIO
|
N
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> D
|
ATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0
|
Anfauglith/iop-hd
|
test/functional/test_framework/script.py
|
Python
|
mit
| 25,954 | 0.01021 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
This file is modified from python-ioplib.
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5
|
b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN
|
= CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
|
lustigerluke/motion-track
|
config.py
|
Python
|
mit
| 1,176 | 0.005102 |
# Config.py file for motion-track.py
# Display Settings
debug = True # Set to False for no data display
window_on = False # Set to True displays opencv windows (GUI desktop reqd)
diff_window_on = False # Show OpenCV image difference window
thresh_window_on = False # Show OpenCV image Threshold window
SHOW_CIRCLE = True # show a circle otherwise show bounding rectancle on window
CIRCLE_SIZE = 8 # diameter of circle to show motion location in window
LINE_THICKNESS = 1 # thickness of bounding line in pixels
WINDOW_BIGGER = 1 # Resize multiplier for Move
|
ment Status Window
# if gui_window_on=True then makes opencv window bigger
# Note if the window is larger than 1 then a reduced frame rate will occur
# Camera Settings
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
big_w = int(CAMERA_WIDTH * WINDOW_BIGGER)
big_h = int(CAMERA_HEIGHT * WINDOW_BIGGER)
CAMERA_HFLIP = False
CAMERA_VFLIP = True
CAMERA_ROTATION=0
CAMERA_FRAMERATE = 35
FRAME_COUNTER = 1000
# M
|
otion Tracking Settings
MIN_AREA = 200 # excludes all contours less than or equal to this Area
THRESHOLD_SENSITIVITY = 25
BLUR_SIZE = 10
|
m110/grafcli
|
grafcli/commands.py
|
Python
|
mit
| 8,828 | 0.000227 |
import os
import re
import json
import shutil
import tarfile
import tempfile
from climb.config import config
from climb.commands import Commands, command, completers
from climb.exceptions import CLIException
from climb.paths import format_path, split_path, ROOT_PATH
from grafcli.documents import Document, Dashboard, Row, Panel
from grafcli.exceptions import CommandCancelled
from grafcli.resources import Resources
from grafcli.storage.system import to_file_format, from_file_format
from grafcli.utils import json_pretty
class GrafCommands(Commands):
def __init__(self, cli):
super().__init__(cli)
self._resources = Resources()
@command
@completers('path')
def ls(self, path=None):
path = format_path(self._cli.current_path, path)
result = self._resources.list(path)
return "\n".join(sorted(result))
@command
@completers('path')
def cd(self, path=None):
path = format_path(self._cli.current_path, path, default=ROOT_PATH)
# No exception means correct path
self._resources.list(path)
self._cli.set_current_path(path)
@command
@completers('path')
def cat(self, path):
path = format_path(self._cli.current_path, path)
document = self._resources.get(path)
return json_pretty(document.source, colorize=config['grafcli'].getboolean('colorize'))
@command
@completers('path')
def cp(self, source, destination, match_slug=False):
if len(source) < 2:
raise CLIException("No destination provided")
destination = source.pop(-1)
destination_path = format_path(self._cli.current_path, destination)
for path in source:
source_path = format_path(self._cli.current_path, path)
document = self._resources.get(source_path)
if match_slug:
destination_path = self._match_slug(document, destination_path)
self._resources.save(destination_path, document)
self._cli.log("cp: {} -> {}", source_path, destination_path
|
)
@command
@completers('path')
def mv(self, source, destination, match_slug=False):
if len(source) < 2:
raise CLIException("No destination provided")
destination = source.pop(-1)
destination_path = format_path(self._cli.current_path, destination)
for path in source:
source_path = format_path(self._cli.current_path, path)
document = self._resources.get(source_path)
if match_slug:
destination_path
|
= self._match_slug(document, destination_path)
self._resources.save(destination_path, document)
self._resources.remove(source_path)
self._cli.log("mv: {} -> {}", source_path, destination_path)
@command
@completers('path')
def rm(self, path):
path = format_path(self._cli.current_path, path)
self._resources.remove(path)
self._cli.log("rm: {}", path)
@command
@completers('path')
def template(self, path):
path = format_path(self._cli.current_path, path)
document = self._resources.get(path)
if isinstance(document, Dashboard):
template = 'dashboards'
elif isinstance(document, Row):
template = 'rows'
elif isinstance(document, Panel):
template = 'panels'
else:
raise CLIException("Unknown document type: {}".format(
document.__class__.__name__))
template_path = "/templates/{}".format(template)
self._resources.save(template_path, document)
self._cli.log("template: {} -> {}", path, template_path)
@command
@completers('path')
def editor(self, path):
path = format_path(self._cli.current_path, path)
document = self._resources.get(path)
tmp_file = tempfile.mktemp(suffix=".json")
with open(tmp_file, 'w') as file:
file.write(json_pretty(document.source))
cmd = "{} {}".format(config['grafcli']['editor'], tmp_file)
exit_status = os.system(cmd)
if not exit_status:
self._cli.log("Updating: {}".format(path))
self.file_import(tmp_file, path)
os.unlink(tmp_file)
@command
@completers('path')
def merge(self, paths):
if len(paths) < 2:
raise CLIException("Provide at least two paths")
tmp_files = []
for path in paths:
formatted_path = format_path(self._cli.current_path, path)
document = self._resources.get(formatted_path)
tmp_file = tempfile.mktemp(suffix=".json")
tmp_files.append((formatted_path, tmp_file))
with open(tmp_file, 'w') as file:
file.write(json_pretty(document.source))
cmd = "{} {}".format(config['grafcli'].get('mergetool', 'vimdiff'), ' '.join([v[1] for v in tmp_files]))
exit_status = os.system(cmd)
for path, tmp_file in tmp_files:
if not exit_status:
self._cli.log("Updating: {}".format(path))
self.file_import(tmp_file, path)
os.unlink(tmp_file)
@command
@completers('path')
def pos(self, path, position):
if not path:
raise CLIException("No path provided")
if not position:
raise CLIException("No position provided")
path = format_path(self._cli.current_path, path)
parts = split_path(path)
parent_path = '/'.join(parts[:-1])
child = parts[-1]
parent = self._resources.get(parent_path)
parent.move_child(child, position)
self._resources.save(parent_path, parent)
@command
@completers('path', 'system_path')
def backup(self, path, system_path):
if not path:
raise CLIException("No path provided")
if not system_path:
raise CLIException("No system path provided")
path = format_path(self._cli.current_path, path)
system_path = os.path.expanduser(system_path)
documents = self._resources.list(path)
if not documents:
raise CLIException("Nothing to backup")
tmp_dir = tempfile.mkdtemp()
archive = tarfile.open(name=system_path, mode="w:gz")
for doc_name in documents:
file_name = to_file_format(doc_name)
file_path = os.path.join(tmp_dir, file_name)
doc_path = os.path.join(path, doc_name)
self.file_export(doc_path, file_path)
archive.add(file_path, arcname=file_name)
archive.close()
shutil.rmtree(tmp_dir)
@command
@completers('system_path', 'path')
def restore(self, system_path, path):
system_path = os.path.expanduser(system_path)
path = format_path(self._cli.current_path, path)
tmp_dir = tempfile.mkdtemp()
with tarfile.open(name=system_path, mode="r:gz") as archive:
archive.extractall(path=tmp_dir)
for name in os.listdir(tmp_dir):
try:
file_path = os.path.join(tmp_dir, name)
doc_path = os.path.join(path, from_file_format(name))
self.file_import(file_path, doc_path)
except CommandCancelled:
pass
shutil.rmtree(tmp_dir)
@command
@completers('path', 'system_path')
def file_export(self, path, system_path):
path = format_path(self._cli.current_path, path)
system_path = os.path.expanduser(system_path)
document = self._resources.get(path)
with open(system_path, 'w') as file:
file.write(json_pretty(document.source))
self._cli.log("export: {} -> {}", path, system_path)
@command
@completers('system_path', 'path')
def file_import(self, system_path, path, match_slug=False):
system_path = os.path.expanduser(system_path)
path = format_path(self._cli.current_path, path)
with open(system_path, 'r') as file:
content = file.read()
document = Document.from_source(json.loads(content))
if mat
|
jcgoble3/luapatt
|
tests/test_lua1_basics.py
|
Python
|
mit
| 6,266 | 0.009595 |
# coding: utf-8
# Copyright 2015 Jonathan Goble
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Copied from the official Lua 5.3.2 test suite and converted to Python
import sys
sys.path.insert(0, r'src')
import luapatt
### BASIC FIND TESTS
# empty patterns are tricky
def test_empty_empty():
assert luapatt.find('', '') == (0, 0)
def test_plain_empty():
assert luapatt.find('alo', '') == (0, 0)
# first position
def test_first_char():
assert luapatt.find('a\0o a\0o a\0o', 'a', 0) == (0, 1)
# starts in the middle
def test_substr_expinit_1():
assert luapatt.find('a\0o a\0o a\0o', 'a\0o', 1) == (4, 7)
# starts in the middle
def test_substr_expinit_2():
assert luapatt.find('a\0o a\0o a\0o', 'a\0o', 8) == (8, 11)
# finds at the end
def test_substr_atend():
assert luapatt.find('a\0a\0a\0a\0\0ab', '\0ab', 1) == (8, 11)
# last position
def test_last_char():
assert luapatt.find('a\0a\0a\0a\0\0ab', 'b') == (10, 11)
# check ending
def test_nomatch_pastend():
assert luapatt.find('a\0a\0a\0a\0\0ab', 'b\0') is None
def test_nomatch_pastend_nullsrc():
assert luapatt.find('', '\0') is None
def test_substr():
assert luapatt.find('alo123alo', '12') == (3, 5)
### QUANTIFIERS AND ANCHORS
def test_nomatch_startanchor():
assert luapatt.find('alo^123alo', '^12') is None
def test_dot_asterisk_basic():
assert luapatt.match("aaab", ".*b") == "aaab"
def test_dot_asterisk_backtrack1():
assert luapatt.match("aaa", ".*a") == "aaa"
def test_dot_asterisk_matchzero():
assert luapatt.match("b", ".*b") == "b"
def test_dot_plus_basic():
assert luapatt.match("aaab", ".+b") == "aaab"
def test_dot_plus_backtrack1():
assert luapatt.match("aaa", ".+a") == "aaa"
def test_dot_plus_failzero():
assert luapatt.match("b", ".+b") is None
def test_dot_question_basic_1():
assert luapatt.match("aaab", ".?b") == "ab"
def test_dot_question_basic_2():
assert luapatt.match("aaa", ".?a") == "aa"
def test_dot_question_matchzero():
assert luapatt.match("b", ".?b") == "b"
def test_percent_l():
assert luapatt.match('aloALO', '%l*') == 'alo'
def test_percent_a():
assert luapatt.match('aLo_ALO', '%a*') == 'aLo'
def test_plain_asterisk():
assert luapatt.match('aaab', 'a*') == 'aaa'
def test_full_match_asterisk():
assert luapatt.match('aaa', '^.*$') == 'aaa'
def test_asterisk_null_match():
assert luapatt.match('aaa', 'b*') == ''
def test_asterisk_null_match_2():
assert luapatt.match('aaa', 'ab*a') == 'aa'
def test_asterisk_match_one():
assert luapatt.match('aba', 'ab*a') == 'aba'
def test_plain_plus():
assert luapatt.match('aaab', 'a+') == 'aaa'
def test_full_match_plus():
assert luapatt.match('aaa', '^.+$') == 'aaa'
def test_plain_plus_failzero():
assert luapatt.match('aaa', 'b+') is None
def test_plain_plus_failzero_2():
assert luapatt.match('aaa', 'ab+a') is None
def test_plus_match_one():
assert luapatt.match('aba', 'ab+a') == 'aba'
def test_end_anchor():
assert luapatt.match('a$a', '.$') == 'a'
def test_escaped_end_anchor():
assert luapatt.match('a$a', '.%$') == 'a$'
def test_dollarsign_inmiddle():
assert luapatt.match('a$a', '.$.') == 'a$a'
def test_double_dollarsign():
assert luapatt.match('a$a', '$$') is None
def test_end_anchor_nomatch():
assert luapatt.match('a$b', 'a$') is None
def test_end_anchor_matchnull():
assert luapatt.match('a$a', '$') == ''
def test_asterisk_match_nullstring():
assert luapatt.match('', 'b*') == ''
def test_plain_nomatch():
assert luapatt.match('aaa', 'bb*') is None
def test_minus_match_zero():
assert luapatt.match('aaab',
|
'a-') == ''
def test_full_match_minus():
assert luapatt.match('aaa', '^.-$') == 'aaa'
def test_asterisk_maxexpand():
assert luapatt.match('aabaaabaaabaaaba', 'b.*b') == 'baaabaaabaaab'
def test_minus_minexpand():
assert luapatt.match('aabaaabaaabaaaba', 'b.-b') == 'baaab'
def test_dot_plain_endanchor():
assert l
|
uapatt.match('alo xo', '.o$') == 'xo'
def test_class_x2_asterisk():
assert luapatt.match(' \n isto é assim', '%S%S*') == 'isto'
def test_class_asterisk_endanchor():
assert luapatt.match(' \n isto é assim', '%S*$') == 'assim'
def test_set_asterisk_endanchor():
assert luapatt.match(' \n isto é assim', '[a-z]*$') == 'assim'
def test_negatedset_with_class():
assert luapatt.match('um caracter ? extra', '[^%sa-z]') == '?'
def test_question_match_zero():
assert luapatt.match('', 'a?') == ''
def test_question_match_one():
assert luapatt.match('á', 'á?') == 'á'
def test_multi_question():
assert luapatt.match('ábl', 'á?b?l?') == 'ábl'
def test_question_match_zero_2():
assert luapatt.match(' ábl', 'á?b?l?') == ''
def test_question_backtracking():
assert luapatt.match('aa', '^aa?a?a') == 'aa'
### OTHERS
def test_right_bracket_in_set():
assert luapatt.match(']]]áb', '[^]]') == 'á'
def test_percent_x():
assert luapatt.match("0alo alo", "%x*") == "0a"
def test_match_control_characters():
assert luapatt.match('alo alo', '%C+') == 'alo alo'
def test_match_printable():
assert luapatt.match(' \n\r*&\n\r xuxu \n\n', '%g%g%g+') == 'xuxu'
def test_match_punctuation():
assert luapatt.match('Hello World!', '%p+') == '!'
|
phodal/iot-code
|
chapter2/gpio.py
|
Python
|
mit
| 124 | 0 |
import RPi.GPIO as
|
GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(5, GPIO.OUT)
GPIO.output(5,
|
GPIO.HIGH)
GPIO.output(5, GPIO.LOW)
|
anish/buildbot
|
master/buildbot/test/unit/test_revlinks.py
|
Python
|
gpl-2.0
| 5,555 | 0.00234 |
# This file is part of B
|
uildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# L
|
icense as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.revlinks import GithubRevlink
from buildbot.revlinks import GitwebMatch
from buildbot.revlinks import RevlinkMatch
from buildbot.revlinks import SourceforgeGitRevlink
from buildbot.revlinks import SourceforgeGitRevlink_AlluraPlatform
from buildbot.revlinks import default_revlink_matcher
class TestGithubRevlink(unittest.TestCase):
revision = 'b6874701b54e0043a78882b020afc86033133f91'
url = 'https://github.com/buildbot/buildbot/commit/b6874701b54e0043a78882b020afc86033133f91'
def testHTTPS(self):
self.assertEqual(GithubRevlink(self.revision, 'https://github.com/buildbot/buildbot.git'),
self.url)
def testGIT(self):
self.assertEqual(GithubRevlink(self.revision, 'git://github.com/buildbot/buildbot.git'),
self.url)
def testSSH(self):
self.assertEqual(GithubRevlink(self.revision, 'git@github.com:buildbot/buildbot.git'),
self.url)
def testSSHuri(self):
self.assertEqual(GithubRevlink(self.revision, 'ssh://git@github.com/buildbot/buildbot.git'),
self.url)
class TestSourceforgeGitRevlink(unittest.TestCase):
revision = 'b99c89a2842d386accea8072ae5bb6e24aa7cf29'
url = 'http://gemrb.git.sourceforge.net/git/gitweb.cgi?p=gemrb/gemrb;a=commit;h=b99c89a2842d386accea8072ae5bb6e24aa7cf29' # noqa pylint: disable=line-too-long
def testGIT(self):
url = SourceforgeGitRevlink(self.revision,
'git://gemrb.git.sourceforge.net/gitroot/gemrb/gemrb')
self.assertEqual(url, self.url)
def testSSH(self):
url = SourceforgeGitRevlink(self.revision,
'somebody@gemrb.git.sourceforge.net:gitroot/gemrb/gemrb')
self.assertEqual(url, self.url)
def testSSHuri(self):
url = SourceforgeGitRevlink(self.revision,
'ssh://somebody@gemrb.git.sourceforge.net/gitroot/gemrb/gemrb')
self.assertEqual(url, self.url)
class TestSourceforgeGitRevlink_AlluraPlatform(unittest.TestCase):
revision = '6f9b1470bae497c6ce47e4cf8c9195d864d2ba2f'
url = 'https://sourceforge.net/p/klusters/klusters/ci/6f9b1470bae497c6ce47e4cf8c9195d864d2ba2f/'
def testGIT(self):
url = SourceforgeGitRevlink_AlluraPlatform(self.revision,
'git://git.code.sf.net/p/klusters/klusters')
self.assertEqual(url, self.url)
def testSSHuri(self):
url = SourceforgeGitRevlink_AlluraPlatform(
self.revision, 'ssh://somebody@git.code.sf.net/p/klusters/klusters')
self.assertEqual(url, self.url)
class TestRevlinkMatch(unittest.TestCase):
def testNotmuch(self):
revision = 'f717d2ece1836c863f9cc02abd1ff2539307cd1d'
matcher = RevlinkMatch(['git://notmuchmail.org/git/(.*)'],
r'http://git.notmuchmail.org/git/\1/commit/%s')
self.assertEqual(matcher(revision, 'git://notmuchmail.org/git/notmuch'),
'http://git.notmuchmail.org/git/notmuch/commit/f717d2ece1836c863f9cc02abd1ff2539307cd1d') # noqa pylint: disable=line-too-long
def testSingleString(self):
revision = 'rev'
matcher = RevlinkMatch('test', 'out%s')
self.assertEqual(matcher(revision, 'test'), 'outrev')
def testSingleUnicode(self):
revision = 'rev'
matcher = RevlinkMatch('test', 'out%s')
self.assertEqual(matcher(revision, 'test'), 'outrev')
def testTwoCaptureGroups(self):
revision = 'rev'
matcher = RevlinkMatch('([A-Z]*)Z([0-9]*)', r'\2-\1-%s')
self.assertEqual(matcher(revision, 'ABCZ43'), '43-ABC-rev')
class TestGitwebMatch(unittest.TestCase):
def testOrgmode(self):
revision = '490d6ace10e0cfe74bab21c59e4b7bd6aa3c59b8'
matcher = GitwebMatch(
'git://orgmode.org/(?P<repo>.*)', 'http://orgmode.org/w/')
self.assertEqual(matcher(revision, 'git://orgmode.org/org-mode.git'),
'http://orgmode.org/w/?p=org-mode.git;a=commit;h=490d6ace10e0cfe74bab21c59e4b7bd6aa3c59b8') # noqa pylint: disable=line-too-long
class TestDefaultRevlinkMultiPlexer(unittest.TestCase):
revision = "0"
def testAllRevlinkMatchers(self):
# GithubRevlink
self.assertTrue(default_revlink_matcher(
self.revision, 'https://github.com/buildbot/buildbot.git'))
# SourceforgeGitRevlink
self.assertTrue(default_revlink_matcher(
self.revision, 'git://gemrb.git.sourceforge.net/gitroot/gemrb/gemrb'))
# SourceforgeGitRevlink_AlluraPlatform
self.assertTrue(default_revlink_matcher(
self.revision, 'git://git.code.sf.net/p/klusters/klusters'))
|
badp/ganeti
|
test/py/ganeti.rapi.client_unittest.py
|
Python
|
gpl-2.0
| 59,059 | 0.004809 |
#!/usr/bin/python
#
# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the RAPI client module"""
import unittest
import warnings
import pycurl
from ganeti import opcodes
from ganeti import constants
from ganeti import http
from ganeti import serializer
from ganeti import utils
from ganeti import query
from ganeti import objects
from ganeti import rapi
from ganeti import errors
import ganeti.rapi.testutils
from ganeti.rapi import connector
from ganeti.rapi import rlib2
from ganeti.rapi import client
import testutils
# List of resource handlers which aren't used by the RAPI client
_KNOWN_UNUSED = set([
rlib2.R_root,
rlib2.R_2,
])
# Global variable for collecting used handlers
_used_handlers = None
class RapiMock(object):
def __init__(self):
self._mapper = connector.Mapper()
self._responses = []
self._last_handler = None
self._last_req_data = None
def ResetResponses(self):
del self._responses[:]
def AddResponse(self, response, code=200):
self
|
._responses.insert(0, (code, response))
def CountPending(self):
return len(self._responses)
def GetLastHandler(self):
return self._last_handle
|
r
def GetLastRequestData(self):
return self._last_req_data
def FetchResponse(self, path, method, headers, request_body):
self._last_req_data = request_body
try:
(handler_cls, items, args) = self._mapper.getController(path)
# Record handler as used
_used_handlers.add(handler_cls)
self._last_handler = handler_cls(items, args, None)
if not hasattr(self._last_handler, method.upper()):
raise http.HttpNotImplemented(message="Method not implemented")
except http.HttpException, ex:
code = ex.code
response = ex.message
else:
if not self._responses:
raise Exception("No responses")
(code, response) = self._responses.pop()
return (code, NotImplemented, response)
class TestConstants(unittest.TestCase):
def test(self):
self.assertEqual(client.GANETI_RAPI_PORT, constants.DEFAULT_RAPI_PORT)
self.assertEqual(client.GANETI_RAPI_VERSION, constants.RAPI_VERSION)
self.assertEqual(client.HTTP_APP_JSON, http.HTTP_APP_JSON)
self.assertEqual(client._REQ_DATA_VERSION_FIELD, rlib2._REQ_DATA_VERSION)
self.assertEqual(client.JOB_STATUS_QUEUED, constants.JOB_STATUS_QUEUED)
self.assertEqual(client.JOB_STATUS_WAITING, constants.JOB_STATUS_WAITING)
self.assertEqual(client.JOB_STATUS_CANCELING,
constants.JOB_STATUS_CANCELING)
self.assertEqual(client.JOB_STATUS_RUNNING, constants.JOB_STATUS_RUNNING)
self.assertEqual(client.JOB_STATUS_CANCELED, constants.JOB_STATUS_CANCELED)
self.assertEqual(client.JOB_STATUS_SUCCESS, constants.JOB_STATUS_SUCCESS)
self.assertEqual(client.JOB_STATUS_ERROR, constants.JOB_STATUS_ERROR)
self.assertEqual(client.JOB_STATUS_PENDING, constants.JOBS_PENDING)
self.assertEqual(client.JOB_STATUS_FINALIZED, constants.JOBS_FINALIZED)
self.assertEqual(client.JOB_STATUS_ALL, constants.JOB_STATUS_ALL)
# Node evacuation
self.assertEqual(client.NODE_EVAC_PRI, constants.NODE_EVAC_PRI)
self.assertEqual(client.NODE_EVAC_SEC, constants.NODE_EVAC_SEC)
self.assertEqual(client.NODE_EVAC_ALL, constants.NODE_EVAC_ALL)
# Legacy name
self.assertEqual(client.JOB_STATUS_WAITLOCK, constants.JOB_STATUS_WAITING)
# RAPI feature strings
self.assertEqual(client._INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client.INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client._INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client.INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client._NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client.NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client._NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
self.assertEqual(client.NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
def testErrors(self):
self.assertEqual(client.ECODE_ALL, errors.ECODE_ALL)
# Make sure all error codes are in both RAPI client and errors module
for name in filter(lambda s: (s.startswith("ECODE_") and s != "ECODE_ALL"),
dir(client)):
value = getattr(client, name)
self.assertEqual(value, getattr(errors, name))
self.assertTrue(value in client.ECODE_ALL)
self.assertTrue(value in errors.ECODE_ALL)
class RapiMockTest(unittest.TestCase):
def test404(self):
(code, _, body) = RapiMock().FetchResponse("/foo", "GET", None, None)
self.assertEqual(code, 404)
self.assertTrue(body is None)
def test501(self):
(code, _, body) = RapiMock().FetchResponse("/version", "POST", None, None)
self.assertEqual(code, 501)
self.assertEqual(body, "Method not implemented")
def test200(self):
rapi = RapiMock()
rapi.AddResponse("2")
(code, _, response) = rapi.FetchResponse("/version", "GET", None, None)
self.assertEqual(200, code)
self.assertEqual("2", response)
self.failUnless(isinstance(rapi.GetLastHandler(), rlib2.R_version))
def _FakeNoSslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, None, 0)
def _FakeFancySslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, "FancySSL/1.2.3", 0)
def _FakeOpenSslPycurlVersion():
# Note: incomplete version tuple
return (2, "7.15.5", 462597, "othersystem", 668, "OpenSSL/0.9.8c", 0)
def _FakeGnuTlsPycurlVersion():
# Note: incomplete version tuple
return (3, "7.18.0", 463360, "somesystem", 1581, "GnuTLS/2.0.4", 0)
class TestExtendedConfig(unittest.TestCase):
def testAuth(self):
cl = client.GanetiRapiClient("master.example.com",
username="user", password="pw",
curl_factory=lambda: rapi.testutils.FakeCurl(RapiMock()))
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.HTTPAUTH), pycurl.HTTPAUTH_BASIC)
self.assertEqual(curl.getopt(pycurl.USERPWD), "user:pw")
def testInvalidAuth(self):
# No username
self.assertRaises(client.Error, client.GanetiRapiClient,
"master-a.example.com", password="pw")
# No password
self.assertRaises(client.Error, client.GanetiRapiClient,
"master-b.example.com", username="user")
def testCertVerifyInvalidCombinations(self):
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True, cafile="cert1.pem")
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True, capath="certs/")
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True,
cafile="cert1.pem", capath="certs/")
def testProxySignalVerifyHostname(self):
for use_gnutls in [False, True]:
if use_gnutls:
pcverfn = _FakeGnuTlsPycurlVersion
else:
pcverfn = _FakeOpenSslPycurlVersion
for proxy in ["", "http://127.0.0.1:1234"]:
for use_signal in [False, True]:
for verify_hostname in [False, True]:
cfgfn = client.GenericCurlConfig(proxy=proxy, use_signal=use_signal,
verify_hostname=verify_hostname,
_pycurl_version_fn=pcverfn)
curl_factory =
|
astrobin/astrobin
|
astrobin/tests/test_collection.py
|
Python
|
agpl-3.0
| 11,674 | 0.002998 |
import re
import simplejson
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from astrobin.models import Collection, Image
from astrobin_apps_images.models import KeyValueTag
class CollectionTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', 'test@test.com', 'password')
self.user2 = User.objects.create_user('test2', 'test@test.com', 'password')
self.user.default_gallery_section = 5
self.user.save()
###########################################################################
# HELPERS #
###########################################################################
def _do_upload(self, filename, wip=False):
data = {'image_file': open(filename, 'rb')}
if wip:
data['wip'] = True
return self.client.post(
reverse('image_upload_process'),
data,
follow=True)
def _get_last_image(self):
return Image.objects_including_wip.all().order_by('-id')[0]
def _create_collection(self, user, name, description):
return self.client.post(
reverse('user_collections_create', args=(user.username,)),
{
'name': name,
'description': description,
},
follow=True
)
def _get_last_collection(self):
return Collection.objects.all().order_by('-id')[0]
###########################################################################
# View tests #
###########################################################################
def test_collections_list_view(self):
# Anon user, no collections
response = self.client.get(reverse('user_collections_list', args=(self.user.username,)))
self.assertContains(response, "This user does not have any collections")
# Other user, no collections
self.client.login(username='test2', password='password')
r
|
esponse = self.client.get(reverse('user_collectio
|
ns_list', args=(self.user.username,)))
self.assertContains(response, "This user does not have any collections")
self.client.logout()
# Owner, no collection
self.client.login(username='test', password='password')
response = self.client.get(reverse('user_collections_list', args=(self.user.username,)))
self.assertContains(response, "You do not have any collections")
self.client.logout()
# Create a collection
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
response = self._create_collection(self.user, 'test_collection', 'test_description')
image = self._get_last_image()
collection = self._get_last_collection()
self.assertEqual(collection.name, 'test_collection')
self.assertEqual(collection.description, 'test_description')
response = self.client.get(reverse('user_collections_list', args=(self.user.username,)))
self.assertContains(response, "test_collection")
# Collection has no images
self.assertContains(response, "collection-image empty")
def test_collection_update_view(self):
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
collection = self._get_last_collection()
self._do_upload('astrobin/fixtures/test.jpg')
image1 = self._get_last_image()
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
collection.images.add(image1)
collection.images.add(image2)
# Test that image2 is the cover (latest uploaded)
response = self.client.get(
reverse('user_collections_list', args=(self.user.username,))
)
self.assertIsNotNone(
re.search(
r'data-id="%d"\s+data-id-or-hash="%s"\s+data-alias="%s"' % (image2.pk, image2.get_id(), "collection"),
response.content.decode('utf-8')
)
)
response = self.client.post(
reverse('user_collections_update', args=(self.user.username, collection.pk)),
{
'name': 'edited_name',
'description': 'edited_description',
'cover': image1.pk,
},
follow=True
)
self.assertContains(response, "edited_name")
response = self.client.get(
reverse('user_collections_list', args=(self.user.username,))
)
self.assertIsNotNone(
re.search(
r'data-id="%d"\s+data-id-or-hash="%s"\s+data-alias="%s"' % (image1.pk, image1.get_id(), "collection"),
response.content.decode('utf-8')
)
)
def test_collection_delete_view(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self._create_collection(self.user, 'test_collection', 'test_description')
collection = self._get_last_collection()
response = self.client.post(
reverse('user_collections_delete', args=(self.user.username, collection.pk)),
follow=True)
self.assertNotContains(response, "test_collection")
def test_collection_add_remove_images_view(self):
# Create a collection
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
collection = self._get_last_collection()
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
response = self.client.get(
reverse('user_collections_add_remove_images', args=(self.user.username, collection.pk)),
)
self.assertEqual(response.status_code, 200)
self.client.post(
reverse('user_collections_add_remove_images', args=(self.user.username, collection.pk)),
{
'images[]': [image.pk, image2.pk],
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
follow=True)
self.assertEqual(collection.images.count(), 2)
def test_collection_order_by_tag(self):
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
self._do_upload('astrobin/fixtures/test.jpg')
image1 = self._get_last_image()
KeyValueTag.objects.create(image=image1, key="a", value=1)
KeyValueTag.objects.create(image=image1, key="b", value=2)
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
KeyValueTag.objects.create(image=image2, key="a", value=2)
KeyValueTag.objects.create(image=image2, key="b", value=1)
collection = Collection.objects.create(user=self.user, order_by_tag="a")
collection.images.add(image1, image2)
response = self.client.get(reverse('user_collections_detail', args=(self.user.username, collection.pk,)))
self.assertContains(response, image1.hash)
self.assertContains(response, image2.hash)
encoded_response = response.content.decode('utf-8')
self.assertTrue(encoded_response.find(image1.hash) < encoded_response.find(image2.hash))
collection.order_by_tag = "b"
collection.save()
response = self.client.get(reverse('user_collections_detail', args=(self.user.username, collection.pk,)))
self.assertContains(response, image1.hash)
self.assertContains(response, image2.hash)
encoded_response = response.content.decode('utf-8')
self.assertTrue(encoded_response.find(image2.hash) < encoded_response.find(image1.hash))
image2.keyvaluetags.filter(key="b").delete()
|
danielhers/ucca
|
ucca/tests/test_constructions.py
|
Python
|
gpl-3.0
| 2,357 | 0.00594 |
from collections import OrderedDict
import pytest
from ucca import textutil
from ucca.constructions import CATEGORIES_NAME, DEFAULT, CONSTRUCTIONS, extract_candidates
from .conftest import PASSAGES, loaded, loaded_valid, multi_sent, crossing, discontiguous, l1_passage, empty
"""Tests the constructions module functions and classes."""
def assert_spacy_not_loaded(*
|
args, **kwargs):
del args, kwargs
assert False, "Should not load spaCy when passage is pre-annotated"
def extract_and_check(p, constructions=None, expected=None):
d = OrderedDict((construction, [candidate.edge for candidate in candidates]) for construction, candidates in
extract_candidates(p, constructions=constructions).items() if candidates)
if expected is no
|
t None:
hist = {c.name: len(e) for c, e in d.items()}
assert hist == expected, " != ".join(",".join(sorted(h)) for h in (hist, expected))
@pytest.mark.parametrize("create, expected", (
(loaded, {'P': 1, 'remote': 1, 'E': 3, 'primary': 15, 'U': 2, 'F': 1, 'C': 3, 'A': 1, 'D': 1, 'L': 2, 'mwe': 2,
'H': 5, 'implicit': 1, 'main_rel': 1}),
(loaded_valid, {'P': 1, 'remote': 1, 'E': 3, 'primary': 15, 'U': 2, 'F': 1, 'C': 3, 'A': 1, 'D': 1, 'L': 2,
'mwe': 2, 'H': 5, 'implicit': 1, 'main_rel': 1}),
(multi_sent, {'U': 4, 'P': 3, 'mwe': 2, 'H': 3, 'primary': 6, 'main_rel': 2}),
(crossing, {'U': 3, 'P': 2, 'remote': 1, 'mwe': 1, 'H': 2, 'primary': 3, 'main_rel': 2}),
(discontiguous, {'G': 1, 'U': 2, 'E': 2, 'primary': 13, 'P': 3, 'F': 1, 'C': 1, 'A': 3, 'D': 2,
'mwe': 6, 'H': 3, 'implicit':3, 'main_rel': 2}),
(l1_passage, {'P': 2, 'mwe': 4, 'H': 3, 'primary': 11, 'U': 2, 'A': 5, 'D': 1, 'L': 2, 'remote': 2, 'S': 1,
'implicit':1, 'main_rel': 3}),
(empty, {}),
))
def test_extract_all(create, expected):
extract_and_check(create(), constructions=CONSTRUCTIONS, expected=expected)
@pytest.mark.parametrize("create", PASSAGES)
@pytest.mark.parametrize("constructions", (DEFAULT, [CATEGORIES_NAME]), ids=("default", CATEGORIES_NAME))
def test_extract(create, constructions, monkeypatch):
monkeypatch.setattr(textutil, "get_nlp", assert_spacy_not_loaded)
extract_and_check(create(), constructions=constructions)
|
sibson/vncdotool
|
vncdotool/rfb.py
|
Python
|
mit
| 35,587 | 0.005789 |
"""
RFB protocol implementattion, client side.
Override RFBClient and RFBFactory in your application.
See vncviewer.py for an example.
Reference:
http://www.realvnc.com/docs/rfbproto.pdf
(C) 2003 cliechti@gmx.net
MIT License
"""
# flake8: noqa
import sys
import math
import zlib
import getpass
import os
from Crypto.Cipher import AES
from Crypto.Hash import MD5
from Crypto.Util.Padding import pad
from Crypto.Util.number import bytes_to_long, long_to_bytes
from struct import pack, unpack
from . import pyDes
from twisted.python import usage, log
from twisted.internet.protocol import Protocol
from twisted.internet import protocol
from twisted.application import internet, service
#~ from twisted.internet import reactor
# Python3 compatibility replacement for ord(str) as ord(byte)
if sys.version_info[0] >= 3:
original_ord = ord
def ord(x):
# in python 2, there are two possible cases ord is used.
# * string of length > 1, --(index access)--> string of length 1 --(ord)--> int
# * string of length 1 --(ord)--> int
# however in python3, this usage morphs into
# * byte of length > 1, --(index access)--> int --(ord)--> Error
# * byte of length 1 --(ord)--> int
if isinstance(x, int):
return x
elif isinstance(x, bytes) or isinstance(x, str):
return original_ord(x)
else:
raise TypeError(f"our customized ord takes an int, a byte, or a str. Got {type(x)} : {x}")
#encoding-type
#for SetEncodings()
RAW_ENCODING = 0
COPY_RECTANGLE_ENCODING = 1
RRE_ENCODING = 2
CORRE_ENCODING = 4
HEXTILE_ENCODING = 5
ZLIB_ENCODING = 6
TIGHT_ENCODING = 7
ZLIBHEX_ENCODING = 8
ZRLE_ENCODING = 16
#0xffffff00 to 0xffffffff tight options
PSEUDO_CURSOR_ENCODING = -239
PSEUDO_DESKTOP_SIZE_ENCODING = -223
#keycodes
#for KeyEvent()
KEY_BackSpace = 0xff08
KEY_Tab = 0xff09
KEY_Return = 0xff0d
KEY_Escape = 0xff1b
KEY_Insert = 0xff63
KEY_Delete = 0xffff
KEY_Home = 0xff50
KEY_End = 0xff57
KEY_PageUp = 0xff55
KEY_PageDown = 0xff56
KEY_Left = 0xff51
KEY_Up = 0xff52
KEY_Right = 0xff53
KEY_Down = 0xff54
KEY_F1 = 0xffbe
KEY_F2 = 0xffbf
KEY_F3 = 0xffc0
KEY_F4 = 0xffc1
KEY_F5 = 0xffc2
KEY_F6 = 0xffc3
KEY_F7 = 0xffc4
KEY_F8 = 0xffc5
KEY_F9 = 0xffc6
KEY_F10 = 0xffc7
KEY_F11 = 0xffc8
KEY_F12 = 0xffc9
KEY_F13 = 0xFFCA
KEY_F14 = 0xFFCB
KEY_F15 = 0xFFCC
KEY_F16 = 0xFFCD
KEY_F17 = 0xFFCE
KEY_F18 = 0xFFCF
KEY_F19 = 0xFFD0
KEY_F20 = 0xFFD1
KEY_ShiftLeft = 0xffe1
KEY_ShiftRight = 0xffe2
KEY_ControlLeft = 0xffe3
KEY_ControlRight = 0xffe4
KEY_MetaLeft = 0xffe7
KEY_MetaRight = 0xffe8
KEY_AltLeft = 0xffe9
KEY_AltRight = 0xffea
KEY_Scroll_Lock = 0xFF14
KEY_Sys_Req = 0xFF15
KEY_Num_Lock = 0xFF7F
KEY_Caps_Lock = 0xFFE5
KEY_Pause = 0xFF13
KEY_Super_L = 0xFFEB
KEY_Super_R = 0xFFEC
KEY_Hyper_L = 0xFFED
KEY_Hyper_R = 0xFFEE
KEY_KP_0 = 0xFFB0
KEY_KP_1 = 0xFFB1
KEY_KP_2 = 0xFFB2
KEY_KP_3 = 0xFFB3
KEY_KP_4 = 0xFFB4
KEY_KP_5 = 0xFFB5
KEY_KP_6 = 0xFFB6
KEY_KP_7 = 0xFFB7
KEY_KP_8 = 0xFFB8
KEY_KP_9 = 0xFFB9
KEY_KP_Enter = 0xFF8D
KEY_ForwardSlash = 0x002F
KEY_BackSlash = 0x005C
KEY_SpaceBar= 0x0020
# ZRLE helpers
def _zrle_next_bit(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(8):
value = b >> (7 - n)
yield value & 1
num_pixels += 1
if num_pixels == pixels_in_tile:
return
def _zrle_next_dibit(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(0, 8, 2):
value = b >> (6 - n)
yield value & 3
num_pixels += 1
if num_pixels == pixels_in_tile:
return
def _zrle_next_nibble(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(0, 8, 4):
value = b >> (4 - n)
yield value & 15
num_pixels += 1
if num_pixels == pixels_in_tile:
return
class RFBClient(Protocol):
def __init__(self):
self._packet = []
self._packet_len = 0
self._handler = self._handleInitial
self._already_expecting = 0
self._version = None
self._version_server = None
self._zlib_stream = zlib.decompressobj(0)
#---------------------------------------------------
|
---
# states used on connection startup
#------------------------------------------------------
def _handleInitial(self):
buffer = b''.join(self._packet)
if b'\n' in buffer:
version = 3.3
if buffer[:3] == b'RFB':
version_server = float(buffer[3:-1].replace(b'0', b''))
SUPPO
|
RTED_VERSIONS = (3.3, 3.7, 3.8)
if version_server == 3.889: # Apple Remote Desktop
version_server = 3.8
if version_server in SUPPORTED_VERSIONS:
version = version_server
else:
log.msg("Protocol version %.3f not supported"
% version_server)
version = max(filter(
lambda x: x <= version_server, SUPPORTED_VERSIONS))
buffer = buffer[12:]
log.msg("Using protocol version %.3f" % version)
parts = str(version).split('.')
self.transport.write(
bytes(b"RFB %03d.%03d\n" % (int(parts[0]), int(parts[1]))))
self._packet[:] = [buffer]
self._packet_len = len(buffer)
self._handler = self._handleExpected
self._version = version
self._version_server = version_server
if version < 3.7:
self.expect(self._handleAuth, 4)
else:
self.expect(self._handleNumberSecurityTypes, 1)
else:
self._packet[:] = [buffer]
self._packet_len = len(buffer)
def _handleNumberSecurityTypes(self, block):
(num_types,) = unpack("!B", block)
if num_types:
self.expect(self._handleSecurityTypes, num_types)
else:
self.expect(self._handleConnFailed, 4)
def _handleSecurityTypes(self, block):
types = unpack("!%dB" % len(block), block)
SUPPORTED_TYPES = (1, 2, 30)
valid_types = [sec_type for sec_type in types if sec_type in SUPPORTED_TYPES]
if valid_types:
sec_type = max(valid_types)
self.transport.write(pack("!B", sec_type))
if sec_type == 1:
if self._version < 3.8:
self._doClientInitialization()
else:
self.expect(self._handleVNCAuthResult, 4)
elif sec_type == 2:
self.expect(self._handleVNCAuth, 16)
elif sec_type == 30: # Apple Remote Desktop
self.expect(self._handleAppleAuth, 4)
else:
log.msg("unknown security types: %s" % repr(types))
def _handleAuth(self, block):
(auth,) = unpack("!I", block)
#~ print "auth:", auth
if auth == 0:
self.expect(self._handleConnFailed, 4)
elif auth == 1:
self._doClientInitialization()
return
elif auth == 2:
self.expect(self._handleVNCAuth, 16)
else:
log.msg("unknown auth response (%d)" % auth)
def _handleConnFailed(self, block):
(waitfor,) = unpack("!I", block)
self.expect(self._handleConnMessage, waitfor)
def _handleConnMessage(self, block):
log.msg("Connection refused: %r" % block)
def _handleVNCAuth(self, block):
self._challenge = block
self.vncRequestPassword()
self.expect(self._handleVNCAuthResult, 4)
def _handl
|
mrwangxc/zstack-utility
|
zstackctl/zstackctl/ctl.py
|
Python
|
apache-2.0
| 302,359 | 0.005626 |
#!/usr/bin/python
import argparse
import sys
import os
import subprocess
import signal
import getpass
import simplejson
from termcolor import colored
import ConfigParser
import StringIO
import functools
import time
import random
import string
from configobj import ConfigObj
import tempfile
import pwd, grp
import traceback
import uuid
import yaml
import re
from zstacklib import *
import jinja2
import socket
import struct
import fcntl
import commands
import threading
import itertools
import platform
from datetime import datetime, timedelta
import multiprocessing
mysql_db_config_script='''
echo "modify my.cnf"
if [ -f /etc/mysql/mariadb.conf.d/50-server.cnf ]; then
#ubuntu 16.04
mysql_conf=/etc/mysql/mariadb.conf.d/50-server.cnf
elif [ -f /etc/mysql/my.cnf ]; then
# Ubuntu 14.04
mysql_conf=/etc/mysql/my.cnf
elif [ -f /etc/my.cnf ]; then
# centos
mysql_conf=/etc/my.cnf
fi
sed -i 's/^bind-address/#bind-address/' $mysql_conf
sed -i 's/^skip-networking/#skip-networking/' $mysql_conf
sed -i 's/^bind-address/#bind-address/' $mysql_conf
grep 'binlog_format=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a binlog_format=mixed\' $mysql_conf
fi
grep 'log_bin_trust_function_creators=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log_bin_trust_function_creators=1"
sed -i '/\[mysqld\]/a log_bin_trust_function_creators=1\' $mysql_conf
fi
grep 'expire_logs=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "expire_logs=30"
sed -i '/\[mysqld\]/a expire_logs=30\' $mysql_conf
fi
grep 'max_binlog_size=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_binlog_size=500m"
sed -i '/\[mysqld\]/a max_binlog_size=500m\' $mysql_conf
fi
grep 'log-bin=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log-bin=mysql-binlog"
sed -i '/\[mysqld\]/a log-bin=mysql-binlog\' $mysql_conf
fi
grep 'max_connections' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_connections=1024"
sed -i '/\[mysqld\]/a max_connections=1024\' $mysql_conf
else
echo "max_connections=1024"
sed -i 's/max_connections.*/max_connections=1024/g' $mysql_conf
fi
grep '^character-set-server' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a character-set-server=utf8\' $mysql_conf
fi
grep '^skip-name-resolve' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
sed -i '/\[mysqld\]/a skip-name-resolve\' $mysql_conf
fi
grep 'tmpdir' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
mysql_tmp_path="/var/lib/mysql/tmp"
if [ ! -x "$mysql_tmp_path" ]; then
mkdir "$mysql_tmp_path"
chown mysql:mysql "$mysql_tmp_path"
chmod 1777 "$mysql_tmp_path"
fi
echo "tmpdir=/var/lib/mysql/tmp"
sed -i '/\[mysqld\]/a tmpdir=/var/lib/mysql/tmp\' $mysql_conf
fi
'''
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def loop_until_timeout(timeout, interval=1):
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
current_time = time.time()
expired = current_time + timeout
while current_time < expired:
if f(*args, **kwargs):
return True
time.sleep(interval)
current_time = time.time()
return False
return inner
return wrap
def find_process_by_cmdline(cmdlines):
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
with open(os.path.join('/proc', pid, 'cmdline'), 'r') as fd:
cmdline = fd.read()
is_find = True
for c in cmdline
|
s:
if c not in cmdline:
is_find = False
break
if not is_find:
continue
return pid
except IOError:
continue
return None
def ssh_run_full(ip, cmd, params=[], pipe=True):
remote_path = '/tmp/%s.sh' % uuid.uuid4()
scr
|
ipt = '''/bin/bash << EOF
cat << EOF1 > %s
%s
EOF1
/bin/bash %s %s
ret=$?
rm -f %s
exit $ret
EOF''' % (remote_path, cmd, remote_path, ' '.join(params), remote_path)
scmd = ShellCmd('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "%s"' % (ip, script), pipe=pipe)
scmd(False)
return scmd
def ssh_run(ip, cmd, params=[]):
scmd = ssh_run_full(ip, cmd, params)
if scmd.return_code != 0:
scmd.raise_error()
return scmd.stdout
def ssh_run_no_pipe(ip, cmd, params=[]):
scmd = ssh_run_full(ip, cmd, params, False)
if scmd.return_code != 0:
scmd.raise_error()
return scmd.stdout
class CtlError(Exception):
pass
def warn(msg):
sys.stdout.write(colored('WARNING: %s\n' % msg, 'yellow'))
def error(msg):
sys.stderr.write(colored('ERROR: %s\n' % msg, 'red'))
sys.exit(1)
def error_not_exit(msg):
sys.stderr.write(colored('ERROR: %s\n' % msg, 'red'))
def info(*msg):
if len(msg) == 1:
out = '%s\n' % ''.join(msg)
else:
out = ''.join(msg)
sys.stdout.write(out)
def get_detail_version():
detailed_version_file = os.path.join(ctl.zstack_home, "VERSION")
if os.path.exists(detailed_version_file):
with open(detailed_version_file, 'r') as fd:
detailed_version = fd.read()
return detailed_version
else:
return None
def check_ip_port(host, port):
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, int(port)))
return result == 0
def compare_version(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version2), normalize(version1))
def get_zstack_version(db_hostname, db_port, db_user, db_password):
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select version from schema_version order by version desc"
ret = query.query()
versions = [r['version'] for r in ret]
versions.sort(cmp=compare_version)
version = versions[0]
return version
def get_default_gateway_ip():
'''This function will return default route gateway ip address'''
with open("/proc/net/route") as gateway:
try:
for item in gateway:
fields = item.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
if fields[7] == '00000000':
return socket.inet_ntoa(struct.pack("=L", int(fields[2], 16)))
except ValueError:
return None
def get_default_ip():
cmd = ShellCmd("""dev=`ip route|grep default|head -n 1|awk -F "dev" '{print $2}' | awk -F " " '{print $1}'`; ip addr show $dev |grep "inet "|awk '{print $2}'|head -n 1 |awk -F '/' '{print $1}'""")
cmd(False)
return cmd.stdout.strip()
def get_yum_repo_from_property():
yum_repo = ctl.read_property('Ansible.var.zstack_repo')
if not yum_repo:
return yum_repo
# avoid http server didn't start when install package
if 'zstack-mn' in yum_repo:
yum_repo = yum_repo.replace("zstack-mn","zstack-local")
if 'qemu-kvm-ev-mn' in yum_repo:
yum_repo = yum_repo.replace("qemu-kvm-ev-mn","qemu-kvm-ev")
return yum_repo
def get_host_list(table_name):
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select * from %s" % table_name
host_vo = query.query()
return host_vo
def get_vrouter_list():
ip_list = []
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password
|
HelloLily/hellolily
|
lily/hubspot/prefetch_objects.py
|
Python
|
agpl-3.0
| 2,414 | 0.000829 |
from django.db.models import Prefetch, Case, When, Value, IntegerField, Q
from lily.accounts.models import Website, Account
from lily.integrations.models import Document
from lily.notes.models import Note
from lily.socialmedia.models import SocialMedia
from lily.tags.models import Tag
from lily.utils.models.models import Address, PhoneNumber, EmailAddress
website_prefetch = Prefetch(
lookup='websites',
queryset=Website.objects.exclude(Q(website='http://') | Q(website='https://')).order_by('-is_primary').all(),
to_attr='prefetched_websites'
)
addresses_prefetch = Prefetch(
lookup='addresses',
queryset=Address.objects.all(),
to_attr='prefetched_addresses'
)
phone_prefetch = Prefetc
|
h(
looku
|
p='phone_numbers',
queryset=PhoneNumber.objects.filter(
status=PhoneNumber.ACTIVE_STATUS
).annotate(
custom_order=Case(
When(type='work', then=Value(1)),
When(type='mobile', then=Value(2)),
When(type='home', then=Value(3)),
When(type='other', then=Value(4)),
When(type='fax', then=Value(5)),
output_field=IntegerField(),
)
).order_by('custom_order'),
to_attr='prefetched_phone_numbers'
)
social_media_prefetch = Prefetch(
lookup='social_media',
queryset=SocialMedia.objects.all(),
to_attr='prefetched_social_media'
)
notes_prefetch = Prefetch(
lookup='notes',
queryset=Note.objects.filter(is_deleted=False),
to_attr='prefetched_notes'
)
pinned_notes_prefetch = Prefetch(
lookup='notes',
queryset=Note.objects.filter(is_deleted=False, is_pinned=True),
to_attr='prefetched_pinned_notes'
)
tags_prefetch = Prefetch(
lookup='tags',
queryset=Tag.objects.all(),
to_attr='prefetched_tags'
)
accounts_prefetch = Prefetch(
lookup='accounts',
queryset=Account.objects.filter(is_deleted=False),
to_attr='prefetched_accounts'
)
email_addresses_prefetch = Prefetch(
lookup='email_addresses',
queryset=EmailAddress.objects.exclude(status=EmailAddress.INACTIVE_STATUS).order_by('-status'),
to_attr='prefetched_email_addresses'
)
twitter_prefetch = Prefetch(
lookup='social_media',
queryset=SocialMedia.objects.filter(name='twitter'),
to_attr='prefetched_twitters'
)
document_prefetch = Prefetch(
lookup='document_set',
queryset=Document.objects.all(),
to_attr='prefetched_documents'
)
|
expectocode/Telethon
|
telethon/client/chats.py
|
Python
|
mit
| 42,127 | 0.000688 |
import asyncio
import inspect
import itertools
import string
import typing
from .. import helpers, utils, hints
from ..requestiter import RequestIter
from ..tl import types, functions, custom
if typing.TYPE_CHECKING:
from .telegramclient import TelegramClient
_MAX_PARTICIPANTS_CHUNK_SIZE = 200
_MAX_ADMIN_LOG_CHUNK_SIZE = 100
_MAX_PROFILE_PHOTO_CHUNK_SIZE = 100
class _ChatAction:
_str_mapping = {
'typing': types.SendMessageTypingAction(),
'contact': types.SendMessageChooseContactAction(),
'game': types.SendMessageGamePlayAction(),
'location': types.SendMessageGeoLocationAction(),
'record-audio': types.SendMessageRecordAudioAction(),
'record-voice': types.SendMessageRecordAudioAction(), # alias
'record-round': types.SendMessageRecordRoundAction(),
'record-video': types.SendMessageRecordVideoAction(),
'audio': types.SendMessageUploadAudioAction(1),
'voice': types.SendMessageUploadAudioAction(1), # alias
'song': types.SendMessageUploadAudioAction(1), # alias
'round': types.SendMessageUploadRoundAction(1),
'video': types.SendMessageUploadVideoAction(1),
'photo': types.SendMessageUploadPhotoAction(1),
'document': types.SendMessageUploadDocumentAction(1),
'file': types.SendMessageUploadDocumentAction(1), # alias
'cancel': types.SendMessageCancelAction()
}
def __init__(self, client, chat, action, *, delay, auto_cancel):
self._client = client
self._chat = chat
self._action = action
self._delay = delay
self._auto_cancel = auto_cancel
self._request = None
self._task = None
self._running = False
async def __aenter__(self):
self._chat = await self._client.get_input_entity(self._chat)
# Since `self._action` is passed by reference we can avoid
# recreating the request all the time and still modify
# `self._action.progress` directly in `progress`.
self._request = functions.messages.SetTypingRequest(
self._chat, self._action)
self._running = True
self._task = self._client.loop.create_task(self._update())
return self
async def __aexit__(self, *args):
self._running = False
if self._task:
self._task.cancel()
try:
await self._task
except asyncio.CancelledError:
pass
self._task = No
|
ne
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
async def _update(self):
try:
while self._running:
await self._client(self._request)
await asyncio.sleep(self._delay)
except ConnectionError:
pass
except asyncio.CancelledError:
if self._auto_cancel:
await self._client(functio
|
ns.messages.SetTypingRequest(
self._chat, types.SendMessageCancelAction()))
def progress(self, current, total):
if hasattr(self._action, 'progress'):
self._action.progress = 100 * round(current / total)
class _ParticipantsIter(RequestIter):
async def _init(self, entity, filter, search, aggressive):
if isinstance(filter, type):
if filter in (types.ChannelParticipantsBanned,
types.ChannelParticipantsKicked,
types.ChannelParticipantsSearch,
types.ChannelParticipantsContacts):
# These require a `q` parameter (support types for convenience)
filter = filter('')
else:
filter = filter()
entity = await self.client.get_input_entity(entity)
ty = helpers._entity_type(entity)
if search and (filter or ty != helpers._EntityType.CHANNEL):
# We need to 'search' ourselves unless we have a PeerChannel
search = search.casefold()
self.filter_entity = lambda ent: (
search in utils.get_display_name(ent).casefold() or
search in (getattr(ent, 'username', None) or '').casefold()
)
else:
self.filter_entity = lambda ent: True
# Only used for channels, but we should always set the attribute
self.requests = []
if ty == helpers._EntityType.CHANNEL:
self.total = (await self.client(
functions.channels.GetFullChannelRequest(entity)
)).full_chat.participants_count
if self.limit <= 0:
raise StopAsyncIteration
self.seen = set()
if aggressive and not filter:
self.requests.extend(functions.channels.GetParticipantsRequest(
channel=entity,
filter=types.ChannelParticipantsSearch(x),
offset=0,
limit=_MAX_PARTICIPANTS_CHUNK_SIZE,
hash=0
) for x in (search or string.ascii_lowercase))
else:
self.requests.append(functions.channels.GetParticipantsRequest(
channel=entity,
filter=filter or types.ChannelParticipantsSearch(search),
offset=0,
limit=_MAX_PARTICIPANTS_CHUNK_SIZE,
hash=0
))
elif ty == helpers._EntityType.CHAT:
full = await self.client(
functions.messages.GetFullChatRequest(entity.chat_id))
if not isinstance(
full.full_chat.participants, types.ChatParticipants):
# ChatParticipantsForbidden won't have ``.participants``
self.total = 0
raise StopAsyncIteration
self.total = len(full.full_chat.participants.participants)
users = {user.id: user for user in full.users}
for participant in full.full_chat.participants.participants:
user = users[participant.user_id]
if not self.filter_entity(user):
continue
user = users[participant.user_id]
user.participant = participant
self.buffer.append(user)
return True
else:
self.total = 1
if self.limit != 0:
user = await self.client.get_entity(entity)
if self.filter_entity(user):
user.participant = None
self.buffer.append(user)
return True
async def _load_next_chunk(self):
if not self.requests:
return True
# Only care about the limit for the first request
# (small amount of people, won't be aggressive).
#
# Most people won't care about getting exactly 12,345
# members so it doesn't really matter not to be 100%
# precise with being out of the offset/limit here.
self.requests[0].limit = min(
self.limit - self.requests[0].offset, _MAX_PARTICIPANTS_CHUNK_SIZE)
if self.requests[0].offset > self.limit:
return True
results = await self.client(self.requests)
for i in reversed(range(len(self.requests))):
participants = results[i]
if not participants.users:
self.requests.pop(i)
continue
self.requests[i].offset += len(participants.participants)
users = {user.id: user for user in participants.users}
for participant in participants.participants:
user = users[participant.user_id]
if not self.filter_entity(user) or user.id in self.seen:
continue
self.seen.add(participant.user_id)
user = users[participant.user_id]
user.participant = participant
self.buffer.append(user)
class _AdminLogIter(RequestIter):
async def _init(
self, entity, admins, search, min_id, max_id,
join, leave, invite, restrict, unrestrict, ban, unban,
|
xairy/mipt-schedule-parser
|
msp/test/schedule_tests.py
|
Python
|
mit
| 8,974 | 0.007132 |
#!/usr/bin/python
#coding: utf-8
from __future__ import unicode_literals
import os
import unittest
import xlrd
import msp.schedule_parser as schedule_parser
__author__ = "Andrey Konovalov"
__copyright__ = "Copyright (C) 2014 Andrey Konovalov"
__license__ = "MIT"
__version__ = "0.1"
this_dir, this_filename = os.path.split(__file__)
SCHEDULE_PATH = os.path.join(this_dir, "..", "data", "2013_fall", "4kurs.xls")
class WeekdayRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetWeekdayRange(0), (4, 11))
self.assertEqual(self.schedule.GetWeekdayRange(1), (12, 19))
self.assertEqual(self.schedule.GetWeekdayRange(2), (20, 27))
self.assertEqual(self.schedule.GetWeekdayRange(3), (28, 37))
self.assertEqual(self.schedule.GetWeekdayRange(4), (38, 47))
self.assertEqual(self.schedule.GetWeekdayRange(5), (48, 57))
class DepartmentCountTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentCount(), 9)
class DepartmentRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentRange(0), (2, 11))
self.ass
|
ertEqual(self.schedule.GetDepartmentRange(1), (13, 20))
self.assertEqual(self.schedule.GetDepartmentRange(2), (22, 32))
self.assertEqual(self.schedule.GetDepartmentRange(3), (34, 36))
self.assertEqual(self.schedule.GetDepartmentRange(4), (38, 43))
self.assertEqual(self.schedule.GetDepartmentRange(5), (45, 53))
self.assertEqual(self.schedule.GetDepartmentRange(6), (55, 62))
self.assertEqual(self.schedule.GetDepartmentRange(7), (64, 71))
self.assertEqual(self.schedule.GetDepartme
|
ntRange(8), (73, 77))
class DepartmentsRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentsRow(), 3)
class HoursColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetHoursColumn(), 1)
class HoursRangesTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetHoursRanges(0), [(4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11)])
self.assertEqual(self.schedule.GetHoursRanges(3), [(28, 30), (30, 31), (31, 32), (32, 34), (34, 35), (35, 36), (36, 37)])
self.assertEqual(self.schedule.GetHoursRanges(5), [(48, 49), (49, 50), (50, 52), (52, 53), (53, 54), (54, 56), (56, 57)])
class GroupCountTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupCount(0), 9)
self.assertEqual(self.schedule.GetGroupCount(1), 7)
self.assertEqual(self.schedule.GetGroupCount(2), 8)
self.assertEqual(self.schedule.GetGroupCount(3), 2)
self.assertEqual(self.schedule.GetGroupCount(4), 5)
self.assertEqual(self.schedule.GetGroupCount(5), 8)
self.assertEqual(self.schedule.GetGroupCount(6), 7)
self.assertEqual(self.schedule.GetGroupCount(7), 7)
self.assertEqual(self.schedule.GetGroupCount(8), 4)
class GroupListTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupList(0), ['011', '012', '013', '014', '015', '016', '017', '018', '019'])
self.assertEqual(self.schedule.GetGroupList(1), ['021', '022', '023', '024', '025', '026', '028'])
self.assertEqual(self.schedule.GetGroupList(3), ['041', '042'])
self.assertEqual(self.schedule.GetGroupList(8), ['0111', '0112', '0113', '0114'])
class GroupRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupRange(0, 0), (2, 3))
self.assertEqual(self.schedule.GetGroupRange(0, 1), (3, 4))
self.assertEqual(self.schedule.GetGroupRange(2, 1), (23, 25))
self.assertEqual(self.schedule.GetGroupRange(2, 2), (25, 26))
self.assertEqual(self.schedule.GetGroupRange(2, 3), (26, 28))
self.assertEqual(self.schedule.GetGroupRange(5, 3), (48, 49))
self.assertEqual(self.schedule.GetGroupRange(8, 0), (73, 74))
self.assertEqual(self.schedule.GetGroupRange(8, 3), (76, 77))
class WeekdayByRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetWeekdayByRow(4), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(5), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(10), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(13), 1)
self.assertEqual(self.schedule.GetWeekdayByRow(25), 2)
self.assertEqual(self.schedule.GetWeekdayByRow(26), 2)
self.assertEqual(self.schedule.GetWeekdayByRow(28), 3)
self.assertEqual(self.schedule.GetWeekdayByRow(44), 4)
self.assertEqual(self.schedule.GetWeekdayByRow(48), 5)
self.assertEqual(self.schedule.GetWeekdayByRow(56), 5)
class PairByRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetPairByRow(4), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(5), (1, 0))
self.assertEqual(self.schedule.GetPairByRow(10), (6, 0))
self.assertEqual(self.schedule.GetPairByRow(12), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(28), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(29), (0, 1))
self.assertEqual(self.schedule.GetPairByRow(30), (1, 0))
self.assertEqual(self.schedule.GetPairByRow(33), (3, 1))
self.assertEqual(self.schedule.GetPairByRow(56), (6, 0))
class DepartmentByColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(2), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(3), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(10), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(13), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(18), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(19), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(22), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(24), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(31), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(39), 4)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(64), 7)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(70), 7)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(73), 8)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(76), 8)
class GroupByColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupIndexByColumn(2), (0, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(3), (1, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(10), (8, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(23), (1, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(24), (1, 1))
self.assertEqual(self.schedule.GetGroupIndexByColumn(25), (2, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(26),
|
ArtifexSoftware/mupdf
|
scripts/jlib.py
|
Python
|
agpl-3.0
| 81,616 | 0.004717 |
import codecs
import doctest
import inspect
import io
import os
import platform
import re
import shutil
import subprocess
import sys
import textwrap
import time
import traceback
import types
def place( frame_record):
'''
Useful debugging function - returns representation of source position of
caller.
'''
filename = frame_record.filename
line = frame_record.lineno
function = frame_record.function
ret = os.path.split( filename)[1] + ':' + str( line) + ':' + function + ':'
if 0: # lgtm [py/unreachable-statement]
tid = str( threading.currentThread())
ret = '[' + tid + '] ' + ret
return ret
def expand_nv( text, caller=1):
'''
Returns <text> with special handling of {<expression>} items.
text:
String containing {<expression>} items.
caller:
If an int, the number of frames to step up when looking for file:line
information or evaluating expressions.
Otherwise should be a frame record as returned by inspect.stack()[].
<expression> is evaluated in <caller>'s context using eval(), and expanded
to <expression> or <expression>=<value>.
If <expression> ends with '=', this character is removed and we prefix the
result with <expression>=.
>>> x = 45
>>> y = 'hello'
>>> expand_nv( 'foo {x} {y=}')
'foo 45 y=hello'
<expression> can also use ':' and '!' to control formatting, like
str.format().
>>> x = 45
>>> y = 'hello'
>>> expand_nv( 'foo {x} {y!r=}')
"foo 45 y='hello'"
If <expression> starts with '=', this character is removed and we show each
space-separated item in the remaining text as though it was appended with
'='.
>>> foo = 45
>>> y = 'hello'
>>> expand_nv('{=foo y}')
'foo=45 y=hello'
'''
if isinstance( caller, int):
frame_record = inspect.stack()[ caller]
else:
frame_record = caller
frame = frame_record.frame
try:
def get_items():
'''
Yields (pre, item), where <item> is contents of next {...} or None,
and <pre> is preceding text.
'''
pos = 0
pre = ''
while 1:
if pos == len( text):
yield pre, None
break
rest = text[ pos:]
if rest.startswith( '{{') or rest.startswith( '}}'):
pre += rest[0]
pos += 2
elif text[ pos] == '{':
close = text.find( '}', pos)
if close < 0:
raise Exception( 'After "{" at offset %s, cannot find closing "}". text is: %r' % (
pos, text))
text2 = text[ pos+1 : close]
if text2.startswith('='):
text2 = text2[1:]
for i, text3 in enumerate(text2.split()):
pre2 = ' ' if i else pre
yield pre2, text3 + '='
else:
yield pre, text[ pos+1 : close]
pre = ''
pos = close + 1
else:
pre += text[ pos]
pos += 1
ret = ''
for pre, item in get_items():
ret += pre
nv = False
if item:
if item.endswith( '='):
nv = True
item = item[:-1]
expression, tail = split_first_of( item, '!:')
try:
value = eval( expression, frame.f_globals, frame.f_locals)
value_text = ('{0%s}' % tail).format( value)
except Exception as e:
value_text = '{??Failed to evaluate %r in context %s:%s; expression=%r tail=%r: %s}' % (
expression,
frame_record.filename,
frame_record.lineno,
expression,
tail,
e,
)
if nv:
ret += '%s=' % expression
ret += value_text
return ret
finally:
del frame # lgtm [py/unnecessary-delete]
class LogPrefixTime:
def __init__( self, date=False, time_=True, elapsed=False):
self.date = date
self.time = time_
self.elapsed = elapsed
self.t0 = time.time()
def __call__( self):
ret = ''
if self.date:
ret += time.strftime( ' %F')
if self.time:
ret += time.strftime( ' %T')
if self.elapsed:
ret += ' (+%s)' % time_duration( time.time() - self.t0, s_format='%.1f')
if ret:
ret = ret.strip() + ': '
return ret
class LogPrefixFileLine:
def __call__( self, caller):
if isinstance( caller, int):
caller = inspect.stack()[ caller]
return place( caller) + ' '
class LogPrefixScopes:
'''
Internal use only.
'''
def __init__( self):
self.items = []
def __call__( self):
ret = ''
for item in self.items:
if callable( item):
item = item()
ret += item
return ret
class LogPrefixScope:
'''
Can be used to insert scoped prefix to log output.
'''
def __init__( self, prefix):
self.prefix = prefix
def __enter__( self):
g_log_prefix_scopes.items.append( self.prefix)
def __exit__( self, exc_type, exc_value, traceback):
global g_log_prefix
g_log_prefix_scopes.items.pop()
g_log_delta = 0
class LogDeltaScope:
'''
Can be used to temporarily change verbose level of logging.
E.g to temporarily increase logging:
with jlib.LogDeltaScope(-1):
...
'''
def __init__( self, delta):
self.delta = delta
global g_log_delta
g_log_delta += self.delta
def __enter__( self):
pass
def __exit__( self, exc_type, exc_value, traceback):
global g_log_delta
g_log_delta -= self.delta
# Special item that can be inserted into <g_log_prefixes> to enable
# temporary addition of text into log prefixes.
#
g_log_prefix_scopes = LogPrefixScopes()
# List of items that form prefix for all output from log().
#
g_log_prefixes = [
LogPrefixTime( time_=False, elapsed=True),
g_log_prefix_scopes,
LogPrefixFileLine(),
]
_log_text_line_start = True
def log_text( text=None, caller=1, nv=True, raw=False):
'''
Returns log text, prepending all lines with text from g_log_prefixes.
text:
The text to output. Each line is prepended with prefix text.
caller:
If an int, the number of frames to step up when looking for file:line
information or evaluating expressions.
Otherwise should be a frame record as returned by inspect.stack()[].
nv:
If true, we expand {...} in <text> using expand_nv().
'''
if isinstance( caller, int):
caller += 1
# Construct line prefix.
prefix = ''
for p in g_log_prefixes:
if callable( p):
if isinstance( p, LogPrefixFileLine):
p = p(caller)
else:
p = p()
prefix += p
if text is None:
return prefix
# Expand {...} using our enhanced f-string support.
if nv:
text = expand_nv( text, caller)
# Prefix each line. If <raw> is false, we terminate the last line with a
# newline. Otherwise we use _log_text_line_start to remember whether we are
# at the beginning of a line.
#
|
global _log_text_line_start
text2 = ''
pos = 0
while 1:
if pos == len(text):
break
if not raw or _log_text_line_start:
text2 += prefix
nl = text.find('\n', pos)
if nl == -1:
text2 += text[pos:]
if not raw:
text2 += '\n'
|
pos = len(text)
else:
|
Iconik/eve-suite
|
src/model/static/inv/control_tower_resources.py
|
Python
|
gpl-3.0
| 1,085 | 0.00553 |
from collections import namedtuple
from model.flyweight import Flyweight
from model.static.database import database
class ControlTowerResource(Flyweight):
def __init__(self,control_tower_type_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.control_tower_type_id = control_tower_type_id
cursor = database.get_cursor(
"select * from invControlTowerResources where controlTowerTypeID={};".format(self.control_tower_type_id))
|
self.resources = list()
resource_tuple = namedtuple("resource_tuple",
"resource_type_id purpose quantity min_security_level faction_id ")
for row in cursor:
self.resources.append(resource_tuple(
resource_type_id=row["resourceTypeID"],
purpose=row["purpose"],
quantity=row["quantity"],
min_security_level=row["minSecurityLevel"],
faction_id=row["factionID"]))
cursor.close()
|
|
voutilad/courtlistener
|
cl/corpus_importer/import_columbia/parse_opinions.py
|
Python
|
agpl-3.0
| 15,489 | 0.002066 |
# -*- coding: utf-8 -*-
# Functions to parse court data in XML format into a list of dictionaries.
import hashlib
import os
import re
import xml.etree.cElementTree as ET
import dateutil.parser as dparser
from juriscraper.lib.string_utils import titlecase, harmonize, clean_string, CaseNameTweaker
from lxml import etree
from cl.corpus_importer.court_regexes import state_pairs
from parse_judges import find_judge_names
from regexes_columbia import SPECIAL_REGEXES, FOLDER_DICT
# initialized once since it takes resources
CASE_NAME_TWEAKER = CaseNameTweaker()
# tags for which content will be condensed into plain text
SIMPLE_TAGS = [
"reporter_caption", "citation", "caption", "court", "docket", "posture",
"date", "hearing_date", "panel", "attorneys"
]
# regex that will be applied when condensing SIMPLE_TAGS content
STRIP_REGEX = [r'</?citation.*>', r'</?page_number.*>']
# types of opinions that will be parsed
# each may have a '_byline' and '_text' node
OPINION_TYPES = ['opinion', 'dissent', 'concurrence']
def parse_file(file_path):
"""Parses a file, turning it into a correctly formatted dictionary, ready to
be used by a populate script.
:param file_path: A path the file to be parsed.
:param court_fallback: A string used as a fallback in getting the court
object. The regexes associated to its value in special_regexes will be used.
"""
raw_info = get_text(file_path)
info = {}
# get basic info
info['unpublished'] = raw_info['unpublished']
info['file'] = os.path.splitext(os.path.basename(file_path))[0]
info['docket'] = ''.join(raw_info.get('docket', [])) or None
info['citations'] = raw_info.get('citation', [])
info['attorneys'] = ''.join(raw_info.get('attorneys', [])) or None
info['posture'] = ''.join(raw_info.get('posture', [])) or None
info['court_id'] = get_state_court_object(''.join(raw_info.get('court', [])),
file_path) or None
if not info['court_id']:
raise Exception('Failed to find a court ID for "%s".' %
''.join(raw_info.get('court', [])))
# get the full panel text and extract judges from it
panel_text = ''.join(raw_info.get('panel', []))
#if panel_text:
# judge_info.append(('Panel\n-----', panel_text))
info['panel'] = find_judge_names(panel_text) or []
# get case names
info['case_name_full'] = format_case_name(''.join(raw_info.get('caption', []))) or ''
case_name = format_case_name(''.join(raw_info.get('reporter_caption', []))) or ''
if case_name:
info['case_name'] = case_name
else:
if info['case_name_full']:
# Sometimes the <caption> node has values and the <reporter_caption>
# node does not. Fall back to <caption> in this case.
info['case_name'] = info['case_name_full']
if not info['case_name']:
raise Exception('Failed to find case_name, even after falling back to '
'case_name_full value.')
info['case_name_short'] = CASE_NAME_TWEAKER.make_case_name_short(info['case_name']) or ''
# get dates
dates = raw_info.get('date', []) + raw_info.get('hearing_date', [])
info['dates'] = parse_dates(dates)
# figure out if this case was heard per curiam by checking the first chunk
# of text in fields in which this is usually indicated
info['per_curiam'] = False
first_chunk = 1000
for opinion in raw_info.get('opinions', []):
if 'per curiam' in opinion['opinion'][:first_chunk].lower():
info['per_curiam'] = True
break
if opinion['byline'] and 'per curiam' in opinion['byline'][:first_chunk].lower():
info['per_curiam'] = True
break
# condense opinion texts if there isn't an associated byline
# print a warning whenever we're appending multiple texts together
info['opinions'] = []
for current_type in OPINION_TYPES:
last_texts = []
for opinion in raw_info.get('opinions', []):
if opinion['type'] != current_type:
continue
last_texts.append(opinion['opinion'])
if opinion['byline']:
#judge_info.append((
# '%s Byline\n%s' % (current_type.title(), '-' * (len(current_type) + 7)),
# opinion['byline']
#))
# add the opinion and all of the previous texts
|
judges = find_judge_names(opinion['byline'])
info['opinions'].append({
'opinion': '\n'.join(last_texts),
'opinion_texts': last_texts,
'type': current_type,
'author': judges[0] if judges else None,
'joining': judges[1:] if len(judges) > 0 else [],
|
'byline': opinion['byline'],
})
last_texts = []
if current_type == 'opinion':
info['judges'] = opinion['byline']
if last_texts:
relevant_opinions = [o for o in info['opinions'] if o['type'] == current_type]
if relevant_opinions:
relevant_opinions[-1]['opinion'] += '\n%s' % '\n'.join(last_texts)
relevant_opinions[-1]['opinion_texts'].extend(last_texts)
else:
info['opinions'].append({
'opinion': '\n'.join(last_texts),
'opinion_texts': last_texts,
'type': current_type,
'author': None,
'joining': [],
'byline': '',
})
# check if opinions were heard per curiam by checking if the first chunk of
# text in the byline or in any of its associated opinion texts indicate this
for opinion in info['opinions']:
# if there's already an identified author, it's not per curiam
if opinion['author'] > 0:
opinion['per_curiam'] = False
continue
# otherwise, search through chunks of text for the phrase 'per curiam'
per_curiam = False
first_chunk = 1000
if 'per curiam' in opinion['byline'][:first_chunk].lower():
per_curiam = True
else:
for text in opinion['opinion_texts']:
if 'per curiam' in text[:first_chunk].lower():
per_curiam = True
break
opinion['per_curiam'] = per_curiam
# construct the plain text info['judges'] from collected judge data
#info['judges'] = '\n\n'.join('%s\n%s' % i for i in judge_info)
# Add the same sha1 and path values to every opinion (multiple opinions
# can come from a single XML file).
sha1 = get_sha1(file_path)
for opinion in info['opinions']:
opinion['sha1'] = sha1
opinion['local_path'] = file_path
return info
def get_sha1(file_path):
"""Calculate the sha1 of a file at a given path."""
hasher = hashlib.sha1()
with open(file_path, 'rb') as f:
buf = f.read()
hasher.update(buf)
return hasher.hexdigest()
def get_text(file_path):
"""Reads a file and returns a dictionary of grabbed text.
:param file_path: A path the file to be parsed.
"""
with open(file_path, 'r') as f:
file_string = f.read()
raw_info = {}
# used when associating a byline of an opinion with the opinion's text
current_byline = {'type': None, 'name': None}
# if this is an unpublished opinion, note this down and remove all
# <unpublished> tags
raw_info['unpublished'] = False
if '<opinion unpublished=true>' in file_string:
file_string = file_string.replace('<opinion unpublished=true>', '<opinion>')
file_string = file_string.replace('<unpublished>', '').replace('</unpublished>', '')
raw_info['unpublished'] = True
# turn the file into a readable tree
attempts = [
{'recover': False, 'replace': False},
{'recover': False, 'replace': True},
{'recover': True, 'replace': False},
{'recover': True, 'replace': True},
]
replaced_string
|
i-sultan/Smart-Trader
|
src/st_cache_handler.py
|
Python
|
gpl-3.0
| 3,536 | 0.009055 |
"""File to interact with cache folder to isolate cache handling functionality
from main controllers code.
The CacheHandler should only be accessed by controller classes.
"""
#.-------------------.
#| imports |
#'-------------------'
import os
import pickle
#.-------------------.
#| main entry |
#'-------------------'
class CacheHandler(object):
""" Class to facilitate interaction with the cache folder. """
def __init__(self):
pass
def pickle_object(self, filename, instance):
pickle.dump(instance, open(filename, "wb"))
def unpickle_object(self, filename):
return pickle.load(open(filename, "rb"))
def get_folders(self):
""" Return list of folders within cache. """
return [folder for folder in os.listdir('cache')
if os.path.isdir(os.path.join('cache', folder))]
def get_subfolders(self, folder):
""" Return list of subfolders within cache. """
folder = os.path.join('cache', folder)
return [subfolder for subfolder in os.listdir(folder)
if os.path.isdir(os.path.join(folder, subfolder))]
def get_extension(self, filename):
return os.path.splitext(filename)[1][1:]
def get_filenames(self, folder, subfolder, ext = None):
""" Return list of filenames within cache. """
subfolder = os.path.join('cache', folder, subfolder)
return [filename for filename in os.listdir(subfolder)
if (not os.path.isdir(os.path.join(subfolder, filename))) and
(not ext or self.get_extension(filename) == ext)]
def save_single(self, folder, subfolder, file, instance):
""" Save the instance at specified location, and delete all other files in same subfolder. """
if folder not in self.get_folders():
os.makedirs(os.path.join('cache', folder))
if subfolder not in self.get_subfolders(folder):
os.makedirs(
|
os.path.join('cache', folder, subfolder))
else:
# cleanup directory before saving new file. TODO: warn user if not empty.
|
for file_name in self.get_filenames(folder, subfolder):
os.remove(os.path.join('cache', folder, subfolder, file_name))
location = os.path.join('cache', folder, subfolder, file)
self.pickle_object(location, instance)
return location
def save_df(self, folder, subfolder, file, data_frame):
""" Save the DataFrame at specified location, without deleting other files in same subfolder. """
if folder not in self.get_folders():
os.makedirs(os.path.join('cache', folder))
if subfolder not in self.get_subfolders(folder):
os.makedirs(os.path.join('cache', folder, subfolder))
location = os.path.join('cache', folder, subfolder, file)
data_frame.to_csv(location)
return location
def load_single(self, folder, subfolder):
""" Unpickle and return the instance inside first file at specified location. """
if folder not in self.get_folders() or \
subfolder not in self.get_subfolders(folder) or \
len(self.get_filenames(folder, subfolder, "trm")) == 0:
return None
file = self.get_filenames(folder, subfolder, "trm")[0] # if multiple files, will use first file only
location = os.path.join('cache', folder, subfolder, file)
return self.unpickle_object(location)
|
aio-libs/aiohttp_session
|
aiohttp_session/memcached_storage.py
|
Python
|
apache-2.0
| 3,256 | 0 |
import json
import uuid
from time import time
from typing import Any, Callable, Optional
import aiomcache
from aiohttp import web
from . import AbstractStorage, Session
class MemcachedStorage(AbstractStorage):
"""Memcached storage"""
def __init__( # type: ignore[no-any-unimported] # TODO: aiomcache
self,
memcached_conn: aiomcache.Client, *,
cookie_name: str = "AIOHTTP_SESSION",
domain: Optional[str] = None,
max_age: Optional[int] = None,
|
path: str = '/',
secure: Optional[bool] = None,
httponly: bool = True,
key_factory: Callable[[], str] = lambda: uuid.uuid4().hex,
encoder: Callable[[object], str] = json.dumps,
decoder: Callable[[str], Any] = json.loads
) -> None:
super().__init__(cookie_name=cookie_name, domain=domain,
|
max_age=max_age, path=path, secure=secure,
httponly=httponly,
encoder=encoder, decoder=decoder)
self._key_factory = key_factory
self.conn = memcached_conn
async def load_session(self, request: web.Request) -> Session:
cookie = self.load_cookie(request)
if cookie is None:
return Session(None, data=None, new=True, max_age=self.max_age)
else:
key = str(cookie)
stored_key = (self.cookie_name + '_' + key).encode('utf-8')
data = await self.conn.get(stored_key)
if data is None:
return Session(None, data=None,
new=True, max_age=self.max_age)
data = data.decode('utf-8')
try:
data = self._decoder(data)
except ValueError:
data = None
return Session(key, data=data, new=False, max_age=self.max_age)
async def save_session(
self,
request: web.Request,
response: web.StreamResponse,
session: Session
) -> None:
key = session.identity
if key is None:
key = self._key_factory()
self.save_cookie(response, key,
max_age=session.max_age)
else:
if session.empty:
self.save_cookie(response, '',
max_age=session.max_age)
else:
key = str(key)
self.save_cookie(response, key,
max_age=session.max_age)
data = self._encoder(self._get_session_data(session))
max_age = session.max_age
# https://github.com/memcached/memcached/wiki/Programming#expiration
# "Expiration times can be set from 0, meaning "never expire", to
# 30 days. Any time higher than 30 days is interpreted as a Unix
# timestamp date. If you want to expire an object on January 1st of
# next year, this is how you do that."
if max_age is None:
expire = 0
elif max_age > 30*24*60*60:
expire = int(time()) + max_age
else:
expire = max_age
stored_key = (self.cookie_name + '_' + key).encode('utf-8')
await self.conn.set(stored_key, data.encode('utf-8'), exptime=expire)
|
neurosynth/ACE
|
ace/__init__.py
|
Python
|
mit
| 1,044 | 0.007663 |
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
"""ACE -- Automated Coordinate Extraction.
"""
__all__ = ["conf
|
ig", "database", "datatable", "exporter", "set_logging_level", "scrape", "sources", "tableparser", "tests", "__version__"]
import logging
import sys
import os
from version import __version__
def set_logging_level(level=None):
"""Set package-wide logging level
Args
level : Logging level constant from logging module (warning, error, info, etc.)
"""
if level is None:
level = os.environ.get('ACE_
|
LOGLEVEL', 'warn')
logger.setLevel(getattr(logging, level.upper()))
return logger.getEffectiveLevel()
def _setup_logger(logger):
# Basic logging setup
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter("%(levelname)-6s %(module)-7s %(message)s"))
logger.addHandler(console)
set_logging_level()
# Set up logger
logger = logging.getLogger("ace")
_setup_logger(logger)
|
robwarm/gpaw-symm
|
doc/tutorials/dipole_correction/submit.agts.py
|
Python
|
gpl-3.0
| 285 | 0 |
def agts(queu
|
e):
d = queue.add('dipole.py', ncpus=4, walltime=60)
queue.add('plot.py', deps=d, ncpus=1, walltime=10,
creates=['zero.png', 'period
|
ic.png', 'corrected.png',
'slab.png'])
queue.add('check.py', deps=d, ncpus=1, walltime=10)
|
timopulkkinen/BubbleFish
|
tools/perf/page_sets/page_sets_unittest.py
|
Python
|
bsd-3-clause
| 643 | 0.010886 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-styl
|
e license that can be
# found in the LICENSE file.
import unittest
from telemetry.page import page_set
import page_sets
class PageSetsUnittest(unittest.TestCase):
"""Verfies that all the pagesets in this directory are syntactically valid."""
@staticmethod
def testPageSetsParseCorrectly():
filenames = page_sets.GetAll
|
PageSetFilenames()
for filename in filenames:
try:
page_set.PageSet.FromFile(filename)
except Exception, ex:
raise Exception("Pageset %s: %s" % (filename, str(ex)))
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/docutils/parsers/rst/directives/admonitions.py
|
Python
|
gpl-2.0
| 2,413 | 0.000414 |
# $Id: admonitions.py 7681 2013-07-12 07:52:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
class BaseAdmonition(Directive):
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
node_class = None
"""Subclasses must set this to the appropriate admonition node class."""
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
admonition_node = self.node_class(text, **self.options)
self.add_name(admonition_node)
if self.node_class is nodes.admonition:
title_text = self.arguments[0]
textnodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *textnodes)
title.source, title.line = (
self.state_machine.get_source_and_line(self.lineno))
admonition_node += title
admonition_node += messages
if not 'classes' in self.options:
admonition_node['classes'] += ['admonition-' +
nodes.make_id(title_text
|
)]
self.state.nested_parse(self.content, self.content_offset,
admonition_node)
return [admonition_node]
class Admonition(BaseAdmonition):
required_arguments = 1
node_class = nodes.admonition
class Attention(BaseAdmonition):
node_class = nodes.attention
class Caution(BaseAdmonition):
node_class = nodes.caution
class Danger(BaseAdm
|
onition):
node_class = nodes.danger
class Error(BaseAdmonition):
node_class = nodes.error
class Hint(BaseAdmonition):
node_class = nodes.hint
class Important(BaseAdmonition):
node_class = nodes.important
class Note(BaseAdmonition):
node_class = nodes.note
class Tip(BaseAdmonition):
node_class = nodes.tip
class Warning(BaseAdmonition):
node_class = nodes.warning
|
akosyakov/intellij-community
|
python/testData/debug/test_ignore_lib.py
|
Python
|
apache-2.0
| 84 | 0.011905 |
from cale
|
ndar import setfirstwee
|
kday
stopped_in_user_file = True
setfirstweekday(15)
|
hans-boden/pyws-fablab-lisbon
|
contribs/luis_mp/mm_proposal_wo_kivi.py
|
Python
|
unlicense
| 1,919 | 0.008863 |
# python3
"""
Mastermind without kivy - by Luis
merciless edited by hans
"""
import random
import re
class G():
valid_chars = '123456'
secret_len = 5
solved = '+' * secret_len
regex_str = "^[{0}]{{{1},{1}}}$".format(valid_chars, secret_len)
valid_input = re.compile(regex_str) # regular express
|
ion for user input
def main():
secret = answer_generator()
print('Enter your guess of {} of these symbols: ({})'
.format(G.secret_len, G.valid_chars))
while True:
user_seq = user_guess()
output = handle_game(secret, user_seq)
result_msg = ('{} -> {}')
print(result_msg.format(user_seq, output))
if output == G.so
|
lved:
break
print('You have found the answer! Goodbye!')
def handle_game(answer, guess):
answer = list(answer) # no need to str() or to assign a new name
guess = list(guess)
output = ''
for i, ch in enumerate(guess):
if ch == answer[i]:
# eliminate hits from both lists, but leave position untouched
guess[i] = '°' # any char which is not in valid_chars
answer[i] = '^'
output += '+'
for ch in guess:
if ch in answer:
# remove hit from answer, position is no longer important
answer.remove(ch)
output += '-'
return output
def user_guess():
while True:
response = input() # no argument needed, default is ''
if G.valid_input.match(response):
return response
print("wrong input...")
def answer_generator(): # Creates random sequence of n characters
seq = ''
for _ in range(G.secret_len): # '_': we dont care for the value
seq += random.choice(G.valid_chars) # valid_chars string is iterable
return seq
if __name__ == '__main__':
main()
|
relic7/prodimages
|
python/jbmodules/image_processing/marketplace/multiprocmagick.py
|
Python
|
mit
| 8,914 | 0.010433 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import multiprocessing, time
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
def run(self):
proc_name = self.name
#try:
while True:
next_task = self.task_queue.get()
if next_task is None:
# Poison pill means shutdown
print '%s: Exiting' % proc_name
#fnx = dir(self)
self.task_queue.task_done()
print self.result_queue, self.task_queue, ' resQ and TaskQ <-- --> pid -- isalive --> ', self.pid, self.is_alive
break
print '%s: %s' % (proc_name, next_task)
answer = next_task()
self.task_queue.task_done()
self.result_queue.put(answer)
print '%s: AnsweredPUT-taskDone in Consumer ' % proc_name
return
# except AttributeError:
# print ' None Type Error End '
# return
# finally:
# return
class Task(object):
def __init__(self, img, rgbmean, destd
|
ir):
import tempfile, shutil
# tmpfileobj, tmpfile_path = tempfile.mkstemp(suffix=".png")
self.img = img
self.rgbmean = rgbmean
self.destdir = destdir
#self.tmppngout = tempfile.mkstemp(suffix=".png")
def __call__(self):
|
#import jbmodules
import os
import image_processing
from image_processing import marketplace, magick_tweaks
import image_processing.marketplace.magicColorspaceModAspctLoadFaster2 as magickProc2
#time.sleep(0.1) # pretend to take some time to do the work
import image_processing.magick_tweaks.convert_img_srgb
# try:
image_processing.magick_tweaks.convert_img_srgb.main(image_file=self.img)
print self.img, ' <-- self.img ', self.rgbmean
#self.tmppngout(
pngout = magickProc2.subproc_magick_png(self.img, rgbmean=self.rgbmean, destdir=self.destdir)
if os.path.isfile(pngout):
magickProc2.subproc_magick_large_jpg(pngout, destdir=self.destdir)
if os.path.isfile(pngout):
ret = magickProc2.subproc_magick_medium_jpg(pngout, destdir=self.destdir)
#os.remove(self.tmppngout[1])
# except TypeError:
# print self.img, ' <-- Type-Error in Task -->', self.destdir
# pass
# except AttributeError:
# print self.img, ' <-- AttributeError in Task -->', self.destdir
# pass
# except IndexError:
# ' None Type Error End '
# pass
return '-ret- %s \n-path- %s \n-dest- %s \n' % (ret, self.img, self.destdir)
else:
return
def __str__(self):
return '%s -- %s' % (self.img, self.destdir)
def run_threaded_imgdict(argslist=None):
import Queue
import threading
import multiprocessing
import image_processing
from image_processing.marketplace.magicColorspaceModAspctLoadFaster2 import sort_files_by_values
q = Queue.Queue()
# print type(argslist), len(argslist), ' type and length argslist \n'
#print type(argslist), type(argslist)
for i in argslist[0]: #put 30 tasks in the queue
#print 'i ', ' argslist'
if i:
q.put([i])
img_dict_list = []
def worker():
count = 0
while True:
item = q.get()
#print item[0]
imgdata = sort_files_by_values(item)
#print imgdata
img_dict_list.append(imgdata)
# Can add functions to adjust based on imgdict params or store image data or delete etc.
# insertres = insert_gridfs_extract_metadata(item[0])
count += 1
print count, '\n\t ImageDict Threade'#, imgdata
q.task_done()
#print 'argsL --> len arglist', len(argslist[0]), type(argslist), ' Type ArgsList RunThreaded'
jobcount = multiprocessing.cpu_count() - 2 #len(argslist[0]) #detect number of cores
print("Creating %d threads" % jobcount)
for i in xrange(jobcount):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
q.join() #block until all tasks are done
return img_dict_list
def funkRunner2(root_img_dir=None):
import multiprocessing
#import Queue
import threading
import glob, os
#from os import os.path
#import jbmodules
import image_processing
from image_processing.marketplace.magicColorspaceModAspctLoadFaster2 import rename_retouched_file, sort_files_by_values
destdir = '/mnt/Post_Complete/ImageDrop'
print 'Starting Funkrunner2 Pools'
########## One ##########
#
# 1A
# List of images to run through processing as glob of the root_img_dir
#print root_img_dir, ' <-- Rootimgdir FunkR2'
if root_img_dir == '/mnt/Post_Complete/Complete_Archive/MARKETPLACE' or root_img_dir is None:
imagesGlob = os.path.join(root_img_dir, '*/*/*.??[gG]')
else:
imagesGlob = os.path.join(root_img_dir, '*.??[gG]')
# 1B
# Rename files using Multiproc pool
poolRename = multiprocessing.Pool(8)
images = [ f for f in glob.glob(imagesGlob) if f is not None ]
while len(images) == 0:
print len(images), ' <-- Length of the Images to Rename,Process etc. Now the Renamer'
break
resrename = poolRename.map(rename_retouched_file, images)
poolRename.close()
poolRename.join()
print 'Images Renamed'
########## Two ##########
#
# 2
# Extract image pixel data for enhancements. As list of tuples, [<url>, {rgbdata} ].. ithink
img_list = [ f for f in glob.glob(imagesGlob) if f is not None ]
#print type(img_list), '\tLen ImageList preThreaded'
img_dict = run_threaded_imgdict(argslist=(img_list,))
########## Three ##########
#
# 3A
# Init Task and Results Queues
tasks = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
# 3B
# Start consumers
num_consumers = multiprocessing.cpu_count() - 2
print 'Creating %d consumers' % num_consumers
consumers = [ Consumer(tasks, results)
for i in xrange(num_consumers) ]
for w in consumers:
w.start()
# 3C --> Run
# Tasks Add
# Add Images and rgb data and dest to tasks
num_jobs = len(img_dict)
#print 'jobs -- consumers -- root_img_dir --> ', num_jobs, consumers, root_img_dir
for item in img_dict:
img, rgbmean = item.keys()[0], item.values() #.items()
#print img, 'rgbmean', ' Img -- RGB Mean'
tasks.put(Task(img, rgbmean, destdir))
print 'Put Tasks'
# 3P --> Poinson pill to help stop hanging procs
# Add a poison pill for each consumer
for i in xrange(num_consumers):
tasks.put(None)
#print i, ' tasks put line 191 mutiroc --><END'
# 3X --> End
# Wait for all of the tasks to finish
tasks.join()
########## Four ##########
#
# 4 --> Results
# Start printing results
while num_jobs:
result = results.get()
print 'Result Q Results: ', result
num_jobs -= 1
########## Five ##########
# Delete em all
# if root_img_dir == '/mnt/Post_Complete/Complete_Archive/MARKETPLACE':
# poolDelete = multiprocessing.Pool(8)
# import os
# poolDelete.map(os.remove, img_list)
# poolDelete.close()
# poolDelete.join()
# print' And now they are Gone'
#return
def run_multiproccesses_magick(searchdir=None):
import multiprocessing
import glob,os
#import jbmodules
import image_processing
import image_processing.marketplace.magicColorspaceModAspctLoadFaster2 as magickProc
if not searchdir:
searchdir = os.path.abspath('/mnt/Post_Complete/Complete_Archive/MARKETPLACE/SWI')
else:
pass
pool = multiprocessing.Pool(4)
directory_list = []
if searchdir.split('/')[-1] == 'SWI':
[
|
glic3rinu/basefs
|
basefs/fs.py
|
Python
|
mit
| 8,050 | 0.002733 |
import os
import sys
import errno
import itertools
import logging
import stat
import threading
from fuse import FuseOSError, Operations
from . import exceptions, utils
from .keys import Key
from .logs import Log
from .views import View
logger = logging.getLogger('basefs.fs')
class ViewToErrno():
def __enter__(self):
return self
def __exit__(self, exc_type, exc, exc_tb):
if exc_type is exceptions.PermissionDenied:
raise FuseOSError(errno.EACCES)
if exc_type is exceptions.DoesNotExist:
raise FuseOSError(errno.ENOENT)
if exc_type is exceptions.Exists:
raise FuseOSError(errno.EEXIST)
class FileSystem(Operations):
def __init__(self, view, serf=None, serf_agent=None, init_function=None):
self.view = view
self.cache = {}
self.dirty = {}
self.loaded = view.log.loaded
self.init_function = init_function
self.serf = serf
self.serf_agent = serf_agent
def __call__(self, op, path, *args):
logger.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError as e:
ret = str(e)
raise
finally:
logger.debug('<- %s %s', op, repr(ret))
def init(self, path):
""" threads should start here, otherwise will not run when fuse is backgrounded """
if self.init_function:
self.init_function()
def destroy(self, path):
super().destroy(path)
if self.serf_agent:
self.serf_agent.stop()
def get_node(self, path):
# check if logfile has been modified
if self.loaded != self.view.log.loaded:
logger.debug('-> %s rebuild', path)
self.view.build()
self.loaded = self.view.log.loaded
with ViewToErrno():
node = self.view.get(path)
if node.entry.action == node.entry.DELETE:
raise FuseOSError(errno.ENOENT)
return node
def send(self, node):
if self.serf:
entry = node.entry
logger.debug("Sending entry %s '%s'", entry.hash, entry.name)
self.serf.send(node.entry)
# def access(self, path, mode):
# return super(FileSystem, self).access(path, mode)
# full_path = self._full_path(path)
# if not os.access(full_path, mode):
# raise FuseOSError(errno.EACCES)
# def chmod(self, path, mode):
# full_path = self._full_path(path)
# return os.chmod(full_path, mode)
# def chown(self, path, uid, gid):
# full_path = self._full_path(path)
# return os.chown(full_path, uid, gid)
def getattr(self, path, fh=None):
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
has_perm = bool(self.view.get_key(path))
if node.entry.action == node.entry.MKDIR:
mode = stat.S_IFDIR | (0o0750 if has_perm else 0o0550)
else:
mode = stat.S_IFREG | (0o0640 if has_perm else 0o0440)
return {
'st_atime': node.entry.timestamp,
'st_ctime': node.entry.ctime,
'st_gid': os.getgid(),
'st_mode': mode,
'st_mtime': node.entry.timestamp,
'st_nlink': 1,
'st_size': len(node.content),
'st_uid': os.getuid(),
}
else:
import time
return {
'st_atime': time.time(),
'st_ctime': time.time(),
'st_gid': os.getgid(),
'st_mode': stat.S_IFREG | 0o0640,
'st_mtime': time.time(),
'st_nlink': 1,
'st_size': len(content),
'st_uid': os.getuid(),
}
# full_path = self._full_path(path)
# st = os.lstat(full_path)
# return dict((key, getattr(st, key)) for key in ())
def readdir(self, path, fh):
node = self.get_node(path)
entry = node.entry
dirs = ['.', '..']
for d in itertools.chain(dirs, [child.entry.name for child in node.childs if child.entry.action not in (entry.DELETE, entry.GRANT, entry.REVOKE)]):
yield d
# def readlink(self, path):
# pathname = os.readlink(self._full_path(path))
# if pathname.startswith("/"):
# # Path name is absolute, sanitize it.
# return os.path.relpath(pathname, self.root)
# else:
# return pathname
def mknod(self, path, mode, dev):
raise NotImplementedError
def rmdir(self, path):
with ViewToErrno():
node = self.view.delete(path)
self.send(node)
def mkdir(self, path, mode):
with ViewToErrno():
node = self.view.mkdir(path)
self.send(node)
return 0
# def statfs(self, path):
# full_path = self._full_path(path)
# stv = os.statvfs(full_path)
# return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
# 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
# 'f_frsize', 'f_namemax'))
def unlink(self, path):
with ViewToErrno():
node = self.view.delete(path)
self.send(node)
# return os.unlink(self._full_path(path))
# def symlink(self, name, target):
# return os.symlink(name, self._full_path(target))
def rename(self, old, new):
raise NotImplementedError
# def link(self, target, name):
# return os.link(self._full_path(target), self._full_path(name))
#
|
def utimens(self, path, times=None):
# return os.utime(self._full_path(path), times)
# # File methods
# # ============
def open(self, path, flags):
node = self.get_node(pat
|
h)
id = int(node.entry.hash, 16)
if path not in self.cache:
self.cache[path] = node.content
self.dirty[path] = False
return id
def create(self, path, mode, fi=None):
self.cache[path] = b''
self.dirty[path] = True
return id(path)
def read(self, path, length, offset, fh):
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
content = node.content
return content[offset:offset+length]
def write(self, path, buf, offset, fh):
# TODO check write perissions
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
content = node.content
size = len(buf)
new_content = content[:offset] + buf + content[offset+size:]
if content != new_content:
self.dirty[path] = True
self.cache[path] = new_content
return size
def truncate(self, path, length, fh=None):
self.cache[path] = self.cache[path][:length]
self.dirty[path] = True
# def flush(self, path, fh):
# # TODO Filesystems shouldn't assume that flush will always be called after some writes, or that if will be called at all.
# content = self.cache.pop(path, None)
# dirty = self.dirty.pop(path, False)
# if content is not None and dirty:
# print('write')
# node = self.view.write(path, content)
## self.send(node)
def release(self, path, fh):
content = self.cache.pop(path, None)
dirty = self.dirty.pop(path, False)
if content is not None and dirty:
# TODO raise permission denied should happen in write() create().... not here
with ViewToErrno():
node = self.view.write(path, content)
self.send(node)
# def fsync(self, path, fdatasync, fh):
# return self.flush(path, fh)
# return None
|
vlna/another-py-invaders
|
another-py-invaders.py
|
Python
|
gpl-3.0
| 3,451 | 0.006375 |
# import libraries
import math
import random
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()
width, height = 8
|
00, 600
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False]
player =
|
[100, 520]
invaders = []
bullets = []
bombs = []
rockets = []
rocketpieces = []
bgimg = pygame.image.load("g:/invaders/paragliding_2017_4_bsl-73.jpg")
invaderimg = pygame.transform.scale(pygame.image.load("g:/invaders/Space-Invaders-PNG-Clipart.png"), (64, 64))
playerimg = pygame.transform.scale(pygame.image.load("g:/invaders/space-invaders-1again.png"), (64, 64))
bulletimg = pygame.transform.scale(pygame.image.load("g:/invaders/square-rounded-512.png"), (16, 16))
# 4 - keep looping through
running = 1
exitcode = 0
invadersmv = 1
# create invaders
for i in range (0, 734, 96):
for j in range (0, 300, 64):
invaders.append([i, j])
while running:
# 5 - clear the screen before drawing it again
movedown=False
#screen.fill(0)
# 6 - draw the screen elements
screen.blit(bgimg, (0, 0))
screen.blit(playerimg, player)
for invader in invaders:
screen.blit(invaderimg, invader)
for invader in invaders:
if invader[0] >= 736:
invadersmv = -1
movedown=True
break
if invader[0] <= 0:
invadersmv = 1
movedown=True
break
for invader in invaders:
invader[0] += invadersmv
if movedown: invader[1] += 2
for bullet in bullets:
screen.blit(bulletimg, bullet)
bullet[1] -= 1
if len(bullets) > 0 and bullets[0][1] <= -16:
bullets.pop(0)
# collision check
destroyedinvaders = []
destroyedbullets = []
for bullet in bullets:
for invader in invaders:
if bullet[0] < invader[0] + 16 and bullet[0] + 64 > invader[0] and bullet[1] < invader[1] + 16 and invader[1] + 16 > bullet[1]:
destroyedbullets.append(bullet)
destroyedinvaders.append(invader)
#print('collision')
bullets = [item for item in bullets if item not in destroyedbullets]
invaders = [item for item in invaders if item not in destroyedinvaders]
# 9 - Move player
## if keys[0]:
## player[1] -= 5
## elif keys[2]:
## player[1] += 5
if keys[1] and player[0] >= 0:
player[0] -= 5
elif keys[3] and player[0] <= 736:
player[0] += 5
# 7 - update the screen
pygame.display.flip()
# 8 - check events
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_w:
keys[0] = True
elif event.key == K_a:
keys[1] = True
elif event.key == K_s:
keys[2] = True
elif event.key == K_d:
keys[3] = True
if event.type == KEYUP:
if event.key == K_w:
keys[0] = False
elif event.key == K_a:
keys[1] = False
elif event.key == K_s:
keys[2] = False
elif event.key == K_d:
keys[3] = False
if event.type == QUIT:
pygame.quit()
exit(0)
if event.type == MOUSEBUTTONDOWN:
#shoot.play()
if len(bullets) < 3: # up to three bullets
bullets.append([player[0]+32, player[1]-32])
|
liqd/adhocracy4
|
tests/filter/test_free_text_filter.py
|
Python
|
agpl-3.0
| 2,219 | 0 |
import django_filters
import pytest
from django.core.exceptions import ImproperlyConfigured
from adhocracy4.filters.filters import FreeTextFilter
from adhocracy4.filters.views import FilteredListV
|
iew
from tests.apps.questions import models as question_models
class SearchFilterS
|
et(django_filters.FilterSet):
search = FreeTextFilter(
fields=['text']
)
class Meta:
model = question_models.Question
fields = ['search']
@pytest.fixture
def question_list_view():
class DummyView(FilteredListView):
model = question_models.Question
filter_set = SearchFilterSet
return DummyView.as_view()
@pytest.mark.django_db
def test_free_text_filter(rf, question_list_view, phase, question_factory):
project = phase.module.project
question_factory(text='some text')
question_factory(text='more text')
request = rf.get('/questions')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 2
request = rf.get('/questions?search=')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 2
request = rf.get('/questions?search=text')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 2
request = rf.get('/questions?search=some')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 1
request = rf.get('/questions?search=katze')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 0
@pytest.mark.django_db
def test_free_text_filter_exception():
with pytest.raises(ImproperlyConfigured):
class SearchFilterSet(django_filters.FilterSet):
search = FreeTextFilter(
# no fields set
)
class Meta:
model = question_models.Question
fields = ['search']
|
vinaymayar/python-game-workshop
|
lesson4/guess_game.py
|
Python
|
mit
| 1,288 | 0.009317 |
# Challenge: guess-number game infinite number of guesses
# The game: Guess the number game.
# In this game we will try to guess a random number between 0 and 100 generated
# by the computer. Depending on our guess, the computer will give us hints,
# whether we guessed too high, too low or if we guessed correctly.
#
# Challenge: Make the game harder by limiting the number of guesses the player
# can make.
# Hint: Try creating a new variable t
|
hat counts the number of guesses.
# Increment it every time the user makes a guess and use control flow statements
# to see if they reached the limit!
# Don't worry about these lines.
from random import randint
secret_number = randint(0, 100)
while(True): # don't worry about this either, but be sure to follow the indentation level
print("Make your guess:")
guess = ... # remember how we get the input from the user?
if (guess == secret_number):
# a
|
dd a print statement letting the user know they made the right guess.
break; # don't worry about this line, we will learn more about this, when we
# learn about loops!
elif ... # how can we check if the guess is too high?
# what should we do if the guess is too high?
else:
# what should we do if the guess is too low?
|
ccpem/mrcfile
|
tests/test_mrcmemmap.py
|
Python
|
bsd-3-clause
| 2,865 | 0.004538 |
# Copyright (c) 2016, Science and Technology Facilities Council
# This software is distributed under a BSD licence. See LICENSE.txt.
"""
Tests for mrcmemmap.py
"""
# Import Python 3 features for future-proofing
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import unittest
import numpy as np
from .test_mrcfile import MrcFileTest
from mrcfile.mrcmemmap import MrcMemmap
class MrcMemmapTest(MrcFileTest):
"""Unit tests for MRC file I/O with memory-mapped files.
Note that this test class inherits MrcFileTest to ensure all of the tests
for MrcObject and MrcFile work correctly for the MrcMemmap subclass.
"""
def setUp(self):
# Set up as if for MrcFileTest
super(MrcMemmapTest, self).setUp()
# Set the newmrc method to the MrcMemmap constructor
self.newmrc = MrcMemmap
# Set up parameters so MrcObject tests run on the MrcMemmap class
obj_mrc_name = os.path.join(self.test_output, 'test_mrcobject.mrc')
self.mrcobject = MrcMemmap(obj_mrc_name, 'w+', overwrite=True)
def test_repr(self):
"""Override test to change expected repr string."""
wi
|
th MrcMemmap(self.example_mrc_name) as mrc:
assert repr(mrc) == "MrcMemmap('
|
{0}', mode='r')".format(self.example_mrc_name)
def test_exception_raised_if_file_is_too_small_for_reading_data(self):
"""Override test to change expected error message."""
with self.newmrc(self.temp_mrc_name, mode='w+') as mrc:
mrc.set_data(np.arange(24, dtype=np.int16).reshape(2, 3, 4))
assert mrc.header.mz == 2
mrc.header.mz = mrc.header.nz = 3
# The exception type and message are different on Linux and Windows
expected_error_msg = ("mmap length is greater than file size"
"|Not enough storage is available")
with self.assertRaisesRegex(Exception, expected_error_msg):
self.newmrc(self.temp_mrc_name)
def test_data_is_not_copied_unnecessarily(self):
"""Override test because data has to be copied for mmap."""
data = np.arange(6, dtype=np.int16).reshape(1, 2, 3)
self.mrcobject.set_data(data)
assert self.mrcobject.data is not data
def test_data_array_cannot_be_changed_after_closing_file(self):
mrc = self.newmrc(self.temp_mrc_name, mode='w+')
mrc.set_data(np.arange(12, dtype=np.int16).reshape(3, 4))
data_ref = mrc.data
# Check that writing to the data array does not raise an exception
data_ref[0,0] = 1
mrc.close()
assert not data_ref.flags.writeable
with self.assertRaises(ValueError):
data_ref[0,0] = 2
if __name__ == "__main__":
unittest.main()
|
PeterLValve/apitrace
|
specs/d3d9types.py
|
Python
|
mit
| 30,033 | 0.001232 |
##########################################################################
#
# Copyright 2011 Jose Fonseca
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""d3d9types.h"""
from winapi import *
D3DCOLOR = Alias("D3DCOLOR", DWORD)
D3DVECTOR = Struct("D3DVECTOR", [
(Float, "x"),
(Float, "y"),
(Float, "z"),
])
D3DCOLORVALUE = Struct("D3DCOLORVALUE", [
(Float, "r"),
(Float, "g"),
(Float, "b"),
(Float, "a"),
])
D3DRECT = Struct("D3DRECT", [
(LONG, "x1"),
(LONG, "y1"),
(LONG, "x2"),
(LONG, "y2"),
])
D3DMATRIX = Struct("D3DMATRIX", [
(Array(Array(Float, 4), "4"), "m"),
])
D3DVIEWPORT9 = Struct("D3DVIEWPORT9", [
(DWORD, "X"),
(DWORD, "Y"),
(DWORD, "Width"),
(DWORD, "Height"),
(Float, "MinZ"),
(Float, "MaxZ"),
])
D3DCLIPPLANE = Flags(DWORD, [
"D3DCLIPPLANE0",
"D3DCLIPPLANE1",
"D3DCLIPPLANE2",
"D3DCLIPPLANE3",
"D3DCLIPPLANE4",
"D3DCLIPPLANE5",
])
D3DCS = Flags(DWORD, [
"D3DCS_ALL",
"D3DCS_LEFT",
"D3DCS_RIGHT",
"D3DCS_TOP",
"D3DCS_BOTTOM",
"D3DCS_FRONT",
"D3DCS_BACK",
"D3DCS_PLANE0",
"D3DCS_PLANE1",
"D3DCS_PLANE2",
"D3DCS_PLANE3",
"D3DCS_PLANE4",
"D3DCS_PLANE5",
])
D3DCLIPSTATUS9 = Struct("D3DCLIPSTATUS9", [
(DWORD, "ClipUnion"),
(DWORD, "ClipIntersection"),
])
D3DMATERIAL9 = Struct("D3DMATERIAL9", [
(D3DCOLORVALUE, "Diffuse"),
(D3DCOLORVALUE, "Ambient"),
(D3DCOLORVALUE, "Specular"),
(D3DCOLORVALUE, "Emissive"),
(Float, "Power"),
])
D3DLIGHTTYPE = Enum("D3DLIGHTTYPE", [
"D3DLIGHT_POINT",
"D3DLIGHT_SPOT",
"D3DLIGHT_DIRECTIONAL",
])
D3DLIGHT9 = Struct("D3DLIGHT9", [
(D3DLIGHTTYPE, "Type"),
(D3DCOLORVALUE, "Diffuse"),
(D3DCOLORVALUE, "Specular"),
(D3DCOLORVALUE, "Ambient"),
(D3DVECTOR, "Position"),
(D3DVECTOR, "Direction"),
(Float, "Range"),
(Float, "Falloff"),
(Float, "Attenuation0"),
(Float, "Attenuation1"),
(Float, "Attenuation2"),
(Float, "Theta"),
(Float, "Phi"),
])
D3DCLEAR = Flags(DWORD, [
"D3DCLEAR_TARGET",
"D3DCLEAR_ZBUFFER",
"D3DCLEAR_STENCIL",
])
D3DSHADEMODE = Enum("D3DSHADEMODE", [
"D3DSHADE_FLAT",
"D3DSHADE_GOURAUD",
"D3DSHADE_PHONG",
])
D3DFILLMODE = Enum("D3DFILLMODE", [
"D3DFILL_POINT",
"D3DFILL_WIREFRAME",
"D3DFILL_SOLID",
])
D3DBLEND = Enum("D3DBLEND", [
"D3DBLEND_ZERO",
"D3DBLEND_ONE",
"D3DBLEND_SRCCOLOR",
"D3DBLEND_INVSRCCOLOR",
"D3DBLEND_SRCALPHA",
"D3DBLEND_INVSRCALPHA",
"D3DBLEND_DESTALPHA",
"D3DBLEND_INVDESTALPHA",
"D3DBLEND_DESTCOLOR",
"D3DBLEND_INVDESTCOLOR",
"D3DBLEND_SRCALPHASAT",
"D3DBLEND_BOTHSRCALPHA",
"D3DBLEND_BOTHINVSRCALPHA",
"D3DBLEND_BLENDFACTOR",
"D3DBLEND_INVBLENDFACTOR",
"D3DBLEND_SRCCOLOR2",
"D3DBLEND_INVSRCCOLOR2",
])
D3DBLENDOP = Enum("D3DBLENDOP", [
"D3DBLENDOP_ADD",
"D3DBLENDOP_SUBTRACT",
"D3DBLENDOP_REVSUBTRACT",
"D3DBLENDOP_MIN",
"D3DBLENDOP_MAX",
])
D3DTEXTUREADDRESS = Enum("D3DTEXTUREADDRESS", [
"D3DTADDRESS_WRAP",
"D3DTADDRESS_MIRROR",
"D3DTADDRESS_CLAMP",
"D3DTADDRESS_BORDER",
"D3DTADDRESS_MIRRORONCE",
])
D3DCULL = Enum("D3DCULL", [
"D3DCULL_NONE",
"D3DCULL_CW",
"D3DCULL_CCW",
])
D3DCMPFUNC = Enum("D3DCMPFUNC", [
"D3DCMP_NEVER",
"D3DCMP_LESS",
"D3DCMP_EQUAL",
"D3DCMP_LESSEQUAL",
"D3DCMP_GREATER",
"D3DCMP_NOTEQUAL",
"D3DCMP_GREATEREQUAL",
"D3DCMP_ALWAYS",
])
D3DSTENCILOP = Enum("D3DSTENCILOP", [
"D3DSTENCILOP_KEEP",
"D3DSTENCILOP_ZERO",
"D3DSTENCILOP_REPLACE",
"D3DSTENCILOP_INCRSAT",
"D3DSTENCILOP_DECRSAT",
"D3DSTENCILOP_INVERT",
"D3DSTENCILOP_INCR",
"D3DSTENCILOP_DECR",
])
D3DFOGMODE = Enum("D3DFOGMODE", [
"D3DFOG_NONE",
"D3DFOG_EXP",
"D3DFOG_EXP2",
"D3DFOG_LINEAR",
])
D3DZBUFFERTYPE = Enum("D3DZBUFFERTYPE", [
"D3DZB_FALSE",
"D3DZB_TRUE",
"D3DZB_USEW",
])
D3DPRIMITIVETYPE = Enum("D3DPRIMITIVETYPE", [
"D3DPT_POINTLIST",
"D3DPT_LINELIST",
"D3DPT_LINESTRIP",
"D3DPT_TRIANGLELIST",
"D3DPT_TRIANGLESTRIP",
"D3DPT_TRIANGLEFAN",
])
D3DTRANSFORMSTATETYPE = Enum("D3DTRANSFORMSTATETYPE", [
"D3DTS_VIEW",
"D3DTS_PROJECTION",
"D3DTS_TEXTURE0",
"D3DTS_TEXTURE1",
"D3DTS_TEXTURE2",
"D3DTS_TEXTURE3",
"D3DTS_TEXTURE4",
"D3DTS_TEXTURE5",
"D3DTS_TEXTURE6",
"D3DTS_TEXTURE7",
"D3DTS_WORLD",
"D3DTS_WORLD1",
"D3DTS_WORLD2",
"D3DTS_WORLD3",
])
D3DMATERIALCOLORSOURCE = Enum("D3DMATERIALCOLORSOURCE", [
"D3DMCS_MATERIAL",
"D3DMCS_COLOR1",
"D3DMCS_COLOR2",
])
D3DWRAPCOORD = Flags(DWORD, [
"D3DWRAPCOORD_0",
"D3DWRAPCOORD_1",
"D3DWRAPCOORD_2",
"D3DWRAPCOORD_3",
])
D3DCOLORWRITEENABLE = Flags(DWORD, [
"D3DCOLORWRITEENABLE_RED",
"D3DCOLORWRITEENABLE_GREEN",
"D3DCOLORWRITEENABLE_BLUE",
"D3DCOLORWRITEENABLE_ALPHA",
])
D3DDEGREETYPE = Enum("D3DDEGREETYPE", [
"D3DDEGREE_LINEAR",
"D3DDEGREE_QUADRATIC",
"D3DDEGREE_CUBIC",
"D3DDEGREE_QUINTIC",
])
D3DPATCHEDGESTYLE = Enum("D3DPATCHEDGESTYLE", [
"D3DPATCHEDGE_DISCRETE",
"D3DPATCHEDGE_CONTINUOUS",
])
D3DVERTEXBLENDFLAGS = Enum("D3DVERTEXBLENDFLAGS", [
"D3DVBF_DISABLE",
"D3DVBF_1WEIGHTS",
"D3DVBF_2WEIGHTS",
"D3DVBF_3WEIGHTS",
"D3DVBF_TWEENING",
"D3DVBF_0WEIGHTS",
])
D3DDEBUGMONITORTOKENS = Enum("D3DDEBUGMONITORTOKENS", [
"D3DDMT_ENABLE",
"D3DDMT_DISABLE",
])
# TODO: Convert these to/from actual floats
FLOAT_AS_DWORD = DWORD
D3DRENDERSTATETYPE, D3DRENDERSTATEVALUE = EnumPolymorphic("D3DRENDERSTATETYPE", "State", [
("D3DRS_ZENABLE", D3DZBUFFERTYPE),
("D3DRS_FILLMODE", D3DFILLMODE),
("D3DRS_SHADEMODE", D3DSHADEMODE),
("D3DRS_ZWRITEENABLE", BOOL),
("D3DRS_ALPHATESTENABLE", BOOL),
("D3DRS_LASTPIXEL", BOOL),
("D3DRS_SRCBLEND", D3DBLEND),
("D3DRS_DESTBLEN
|
D", D3DBLEND),
("D3DRS_CULLMODE", D3DCULL),
("D3DRS_ZFUNC", D3DCMPFUNC),
("D3DRS_ALPHAREF", DWORD),
("D3DRS_ALPHAFUNC", D3DCMPFUNC),
("D3DRS_DITHERENABLE", BOOL),
("D3DRS_ALPHABLENDENABLE", BOOL),
("D3DRS_FOGENABLE", BOOL),
("D3DRS_SPECULARENABLE", BOOL),
|
("D3DRS_FOGCOLOR", D3DCOLOR),
("D3DRS_FOGTABLEMODE", D3DFOGMODE),
("D3DRS_FOGSTART", FLOAT_AS_DWORD),
("D3DRS_FOGEND", FLOAT_AS_DWORD),
("D3DRS_FOGDENSITY", FLOAT_AS_DWORD),
("D3DRS_RANGEFOGENABLE", BOOL),
("D3DRS_STENCILENABLE", BOOL),
("D3DRS_STENCILFAIL", D3DSTENCILOP),
("D3DRS_STENCILZFAIL", D3DSTENCILOP),
("D3DRS_STENCILPASS", D3DSTENCILOP),
("D3DRS_STENCILFUNC", D3DCMPFUNC),
("D3DRS_STENCILREF", DWORD),
("D3DRS_STENCILMASK", DWORD),
("D3DRS_STENCILWRITEMASK", DWORD),
("D3DRS_TEXTUREFACTOR", D3DCOLOR),
("D3DRS_WRAP0", D3DWRAPCOORD),
("D3DRS_WRAP1", D3DWRAPCOORD),
("D3DRS_WRAP2", D3DWRAPCOORD),
("D3DRS_WRAP3", D3DWRAPCOORD),
("D3DRS_WRAP4", D3DWRAPCOORD),
("D3DRS_WRAP5", D3DWRAPCOORD),
("D3DRS_WRAP6", D3DWRAPCOORD),
("
|
gabriellmb05/trabalho-les
|
src/project_manager/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 2,659 | 0.004137 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-02 20:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
|
operations = [
migrations.CreateModel(
name='Credencial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=60, unique=True)),
('password', models.Char
|
Field(max_length=255)),
('token', models.CharField(blank=True, max_length=60, unique=True)),
('agente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Ferramenta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=60, unique=True)),
('link', models.URLField()),
],
),
migrations.CreateModel(
name='Linguagem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=60, unique=True)),
],
),
migrations.CreateModel(
name='Projeto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=60, unique=True)),
('dono', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dono', to=settings.AUTH_USER_MODEL)),
('ferramentas', models.ManyToManyField(related_name='ferramentas', to='project_manager.Ferramenta')),
('linguagem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='linguagem', to='project_manager.Linguagem')),
('participantes', models.ManyToManyField(related_name='participantes', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='credencial',
name='ferramenta',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project_manager.Ferramenta'),
),
]
|
LordPharaoh/typecat
|
typecat/display/fontbox.py
|
Python
|
mit
| 1,483 | 0.00472 |
import typecat.font2img as f2i
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class FontBox(Gtk.FlowBoxChild):
def set_text(self, arg1):
if type(arg1) is str:
self.text = arg1
if type(arg1) is int:
self.font_size = arg1
try:
self.box.destroy()
except AttributeError:
pass
self.box = Gtk.Box()
self.box.set_border_width(5)
self.image = G
|
tk.Image(halign=Gtk.Align.CENTER)
self.font.set_size(self.font_size)
self.image.set_from_pixbuf(f2i.multiline_gtk(self.text, self.font.pilfont, self.
|
size, background=self.bg, foreground=self.fg))
self.box.pack_start(self.image, True, False, 0)
self.frame.add(self.box)
self.show_all()
def __init__(self, font, text="Handgloves", size=(200, 150), font_size=75):
Gtk.FlowBoxChild.__init__(self)
self.frame = Gtk.Frame()
self.set_border_width(5)
self.font = font
self.font_size = int(size[0]/9)
self.font.set_size(self.font_size)
self.text = text
self.size = size
self.title = self.font.name if len(self.font.name) < 30 else self.font.name[:27] + "..."
self.frame.set_label(self.title)
self.frame.set_label_align(.1, 0)
entry = Gtk.Entry()
self.bg = (255, 255, 255)
self.fg = (0, 0, 0)
self.set_text(text)
self.add(self.frame)
|
smartshark/serverSHARK
|
smartshark/sparkconnector.py
|
Python
|
apache-2.0
| 2,541 | 0.00669 |
import server.settings
import requests
import json
import re
class BatchJob(object):
def __init__(self, id, state, log):
self.id = id
self.state = state
self.log = log
def __str__(self):
return 'id: %s, state: %s, log: %s' % (self.id, self.state, '\n'.join(self.log))
class SparkConnector(object):
def __init__(self):
self.address = "http://%s:%s" % (server.settings.SPARK_MASTER['host'], server.settings.SPARK_MASTER['port'])
self.json_header = {'Content-Type': 'application/json'}
self.batches_endpoint = self.address + '/batches'
def submit_batch_job(self, file_path, proxy_user=None, class_name=None, args=[], conf=None):
# Create data stuff
data = {
'file': file_path,
'proxy_user': proxy_user,
'class_name': class_name,
'args': args,
'conf': conf
}
# filter out if empty or none
data = {k: v for k, v in data.items() if v is not None and v}
ret = requests.post(self.batches_endpoint, data=json.dumps(data), headers=self.json_header)
return self.create_batch_object(ret.json())
def get_active_batch_jobs(self):
ret = requests.get(self.batches_endpoint)
batch_jobs = []
for batch_job in ret.json()['sessions']:
batch_jobs.append(self.create_batch_object(batch_job))
return batch_jobs
def get_log_from_batch_job(self, batch_id, from_log=0, size_log=2000, only_user_output=False):
payload = {'from': from_log, 'size': size_log}
ret = requests.get(self.batches_endpoint+'/'+str(batch_id)+'/log', params=payload)
if only_user_output:
pattern = re.compile("\d{2}[:/]\d{2}[:/]\d{2}")
output = []
for line in ret.json()['log'][1:]:
if pattern.match(line) is None:
output.append(line)
return '\n'.join(output)
else:
return '\n'.join(ret.json()['log'])
def kill_batch_job(self, batch_id):
ret = requests.delete(self.batches_endpoint+'/'+str(batch_id))
if ret.json()['msg'] == 'deleted':
return True
return False
@
|
staticmethod
def create_batch_object(data_dict):
return BatchJob(data_
|
dict['id'], data_dict['state'], data_dict['log'])
#sc = SparkConnector()
#bj = sc.submit_batch_job('/home/ftrauts/Arbeit/spark/examples/src/main/python/pi.py')
#print(sc.get_log_from_batch_job(4, only_user_output=True))
|
cliffano/swaggy-jenkins
|
clients/python-aiohttp/generated/openapi_server/models/queue_left_item.py
|
Python
|
mit
| 8,986 | 0.003116 |
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.cause_action import CauseAction
from openapi_server.models.free_style_build import FreeStyleBuild
from openapi_server.models.free_style_project import FreeStyleProject
from openapi_server import util
class QueueLeftItem(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class: str=None, actions: List[CauseAction]=None, blocked: bool=None, buildable: bool=None, id: int=None, in_queue_since: int=None, params: str=None, stuck: bool=None, task: FreeStyleProject=None, url: str=None, why: str=None, cancelled: bool=None, executable: FreeStyleBuild=None):
"""QueueLeftItem - a model defined in OpenAPI
:param _class: The _class of this QueueLeftItem.
:param actions: The actions of this QueueLeftItem.
:param blocked: The blocked of this QueueLeftItem.
:param buildable: The buildable of this QueueLeftItem.
:param id: The id of this QueueLeftItem.
:param in_queue_since: The in_queue_since of this QueueLeftItem.
:param params: The params of this QueueLeftItem.
:param stuck: The stuck of this QueueLeftItem.
:param task: The task of this QueueLeftItem.
:param url: The url of this QueueLeftItem.
:param why: The why of this QueueLeftItem.
:param cancelled: The cancelled of this QueueLeftItem.
:param executable: The executable of this QueueLeftItem.
"""
self.openapi_types = {
'_class': str,
'actions': List[CauseAction],
'blocked': bool,
'buildable': bool,
'id': int,
'in_queue_since': int,
'params': str,
'stuck': bool,
'task': FreeStyleProject,
'url': str,
'why': str,
'cancelled': bool,
'executable': FreeStyleBuild
}
self.attribute_map = {
'_class': '_class',
'actions': 'actions',
'blocked': 'blocked',
'buildable': 'buildable',
'id': 'id',
'in_queue_since': 'inQueueSince',
'params': 'params',
'stuck': 'stuck',
'task': 'task',
'url': 'url',
'why': 'why',
'cancelled': 'cancelled',
'executable': 'executable'
}
self.__class = _class
self._actions = actions
self._blocked = blocked
self._buildable = buildable
self._id = id
self._in_queue_since = in_queue_since
self._params = params
self._stuck = stuck
self._task = task
self._url = url
self._why = why
self._cancelled = cancelled
self._executable = executable
@classmethod
def from_dict(cls, dikt: dict) -> 'QueueLeftItem':
"""Returns the dict as a model
:param dikt: A dict.
:return: The QueueLeftItem of this QueueLeftItem.
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self):
"""Gets the _class of this QueueLeftItem.
:return: The _class of this QueueLeftItem.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this QueueLeftItem.
:param _class: The _class of this QueueLeftItem.
:type _class: str
"""
self.__class = _class
@property
def actions(self):
"""Gets the actions of this
|
QueueLeftItem.
:return: The actions of this QueueLeftItem.
:rtype: List[CauseAction]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""Sets the actions of this QueueLeftItem.
:param actions: The actions of this QueueLeftItem.
:type actions: List[CauseAction]
"""
self._actions = actions
|
@property
def blocked(self):
"""Gets the blocked of this QueueLeftItem.
:return: The blocked of this QueueLeftItem.
:rtype: bool
"""
return self._blocked
@blocked.setter
def blocked(self, blocked):
"""Sets the blocked of this QueueLeftItem.
:param blocked: The blocked of this QueueLeftItem.
:type blocked: bool
"""
self._blocked = blocked
@property
def buildable(self):
"""Gets the buildable of this QueueLeftItem.
:return: The buildable of this QueueLeftItem.
:rtype: bool
"""
return self._buildable
@buildable.setter
def buildable(self, buildable):
"""Sets the buildable of this QueueLeftItem.
:param buildable: The buildable of this QueueLeftItem.
:type buildable: bool
"""
self._buildable = buildable
@property
def id(self):
"""Gets the id of this QueueLeftItem.
:return: The id of this QueueLeftItem.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this QueueLeftItem.
:param id: The id of this QueueLeftItem.
:type id: int
"""
self._id = id
@property
def in_queue_since(self):
"""Gets the in_queue_since of this QueueLeftItem.
:return: The in_queue_since of this QueueLeftItem.
:rtype: int
"""
return self._in_queue_since
@in_queue_since.setter
def in_queue_since(self, in_queue_since):
"""Sets the in_queue_since of this QueueLeftItem.
:param in_queue_since: The in_queue_since of this QueueLeftItem.
:type in_queue_since: int
"""
self._in_queue_since = in_queue_since
@property
def params(self):
"""Gets the params of this QueueLeftItem.
:return: The params of this QueueLeftItem.
:rtype: str
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this QueueLeftItem.
:param params: The params of this QueueLeftItem.
:type params: str
"""
self._params = params
@property
def stuck(self):
"""Gets the stuck of this QueueLeftItem.
:return: The stuck of this QueueLeftItem.
:rtype: bool
"""
return self._stuck
@stuck.setter
def stuck(self, stuck):
"""Sets the stuck of this QueueLeftItem.
:param stuck: The stuck of this QueueLeftItem.
:type stuck: bool
"""
self._stuck = stuck
@property
def task(self):
"""Gets the task of this QueueLeftItem.
:return: The task of this QueueLeftItem.
:rtype: FreeStyleProject
"""
return self._task
@task.setter
def task(self, task):
"""Sets the task of this QueueLeftItem.
:param task: The task of this QueueLeftItem.
:type task: FreeStyleProject
"""
self._task = task
@property
def url(self):
"""Gets the url of this QueueLeftItem.
:return: The url of this QueueLeftItem.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this QueueLeftItem.
:param url: The url of this QueueLeftItem.
:type url: str
"""
self._url = url
@property
def why(self):
"""Gets the why of this QueueLeftItem.
:return: The why of this QueueLeftItem.
:rtype: str
"""
return self._why
@why.setter
def why(self, why):
"""Sets the why of this QueueLeftItem.
:param why: The why of this QueueLeftItem.
:type why: str
"""
self._why = why
@property
def cancelled(self):
"""Gets the cancelled of this QueueLeftItem.
:return: The cancelled of this QueueLeftItem.
:rtype:
|
CodeNameGhost/shiva
|
thirdparty/scapy/arch/bpf/__init__.py
|
Python
|
mit
| 79 | 0 |
# Gu
|
illaume Valadon <guillaume@valadon.net>
"""
Scapy *BSD native support
"""
| |
ktan2020/legacy-automation
|
win/Lib/site-packages/selenium/webdriver/ie/__init__.py
|
Python
|
mit
| 643 | 0 |
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed
|
to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License f
|
or the specific language governing permissions and
# limitations under the License.
|
ldgit/remote-phpunit
|
tests/app/commands/test_file_command.py
|
Python
|
mit
| 7,821 | 0.005114 |
import unittest
from app.commands.file_command import FileCommand
class TestFileCommand(unittest.TestCase):
def setUp(self):
self.window = WindowSpy()
self.settings = PluginSettingsStub()
self.sublime = SublimeSpy()
self.os_path = OsPathSpy()
# SUT
self.command = FileCommand(self.settings, self.os_path, self.sublime)
def test_open_source_file(self):
self.settings.tests_folder = 'tests/unit'
self.command.open_source_file('C:/path/to/root/tests/unit/path/to/fileTest.php', sel
|
f.window)
|
self.assertEqual('C:/path/to/root/path/to/file.php', self.window.file_to_open)
def test_open_source_file_works_with_backslashes(self):
self.settings.tests_folder = 'tests/unit'
self.command.open_source_file('C:\\path\\to\\root\\tests\\unit\\path\\to\\fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/file.php', self.window.file_to_open)
def test_open_source_file_works_for_network_paths(self):
self.settings.tests_folder = 'tests'
self.command.open_source_file('\\\\server\\dev\\root\\tests\\unit\\Service\\SearchParametersMapperTest.php',
self.window)
self.assertEqual('\\\\server\\dev\\root\\Service\\SearchParametersMapper.php', self.window.file_to_open)
def test_open_source_file_works_for_network_paths_and_complex_tests_folder(self):
self.settings.tests_folder = 'tests/unit'
self.command.open_source_file('\\\\server\\dev\\root\\tests\\unit\\Service\\SearchParametersMapperTest.php',
self.window)
self.assertEqual('\\\\server\\dev\\root\\Service\\SearchParametersMapper.php', self.window.file_to_open)
def test_open_source_file_when_tests_folder_is_not_unit_test_folder(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests_folder'
self.command.open_source_file('C:/path/to/root/tests_folder/unit/path/to/fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/file.php', self.window.file_to_open)
def test_open_source_file_remove_only_first_appearance_of_tests_folder_in_path(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests'
self.command.open_source_file('C:/path/to/root/tests/unit/path/to/tests/fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/tests/file.php', self.window.file_to_open)
def test_open_source_file_when_tests_folder_is_not_unit_test_folder_remove_only_unit_folder_after_test_path(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests_folder'
self.command.open_source_file('C:/path/to/root/tests_folder/unit/path/to/unit/fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/unit/file.php', self.window.file_to_open)
def test_if_source_file_exists_return_true(self):
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
actual = self.command.source_file_exists('C:\\path\\to\\root\\tests\\unit\\path\\to\\fileTest.php')
self.assertTrue(actual)
self.assertEqual('C:/path/to/root/path/to/file.php', self.os_path.isfile_received_filepath)
def test_source_file_does_not_exist_if_file_already_is_a_source_file(self):
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
actual = self.command.source_file_exists('root\path\src\Gallery\ImageType.php')
self.assertFalse(actual)
def test_if_source_file_does_not_exist_return_false(self):
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = False
self.assertFalse(self.command.source_file_exists('C:/path/to/root/path/to/fileTest.php'))
self.assertEqual('C:/path/to/root/path/to/file.php', self.os_path.isfile_received_filepath)
def test_if_source_file_is_none_return_false(self):
""" This case is possible when currently opened tab in sublime is untitled (i.e. not yet created) file """
self.assertFalse(self.command.source_file_exists(None))
def test_if_test_file_is_none_return_false(self):
""" This case is possible when currently opened tab in sublime is untitled (i.e. not yet created) file """
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests/unit'
self.assertFalse(self.command.test_file_exists(None, self.window))
def test_open_file(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests/unit'
self.command.open_test_file('C:/path/to/root/path/to/file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.window.file_to_open)
def test_correct_file_name_sent_to_os_is_file_method(self):
self.window.project_root = 'C:/path/to/root'
self.settings.root = ''
self.settings.tests_folder = 'tests/unit'
self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath)
def test_file_exists_ignores_trailing_slash_in_root_path(self):
self.window.project_root = 'C:/path/to/root/'
self.settings.root = ''
self.settings.tests_folder = 'tests/unit'
self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath)
def test_if_test_file_exists_return_true(self):
self.settings.root = 'C:/path/to/root/'
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
self.assertTrue(self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window))
def test_test_file_exists_returns_true_if_test_file_is_input(self):
self.settings.root = 'C:/path/to/root/'
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
self.assertTrue(self.command.test_file_exists('C:/path/to/root/tests/unit/path/to/fileTest.php', self.window))
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath,
'Expected test file filepath as parameter to isfile')
def test_if_test_file_does_not_exist_return_false(self):
self.settings.root = 'C:/path/to/root/'
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = False
self.assertFalse(self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window))
def test_replace_back_slashes_with_forward_slashes(self):
self.window.project_root = 'C:\\path\\to\\root'
self.settings.root = ''
self.settings.tests_folder = 'tests\\unit'
self.command.test_file_exists('C:\\path\\to\\root\\path\\to\\file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath)
class PluginSettingsStub:
pass
class WindowSpy:
def __init__(self):
self.file_to_open = None
self.project_root = None
def folders(self):
return [self.project_root]
def open_file(self, file_to_open):
self.file_to_open = file_to_open
class OsPathSpy:
def __init__(self):
self.is_file_returns = None
self.isfile_received_filepath = None
def isfile(self, filepath):
self.isfile_received_filepath = filepath
return self.is_file_returns
class SublimeSpy:
pass
|
kstilwell/tcex
|
tests/batch/test_attributes_1.py
|
Python
|
apache-2.0
| 1,808 | 0 |
"""Test the TcEx Batch Module."""
# third-party
import pytest
class TestAttributes:
"""Test the TcEx Batch Module."""
@pytest.mark.parametrize(
'name,description,attr_type,attr_value,displayed,source',
[
(
'pytest-adversary-i1-001',
'Attribute Testing',
'Description',
'Pytest',
True,
'pytest-testing',
)
],
)
def test_attributes( # pylint: disable=unused-argument
self, name, description, attr_type, attr_value, displayed, source, tcex
):
"""Test batch attributes creation"""
batch = tcex.batch(owner='TCI')
xid = batch.generate_xid(['pytest', 'adversary', name])
ti = batch.adversary(name=name, xid=xid)
# security label testing - option 1
ti.attribute(
attr_type=attr_type,
attr_value=attr_value,
displayed=displayed,
source=source,
formatter=self.attribute_formatter,
)
# security label testing - option 2
attr = ti.attribute(attr_type=attr_type, attr_value=None)
attr.displayed = displayed
attr.source = source
tcex.log.debug(f'attribute data: {attr}') # coverage: __str__ method
assert a
|
ttr.displayed == displayed
assert attr.source == source
assert attr.type == attr_type
assert attr.value is None
# submit batch
batch.save(ti)
batch_status = batch.submit_all()
assert batch_status[0].get('status') == 'Completed'
assert batch_status[0].get('successCount') == 1
@staticmethod
def attribute_formatter(attr_value):
"""Return formatted tag."""
return attr_value.low
|
er()
|
KaranToor/MA450
|
google-cloud-sdk/lib/surface/config/configurations/list.py
|
Python
|
apache-2.0
| 2,092 | 0.003824 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list named configuration."""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core.configurations import named_configs
from googlecloudsdk.core.configurations import properties_file
class List(base.ListCommand):
"""Lists existing named configurations."""
detailed_help = {
'DESCRIPTION': """\
{description}
Run `$ gcloud topic configurations` for an overview of named
|
configurations.
""",
'EXAMPLES': """\
To list all available configurations, run:
|
$ {command}
""",
}
@staticmethod
def Args(parser):
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
def Run(self, args):
configs = named_configs.ConfigurationStore.AllConfigs()
for _, config in sorted(configs.iteritems()):
props = properties.VALUES.AllValues(
list_unset=True,
properties_file=properties_file.PropertiesFile([config.file_path]),
only_file_contents=True)
yield {
'name': config.name,
'is_active': config.is_active,
'properties': props,
}
def Format(self, args):
return ('table('
'name,'
'is_active,'
'properties.core.account,'
'properties.core.project,'
'properties.compute.zone:label=DEFAULT_ZONE,'
'properties.compute.region:label=DEFAULT_REGION)')
|
muchu1983/104_cameo
|
test/unit/test_spiderForTECHORANGE.py
|
Python
|
bsd-3-clause
| 1,235 | 0.008425 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from cameo.spiderForTECHORANGE import SpiderForTECHORANGE
"""
測試 抓取 TECHORANGE
"""
class SpiderForTECHORANGETest(unittest.TestCase):
#準備
def setUp(self):
logging.basicConfig(level=logging.INFO)
self.spider = SpiderForTECHORANGE()
self.spider.initDriver()
|
#收尾
def tearDown(self):
self.spider.quitDriver()
"""
#測試抓取 index page
def test_downloadIndexPage(self):
logging.info("SpiderForTECHORANGETest.test_downloadIndexPage")
self.spider.downloadIndexPage()
#測試抓取 tag page
def test_downloadTagPage(self):
logging.info("SpiderForTECHORANGETest.test_downloadTagPage")
self.spider.downloadTagPag()
"""
#測試抓取 news page
def test_downloadNewsPage(self):
|
logging.info("SpiderForTECHORANGETest.test_downloadNewsPage")
self.spider.downloadNewsPage(strTagName=None)
#測試開始
if __name__ == "__main__":
unittest.main(exit=False)
|
eayunstack/neutron
|
neutron/tests/unit/agent/l3/test_agent.py
|
Python
|
apache-2.0
| 165,163 | 0.000121 |
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from itertools import chain as iter_chain
from itertools import combinations as iter_combinations
import eventlet
import mock
import netaddr
from neutron_lib.agent import constants as agent_consts
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as lib_constants
from neutron_lib import exceptions as exc
from neutron_lib.plugins import constants as plugin_constants
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from testtools import matchers
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_router_base
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3router
from neutron.agent.l3 import router_processing_queue
from neutron.agent.linux import dibbler
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import pd
from neutron.agent.linux import ra
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.conf.agent import common as agent_config
from neutron.conf.agent.l3 import config as l3_config
from neutron.conf.agent.l3 import ha as ha_conf
from neutron.conf import common as base_config
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
FAKE_ID_2 = _uuid()
FIP_PRI = 32768
class BasicRouterOperationsFramework(base.BaseTestCase):
def setUp(self):
super(BasicRouterOperationsFramework, self).setUp()
mock.patch('eventlet.spawn').start()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
log.register_options(self.conf)
self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf)
ha_conf.register_l3_agent_ha_opts(self.conf)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_process_monitor_opts(self.conf)
agent_config.register_availability_zone_opts_helper(self.conf)
agent_config.register_interface_opts(self.conf)
agent_config.register_external_process_opts(self.conf)
agent_config.register_pd_opts(self.conf)
agent_config.register_ra_opts(self.conf)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('state_path', cfg.CONF.state_path)
self.conf.set_override('pd_dhcp_driver', '')
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.list_network_namespaces_p = mock.patch(
'neutron.agent.linux.ip_lib.list_network_namespaces')
self.list_network_namespaces = self.list_network_namespaces_p.start()
self.ensure_dir = mock.patch(
'oslo_utils.fileutils.ensure_tree').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
'.get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron_lib.utils.file.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.process_monitor = mock.patch(
'neutron.agent.linux.external_process.ProcessMonitor').start()
self.send_adv_notif_p = mock.patch(
'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
self.send_adv_notif = self.send_adv_notif_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
self.mock_rule = mock.MagicMock()
ip_rule.return_value = self.mock_rule
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
subnet_id_1 = _uuid()
subnet_id_2 = _uuid()
self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': subnet_id_1}],
'mtu': 1500,
'network_id': _uuid(),
'device_owner':
lib_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_1,
'ip_address': '152.2.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()},
{'subnets': [{'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': subnet_id_2}],
'mtu': 1450,
'network_id': _uuid(),
'device_owner':
lib_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_2,
'ip_address': '152.10.0.13',
'prefixlen': 16}],
'id': _uuid(),
|
'device_id': _uuid()}]
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _process_router_instance_for_agent(self, agent, ri, router):
ri.router = router
if not ri.radvd:
ri.radvd = ra.DaemonMonitor(router['id'],
|
ri
|
neuroidss/nupic.research
|
htmresearch/frameworks/location/location_network_creation.py
|
Python
|
agpl-3.0
| 17,520 | 0.005936 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
The methods here contain factories to create networks of multiple layers for
experimenting with grid cell location layer (L6a)
"""
import copy
import json
from htmresearch.frameworks.location.path_integration_union_narrowing import (
computeRatModuleParametersFromReadoutResolution,
computeRatModuleParametersFromCellCount)
from nupic.engine import Network
def createL4L6aLocationColumn(network, L4Params, L6aParams,
inverseReadoutResolution=None,
baselineCellsPerAxis=6, suffix=""):
"""
Create a single column network containing L4 and L6a layers. L4 layer
processes sensor inputs while L6a processes motor commands using grid cell
modules. Sensory input is represented by the feature's active columns and
motor input is represented by the displacement vector [dx, dy].
The grid cell modules used by this network are based on
:class:`ThresholdedGaussian2DLocationModule` where the firing rate is computed
from on one or more Gaussian activity bumps. The cells are distributed
uniformly through the rhombus, packed in the optimal hexagonal arrangement.
::
Phase
----- +-------+
+---------->| |<------------+
[2] | +---->| L4 |--winner---+ |
| | | | | |
| | +-------+ | |
| | | ^ | |
| | | | | |
| | | | | |
| | v | | |
| | +-------+ | |
| | | | | |
[1,3] | +---->| L6a |<----------+ |
| | | |--learnable--+
| | +-------+
| | ^
feature reset |
| | |
| | |
[0] [sensorInput] [motorInput]
.. note::
Region names are "motorInput", "sensorInput", "L4", and "L6a".
Each name has an optional string suffix appended to it.
:param network: network to add the column
:type network: Network
:param L4Params: constructor parameters for :class:`ApicalTMPairRegion`
:type L4Params: dict
:param L6aParams: constructor parameters for :class:`Guassian2DLocationRegion`
:type L6aParams: dict
:param inverseReadoutResolution: Optional readout resolution.
The readout resolution specifies the diameter of the circle of phases in the
rhombus encoded by a bump. See `createRatModuleFromReadoutResolution.
:type inverseReadoutResolution: int
:param baselineCellsPerAxis: The baselineCellsPerAxis implies the readout
resolution of a grid cell module. If baselineCellsPerAxis=6, that implies
that the readout resolution is approximately 1/3. If baselineCellsPerAxis=8,
the readout resolution is approximately 1/4
:type baselineCellsPerAxis: int or float
:param suffix: optional string suffix appended to region name. Useful when
creating multicolumn networks.
:type suffix: str
:return: Reference to the given network
:rtype: Network
"""
L6aParams = copy.deepcopy(L6aParams)
if inverseReadoutResolution is not None:
# Configure L6a based on 'resolution'
params = computeRatModuleParametersFromReadoutResolution(inverseReadoutResolution)
L6aParams.update(params)
else:
params = computeRatModuleParametersFromCellCount(L6aParams["cellsPerAxis"],
baselineCellsPerAxis)
L6aParams.update(params)
numOfcols = L4Params["columnCount"]
cellsPerCol = L4Params["cellsPerColumn"]
L6aParams["anchorInputSize"] = numOfcols * cellsPerCol
# Configure L4 'basalInputSize' to be compatible L6a output
moduleCount = L6aParams["moduleCount"]
cellsPerAxis = L6aParams["cellsPerAxis"]
L4Params = copy.deepcopy(L4Params)
L4Params["basalInputWidth"] = moduleCount * cellsPerAxis * cellsPerAxis
# Configure sensor output to be compatible with L4 params
columnCount = L4Params["columnCount"]
# Add regions to network
motorInputName = "motorInput" + suffix
sensorInputName = "sensorInput" + suffix
L4Name = "L4" + suffix
L6aName = "L6a" + suffix
network.addRegion(sensorInputName, "py.RawSensor",
json.dumps({"outputWidth": columnCount}))
network.addRegion(motorInputName, "py.RawValues",
json.dumps({"outputWidth": 2}))
network.addRegion(L4Name, "py.ApicalTMPairRegion", json.dumps(L4Params))
network.addRegion(L6aName, "py.Guassian2DLocationRegion",
json.dumps(L6aParams))
# Link sensory input to L4
network.link(sensorInputName, L4Name, "UniformLink", "",
srcOutput="dataOut", destInput="activeColumns")
# Link motor input to L6a
network.link(motorInputName, L6aName, "
|
UniformLink", "",
srcOutput="dataOut", destInput="displacement")
# Link L6a to L4
network.link(L6aName, L4Name, "UniformLink", "",
srcOutput="activeCells", destInput="basalInput")
network.link(L6aName, L4Name, "UniformLink", "",
srcOutput="learnableCells", destInp
|
ut="basalGrowthCandidates")
# Link L4 feedback to L6a
network.link(L4Name, L6aName, "UniformLink", "",
srcOutput="activeCells", destInput="anchorInput")
network.link(L4Name, L6aName, "UniformLink", "",
srcOutput="winnerCells", destInput="anchorGrowthCandidates")
# Link reset signal to L4 and L6a
network.link(sensorInputName, L4Name, "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
network.link(sensorInputName, L6aName, "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
# Set phases appropriately
network.setPhases(motorInputName, [0])
network.setPhases(sensorInputName, [0])
network.setPhases(L4Name, [2])
network.setPhases(L6aName, [1, 3])
return network
def createL246aLocationColumn(network, L2Params, L4Params, L6aParams,
baselineCellsPerAxis=6,
inverseReadoutResolution=None, suffix=""):
"""
Create a single column network composed of L2, L4 and L6a layers.
L2 layer computes the object representation using :class:`ColumnPoolerRegion`,
L4 layer processes sensors input while L6a processes motor commands using grid
cell modules. Sensory input is represented by the feature's active columns and
motor input is represented by the displacement vector [dx, dy].
The grid cell modules used by this network are based on
:class:`ThresholdedGaussian2DLocationModule` where the firing rate is computed
from on one or more Gaussian activity bumps. The cells are distributed
uniformly through the rhombus, packed in the optimal hexagonal arrangement.
::
Phase +-------+
----- reset | |
+----->| L2 |<------------------+
[3] | | | |
| +-------+
|
philiptzou/clincoded
|
src/clincoded/renderers.py
|
Python
|
mit
| 7,833 | 0.000383 |
from pkg_resources import resource_filename
from pyramid.events import (
BeforeRender,
subscriber,
)
from pyramid.httpexceptions import (
HTTPMovedPermanently,
HTTPPreconditionFailed,
HTTPUnauthorized,
HTTPUnsupportedMediaType,
)
from pyramid.security import forget
from pyramid.settings import asbool
from pyramid.threadlocal import (
manager,
)
from pyramid.traversal import (
split_path_info,
_join_path_tuple,
)
from contentbase.validation import CSRFTokenError
from subprocess_middleware.tween import SubprocessTween
import logging
import os
import psutil
import time
log = logging.getLogger(__name__)
def includeme(config):
config.add_tween(
'.renderers.fix_request_method_tween_factory',
under='contentbase.stats.stats_tween_factory')
config.add_tween(
'.renderers.normalize_cookie_tween_factory',
under='.renderers.fix_request_method_tween_factory')
config.add_tween('.renderers.page_or_json', under='.renderers.normalize_cookie_tween_factory')
config.add_tween('.renderers.security_tween_factory', under='pyramid_tm.tm_tween_factory')
config.scan(__name__)
def fix_request_method_tween_factory(handler, registry):
""" Fix Request method changed by mod_wsgi.
See: https://github.com/GrahamDumpleton/mod_wsgi/issues/2
Apache config:
SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD
"""
def fix_request_method_tween(request):
environ = request.environ
if 'X_REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = environ['X_REQUEST_METHOD']
return handler(request)
return fix_request_method_tween
def security_tween_factory(handler, registry):
def security_tween(request):
login = None
expected_user = request.headers.get('X-If-Match-User')
if expected_user is not None:
login = request.authenticated_userid
if login != 'mailto.' + expected_user:
detail = 'X-If-Match-User does not match'
raise HTTPPreconditionFailed(detail)
# wget may only send credentials following a challenge response.
auth_challenge = asbool(request.headers.get('X-Auth-Challenge', False))
if auth_challenge or request.authorization is not None:
login = request.authenticated_userid
if login is None:
raise HTTPUnauthorized(headerlist=forget(request))
if request.method in ('GET', 'HEAD'):
return handler(request)
if request.content_type != 'application/json':
detail = "%s is not 'application/json'" % request.content_type
raise HTTPUnsupportedMediaType(detail)
token = request.headers.get('X-CSRF-Token')
if token is not None:
# Avoid dirtying the session and adding a Set-Cookie header
# XXX Should consider if this is a good idea or not and timeouts
if token == dict.get(request.session, '_csrft_', None):
return handler(request)
raise CSRFTokenError('Incorrect CSRF token')
if login is None:
login = request.authenticated_userid
if login is not None:
namespace, userid = login.split('.', 1)
if namespace not in ('mailto', 'persona'):
return handler(request)
raise CSRFTokenError('Missing CSRF token')
return security_tween
def normalize_cookie_tween_factory(handler, registry):
from webob.cookies import Cookie
ignore = {
'/favicon.ico',
}
def normalize_cookie_tween(request):
if request.path in ignore or request.path.startswith('/static/'):
return handler(request)
session = request.session
if session or session._cookie_name not in request.cookies:
return handler(request)
response = handler(request)
existing = response.headers.getall('Set-Cookie')
if existing:
cookies = Cookie()
for header in existing:
cookies.load(header)
if session._cookie_name in cookies:
return response
response.delete_cookie(
session._cookie_name,
path=session._cookie_path,
domain=session._cookie_domain,
)
return response
return normalize_cookie_tween
@subscriber(BeforeRender)
def set_x_request_url(event):
# Used by fetch polyfill and server rendering
request = event['request']
request.response.headers['X-Request-URL'] = request.url
@subscriber(BeforeRender)
def canonical_redirect(event):
request = event['request']
# Ignore subrequests
if len(manager.stack) > 1:
return
if request.method not in ('GET', 'HEAD'):
return
if request.response.status_int != 200:
return
if not request.environ.get('clincoded.canonical_redirect', True):
return
if request.path_info == '/':
return
canonical_path = event.rendering_val.get('@id', None)
if canonical_path is None:
return
canonical_path = canonical_path.split('?', 1)[0]
request_path = _join_path_tuple(('',) + split_path_info(request.path_info))
if (request_path == canonical_path.rstrip('/') and
request.path_info.endswith('/') == canonical_path.endswith('/')):
return
if '/@@' in request.path_info:
return
qs = request.query_string
location = canonical_path + ('?' if qs else '') + qs
raise HTTPMovedPermanently(location=location)
def should_transform(request, response):
if request.method not in ('GET', 'HEAD'):
return False
if response.content_type != 'application/json':
return False
format = request.params.get('format')
if format is None:
original_vary = response.vary or ()
response.vary = original_vary + ('Accept', 'Authorization')
if request.authorization is not None:
format = 'json'
else:
mime_type = request.accept.best_match(
[
'text/html',
'application/ld+json',
'application/json',
],
'text/html')
format = mime_type.split('/', 1)[1]
if format == 'ld+json':
format = 'json'
else:
format = format.lower()
if format not in ('html', 'json'):
format = 'html'
if format == 'json':
return False
request._transform_start = time.time()
return True
def after_transform(request, response):
end = time.time()
duration = int((end - request._transform_start) * 1e6)
stats = request._stats
stats['render_count'] = stats.get('render_count', 0) + 1
stats['render_time']
|
= stats.get('render_time', 0) + duration
request._stats_html_attribute = True
# Rendering huge pa
|
ges can make the node process memory usage explode.
# Ideally we would let the OS handle this with `ulimit` or by calling
# `resource.setrlimit()` from a `subprocess.Popen(preexec_fn=...)`.
# Unfortunately Linux does not enforce RLIMIT_RSS.
# An alternative would be to use cgroups, but that makes per-process limits
# tricky to enforce (we would need to create one cgroup per process.)
# So we just manually check the resource usage after each transform.
rss_limit = 256 * (1024 ** 2) # MB
def reload_process(process):
return psutil.Process(process.pid).memory_info().rss > rss_limit
node_env = os.environ.copy()
node_env['NODE_PATH'] = ''
page_or_json = SubprocessTween(
should_transform=should_transform,
after_transform=after_transform,
reload_process=reload_process,
args=['node', resource_filename(__name__, 'static/build/renderer.js')],
env=node_env,
)
|
tkaitchuck/nupic
|
lang/py/engine/__init__.py
|
Python
|
gpl-3.0
| 23,539 | 0.014741 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import nupic.bindings.engine_internal as engine
from nupic.support.lockattributes import LockAttributesMixin
import functools
basicTypes = ['Byte', 'Int16', 'UInt16', 'Int32', 'UInt32', 'Int64', 'UInt64', 'Real32', 'Real64', 'Handle']
# Import all the array types from engine (there is no HandleArray)
arrayTypes = [t + 'Array' for t in basicTypes[:-1]]
for a in arrayTypes:
exec('from %s import %s as %s' % (engine.__name__, a, a))
# Intercept the default exception handling for the purposes of stripping
# parts of the stack trace that can confuse users. If you want the original
# stack trace define this environment variable
if not 'NTA_STANDARD_PYTHON_UNHANDLED_EXCEPTIONS' in os.environ:
import traceback
import cStringIO
def customExceptionHandler(type, value, tb):
"""Catch unhandled Python exception
The handler prints the original exception info including into a buffer.
It then extracts the original error message (when the exception is raised
inside a Py node additional stacktrace info will be appended in the end)
and saves the original exception to a file called error.txt. It prints
just the error message to the screen and tells the user about the error.txt
file.
"""
# Print the exception info to a string IO buffer for manipulation
buff = cStringIO.StringIO()
traceback.print_exception(type, value, tb, file=buff)
text = buff.getvalue()
# get the lines skip the first one: "Traceback (most recent call last)"
lines = text.split('\n')[1:]
#
# Extract the error message
begin = 0
end = len(lines)
for i, line in enumerate(lines):
if line.startswith('RuntimeError:'):
begin = i
#
# elif line.startswith('Traceback (most recent call last):'):
# end = i
# break
#
message = '\n'.join(lines[begin:end])
message = message[len('Runtime Error:'):]
#stacktrace = lines[end:]
# Get the stack trace if available (default to empty string)
stacktrace = getattr(value, 'stackTrace', '')
# Remove engine from stack trace
lines = [x for x in lines if 'engine' not in x]
failMessage = 'The program failed with the following error message:'
dashes = '-' * len(failMessage)
print
print dashes
print 'Traceback (most recent call last):'
print '\n'.join(lines[:begin-2])
if stacktrace:
print stacktrace
print dashes
print 'The program failed with the following error message:'
print dashes
print message
print
#sys.excepthook = customExceptionHandler
# ------------------------------
#
# T I M E R
#
# ------------------------------
# Expose the timer class directly
# Do it this way instead of bringing engine.Timer
# into the namespace to avoid engine
# in the class name
class Timer(engine.Timer):
pass
# ------------------------------
#
# O S
#
# ------------------------------
# Expose the os class directly
# The only wrapped method is getProcessMemoryUsage()
class OS(engine.OS):
pass
# ------------------------------
#
# D I M E N S I O N S
#
# ------------------------------
class Dimensions(engine.Dimensions):
"""Represent the topology of an N-dimensional region
Basically, it is a list of integers such as: [4, 8, 6]
In this example the topology is a 3 dimensional region with
4 x 8 x 6 nodes.
You can initialize it with a list of dimensions or with no arguments
and then append dimensions.
"""
def __init__(self, *args):
"""Construct a Dimensions object
The constructor can be called with no arguments or with a list
of integers
"""
# Init the base class
engine.Dimensions.__init__(self, *args)
def __str__(self):
return self.toString()
# ------------------------------
#
# A R R A Y
#
# ------------------------------
def Array(dtype, size=None, ref=False):
"""Factory function that creates typed Array or ArrayRef objects
dtype - the data type of the array (as string).
Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64
size - the size of the array. Must be positive integer.
"""
def getArrayType(self):
"""A little function to replace the getType() method of arrays
It returns a string representation of the array element type instead of the
integer value (NTA_BasicType enum) returned by the origianl array
"""
return self._dtype
# ArrayRef can't be allocated
if ref:
assert size is None
index = basicTypes.index(dtype)
if index == -1:
raise Exception('Invalid data type: ' + dtype)
if size and size <= 0:
raise Exception('Array size must be positive')
suffix = 'ArrayRef' if ref else 'Array'
arrayFactory = getattr(en
|
gine, dtype + suffix)
arrayFactory.getType = getArrayType
if size:
a = arrayFactory(size)
else:
a = arrayFactory()
a._dtype = basicTypes[index]
return a
def ArrayRef(dtype):
return Array(dtype, None, True)
# -------------------------------------
#
# C O L L E C T I O N
|
W R A P P E R
#
# -------------------------------------
class CollectionIterator(object):
def __init__(self, collection):
self.collection = collection
self.index = 0
def next(self):
index = self.index
if index == self.collection.getCount():
raise StopIteration
self.index += 1
return self.collection.getByIndex(index)[0]
class CollectionWrapper(object):
"""Wrap an nta::Collection with a dict-like interface
The optional valueWrapper is used to wrap values for adaptation purposes.
Maintains the original documentation
collection - the original collection
valueWrapper - an optional callable object used to wrap values.
"""
def IdentityWrapper(o):
return o
def __init__(self, collection, valueWrapper=IdentityWrapper):
self.collection = collection
self.valueWrapper = valueWrapper
self.__class__.__doc__ == collection.__class__.__doc__
def __iter__(self):
return CollectionIterator(self.collection)
def __str__(self):
return str(self.collection)
def __repr__(self):
return repr(self.collection)
def __len__(self):
return self.collection.getCount()
def __getitem__(self, key):
if not self.collection.contains(key):
raise KeyError('Key ' + key + ' not found')
value = self.collection.getByName(key)
value = self.valueWrapper(key, value)
return value
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
return self.collection.contains(key)
def keys(self):
keys = set()
for i in range(self.collection.getCount()):
keys.add(self.collection.getByIndex(i)[0])
return keys
def values(self):
values = set()
for i in range(self.collection.getCount()):
p = self.collection.getByIndex(i)
values.add(self.valueWrapper(p[0], p[1]))
return values
def items(self):
items = set()
for i in range(self.collection.getCount(
|
murarugeorgec/USB-checking
|
USB/USB_devices/usb_list.py
|
Python
|
gpl-3.0
| 11,203 | 0.015532 |
#! /usr/bin/env python
#
# Copyright 2015 George-Cristian Muraru <murarugeorgec@gmail.com>
# Copyright 2015 Tobias Mueller <muelli@cryptobitch.de>
#
# This file is part of USB Inhibitor.
#
# USB Inhibitor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# USB Inhibitor and the afferent extension is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with USB Inhibitor. If not, see <http://www.gnu.org/licenses/>.
#
import threading
from gi.repository import Gtk, GObject, GLib
from usb_checking import RunningMode, USB_ports
from pyudev import MonitorObserver
import read_device
import usb.core
import time
import gobject
# Modified tutorial http://python-gtk-3-tutorial.readthedocs.org/en/latest/treeview.html
class USB_ViewFilterWindow(Gtk.Window):
def __init__(self):
self.device_monitor = USB_ports(RunningMode.
|
GTK)
self.observer = MonitorObserver(self.device_monitor.monitor, callback = self.refresh,
name='monitor-observer')
Gtk.Window.__init__(self, title = "USBGnomento")
self.set_resizable(True)
self.set_border_width(10)
# Setting up the self.grid in which the elements are to be positionned
self.grid = Gtk.Grid()
self.grid.set_column_homogeneous(True)
|
self.grid.set_row_homogeneous(True)
self.add(self.grid)
# Creating the ListStore model
self.usb_list = Gtk.ListStore(str, bool, str, str, str)
self.current_filter_usb = None
# Creating the filter, feeding it with the usb_list model
self.usb_filter = self.usb_list.filter_new()
# Setting the filter function
self.usb_filter.set_visible_func(self.usb_filter_func)
self.treeview = Gtk.TreeView.new_with_model(self.usb_filter)
col = Gtk.TreeViewColumn("Known Device", Gtk.CellRendererPixbuf(), stock_id = 0)
self.treeview.append_column(col)
for i, column_title in enumerate(["Connected", "DescriptorInfo", "Manufacturer", "Product"]):
i = i + 1
renderer = Gtk.CellRendererText()
renderer.set_property('cell-background', 'grey')
column = Gtk.TreeViewColumn(column_title, renderer, text=i)
self.treeview.append_column(column)
# Creating buttons to filter by device state, and setting up their events
self.buttons = list()
for usb_type in ["Connected Devices", "Known Devices", "Unknown Devices"]:
button = Gtk.Button(usb_type)
self.buttons.append(button)
button.connect("clicked", self.on_selection_button_clicked)
self.scrollable_treelist = Gtk.ScrolledWindow()
self.scrollable_treelist.set_vexpand(True)
self.grid.attach(self.scrollable_treelist, 0, 0, 8, 10)
# Write to know devices
button = Gtk.Button("Write selected")
self.buttons.append(button)
button.connect("clicked", self.write_to_known_devices)
# Remove trusted device
button = Gtk.Button("Remove selected")
self.buttons.append(button)
button.connect("clicked", self.remove_from_known_devices)
self.grid.attach_next_to(self.buttons[0], self.scrollable_treelist, Gtk.PositionType.BOTTOM, 1, 1)
for i, button in enumerate(self.buttons[1:]):
self.grid.attach_next_to(button, self.buttons[i], Gtk.PositionType.RIGHT, 1, 1)
self.scrollable_treelist.add(self.treeview)
self.first_populate_table()
self.show_all()
self.observer.start()
def first_populate_table(self):
for device_id in self.device_monitor.known_devices.keys():
if device_id in self.device_monitor.connected_devices.keys():
self.usb_list.append([Gtk.STOCK_YES, True,
self.device_monitor.known_devices[device_id][1],
self.device_monitor.known_devices[device_id][0]["Manufacturer"],
self.device_monitor.known_devices[device_id][0]["Product"]])
else:
self.usb_list.append([Gtk.STOCK_YES, False,
self.device_monitor.known_devices[device_id][1],
self.device_monitor.known_devices[device_id][0]["Manufacturer"],
self.device_monitor.known_devices[device_id][0]["Product"]])
for device_id in self.device_monitor.connected_devices.keys():
if device_id not in self.device_monitor.known_devices.keys():
print (self.device_monitor.connected_devices[device_id][1])
self.usb_list.append([Gtk.STOCK_NO, True,
self.device_monitor.connected_devices[device_id][1],
self.device_monitor.connected_devices[device_id][0]["Manufacturer"],
self.device_monitor.connected_devices[device_id][0]["Product"]])
# Write selected device to file
# The device would be kept in a buffer until the program exits
def write_to_known_devices(self, button):
treeselection = self.treeview.get_selection()
model, treeiter = treeselection.get_selected()
device = {}
if treeiter != None:
if model[treeiter][0] == Gtk.STOCK_YES:
return
if model[treeiter][3]:
device["Manufacturer"] = model[treeiter][3]
if model[treeiter][4]:
device["Product"] = model[treeiter][4]
print(device["Product"])
print(device["Manufacturer"])
busnum, devnum = model[treeiter][2].split("\n")[0].split("Bus")[1].split("Address")
devnum = devnum.split()[0]
dev = usb.core.find(address=int(devnum), bus=int(busnum))
dev_id = read_device.get_descriptors(dev)
self.device_monitor.add_to_known_device(dev_id, device, dev)
model.set_value(treeiter, 0, Gtk.STOCK_YES)
else:
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CANCEL, "A USB device must be selected!")
dialog.format_secondary_text("The selected USB device will be written to a 'know_hosts' file")
dialog.run()
# Remove selected device from file
def remove_from_known_devices(self, button):
treeselection = self.treeview.get_selection()
model, treeiter = treeselection.get_selected()
device = {}
if treeiter != None:
if model[treeiter][0] == Gtk.STOCK_NO:
return
if model[treeiter][3]:
device["Manufacturer"] = model[treeiter][2]
if model[treeiter][4]:
device["Product"] = model[treeiter][3]
busnum, devnum = model[treeiter][2].split("\n")[0].split("Bus")[1].split("Address")
devnum = devnum.split()[0]
dev = usb.core.find(address=int(devnum), bus=int(busnum))
dev_id = read_device.get_descriptors(dev)
self.device_monitor.known_devices.pop(dev_id)
m
|
RossBrunton/BMAT
|
users/migrations/0008_auto_20150712_2143.py
|
Python
|
mit
| 379 | 0 |
# -
|
*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migr
|
ation(migrations.Migration):
dependencies = [
('users', '0007_settings_no_ads'),
]
operations = [
migrations.AlterModelOptions(
name='settings',
options={'verbose_name_plural': 'Settings'},
),
]
|
PragmaticMates/django-invoicing
|
invoicing/migrations/0021_invoice_related_document.py
|
Python
|
gpl-2.0
| 463 | 0 |
# Generated by D
|
jango 2.0.6 on 2018-11-21 08:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('invoicing', '0018_invoice_attachments'),
# ('invoicing', '0020_auto_20181001_1025'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='related_document',
field=models.CharField(blank=T
|
rue, max_length=100),
),
]
|
Yukarumya/Yukarum-Redfoxes
|
testing/firefox-ui/tests/puppeteer/test_security.py
|
Python
|
mpl-2.0
| 1,926 | 0 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from firefox_puppeteer import PuppeteerMixin
from firefox_puppeteer.errors import NoCertificateError
from marionette_harness import MarionetteTestCase
class TestSecurity(PuppeteerMixin, MarionetteTestCase):
def test_get_address_from_certificate(self):
url = 'https://ssl-ev.m
|
ozqa.com'
with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
self.marionette.navigate(url)
cert = self.browser.tabbar.tabs[0].certificate
self.assertIn(cert['co
|
mmonName'], url)
self.assertEqual(cert['organization'], 'Mozilla Corporation')
self.assertEqual(cert['issuerOrganization'], 'DigiCert Inc')
address = self.puppeteer.security.get_address_from_certificate(cert)
self.assertIsNotNone(address)
self.assertIsNotNone(address['city'])
self.assertIsNotNone(address['country'])
self.assertIsNotNone(address['postal_code'])
self.assertIsNotNone(address['state'])
self.assertIsNotNone(address['street'])
def test_get_certificate(self):
url_http = self.marionette.absolute_url('layout/mozilla.html')
url_https = 'https://ssl-ev.mozqa.com'
# Test EV certificate
with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
self.marionette.navigate(url_https)
cert = self.browser.tabbar.tabs[0].certificate
self.assertIn(cert['commonName'], url_https)
# HTTP connections do not have a SSL certificate
with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
self.marionette.navigate(url_http)
with self.assertRaises(NoCertificateError):
self.browser.tabbar.tabs[0].certificate
|
kaathleen/LeapGesture-library
|
DynamicGestures/dlib-18.5/python_examples/max_cost_assignment.py
|
Python
|
mit
| 2,357 | 0.00891 |
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This simple example shows how to call dlib's optimal linear assignment problem solver.
# It is an implementation of the famous Hungarian algorithm and is quite fast, operating in
# O(N^3) time.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
import dlib
# Lets imagine you need to assign N people to N jobs. Additionally, each person will make
# your company a certain amount of money at each job, but each person has different skills
# so they are better at some jobs and worse at others. You would like to find the best way
# to assign people to these jobs. In particular, you would like to maximize the amount of
# money the group makes as a whole. This is an example of an assignment problem and is
# what is solved by the dlib.max_cost_assignment() routine.
# So in this exampl
|
e, lets imagine we have 3 people and 3 jobs. We represent the amount of
# money each person will produce at each job with a cost matrix. Each row corresponds to a
# person and each column corresponds to a job. So for example, belo
|
w we are saying that
# person 0 will make $1 at job 0, $2 at job 1, and $6 at job 2.
cost = dlib.matrix([[1, 2, 6],
[5, 3, 6],
[4, 5, 0]])
# To find out the best assignment of people to jobs we just need to call this function.
assignment = dlib.max_cost_assignment(cost)
# This prints optimal assignments: [2, 0, 1]
# which indicates that we should assign the person from the first row of the cost matrix to
# job 2, the middle row person to job 0, and the bottom row person to job 1.
print "optimal assignments: ", assignment
# This prints optimal cost: 16.0
# which is correct since our optimal assignment is 6+5+5.
print "optimal cost: ", dlib.assignment_cost(cost, assignment)
|
Cheaterman/kivy
|
kivy/core/video/video_ffmpeg.py
|
Python
|
mit
| 2,694 | 0.000371 |
'''
FFmpeg video abstraction
========================
.. versionadded:: 1.0.8
This abstraction requires ffmpeg python extensions. We have made a special
extension that is used for the android platform but can also be used on x86
platforms. The project is available at::
http://github.com/tito/ffmpeg-android
The extension is designed for implementing a
|
video player.
Refer to the documentation of the ffmpeg-android project for more information
about the requirements.
'''
try:
import ffmpeg
except:
raise
from kivy.core.video import VideoBase
from kivy.graphics.texture import Texture
|
class VideoFFMpeg(VideoBase):
def __init__(self, **kwargs):
self._do_load = False
self._player = None
super(VideoFFMpeg, self).__init__(**kwargs)
def unload(self):
if self._player:
self._player.stop()
self._player = None
self._state = ''
self._do_load = False
def load(self):
self.unload()
def play(self):
if self._player:
self.unload()
self._player = ffmpeg.FFVideo(self._filename)
self._player.set_volume(self._volume)
self._do_load = True
def stop(self):
self.unload()
def seek(self, percent, precise=True):
if self._player is None:
return
self._player.seek(percent)
def _do_eos(self):
self.unload()
self.dispatch('on_eos')
super(VideoFFMpeg, self)._do_eos()
def _update(self, dt):
if self._do_load:
self._player.open()
self._do_load = False
return
player = self._player
if player is None:
return
if not player.is_open:
self._do_eos()
return
frame = player.get_next_frame()
if frame is None:
return
# first time we got a frame, we know that video is readed now.
if self._texture is None:
self._texture = Texture.create(size=(
player.get_width(), player.get_height()),
colorfmt='rgb')
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
self._texture.blit_buffer(frame)
self.dispatch('on_frame')
def _get_duration(self):
if self._player is None:
return 0
return self._player.get_duration()
def _get_position(self):
if self._player is None:
return 0
return self._player.get_position()
def _set_volume(self, value):
self._volume = value
if self._player:
self._player.set_volume(self._volume)
|
DocBO/mubosym
|
mubosym/interp1d_interface.py
|
Python
|
mit
| 1,728 | 0.015046 |
# -*- coding: utf-8 -*-
"""
Created on Sat May 16 18:33:20 2015
@author: oliver
"""
from sympy import symbols, lambdify, sign, re, acos, asin, sin, cos, bspline_basis
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
def read_kl(filename):
with open(filename, 'r') as f:
inp = f.read()
inlist = inp.split('\n')
inlist = [ x for x in inlist if
|
x != '']
inlist = [ x for x in inlist if x[0] != '#']
inlist = [x.split(' ') for x in inlist]
#print inlist
x_in = np.array([ float(x[0]) for x in inlist])
y_in = np.array([ float(x[1]) for x in inlist])
return x_in, y_in
class interp(object):
"""
The main connection between an external force characteri
|
zed by a number of points and the mubosym
After running the initialization the base-functions are setup (by means of optimized coefficients)
:param filename: the external file with a list of x y - values (table, separation sign is space), if filename is empty the function f11 is taken instead
:param tst: if true the result of the optimization is plotted
"""
def __init__(self, filename, tst = False):
self.vx, self.vy = read_kl(filename)
self.f_interp = interp1d(self.vx, self.vy, kind = 'linear', bounds_error=False)
# Test:
if tst:
x_dense = np.linspace(-1., 15., 200)
y_dense = []
for xx in x_dense:
y_dense.append(self.f_interp(xx))
lines = plt.plot( x_dense, y_dense )
plt.show()
if __name__ == "__main__":
k = interp(filename = "/home/oliver/python_work/mubosym01/mubosym/vel_01.dat", tst=True)
|
Danielhiversen/home-assistant
|
tests/components/plugwise/test_config_flow.py
|
Python
|
apache-2.0
| 13,553 | 0.000516 |
"""Test the Plugwise config flow."""
from unittest.mock import AsyncMock, MagicMock, patch
from plugwise.exceptions import (
ConnectionFailedError,
InvalidAuthentication,
PlugwiseException,
)
import pytest
from homeassistant import setup
from homeassistant.components.plugwise.const import (
API,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
FLOW_NET,
FLOW_TYPE,
PW_TYPE,
)
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SOURCE,
CONF_USERNAME,
)
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
TEST_HOST = "1.1.1.1"
TEST_HOSTNAME = "smileabcdef"
TEST_HOSTNAME2 = "stretchabc"
TEST_PASSWORD = "test_password"
TEST_PORT = 81
TEST_USERNAME = "smile"
TEST_USERNAME2 = "stretch"
TEST_DISCOVERY = {
"host": TEST_HOST,
"port": DEFAULT_PORT,
"hostname": f"{TEST_HOSTNAME}.local.",
"server": f"{TEST_HOSTNAME}.local.",
"properties": {
"product": "smile",
"version": "1.2.3",
"hostname": f"{TEST_HOSTNAME}.local.",
},
}
TEST_DISCOVERY2 = {
"host": TEST_HOST,
"port": DEFAULT_PORT,
"hostname": f"{TEST_HOSTNAME2}.local.",
"server": f"{TEST_HOSTNAME2}.local.",
"properties": {
"product": "stretch",
"version": "1.2.3",
"hostname": f"{TEST_HOSTNAME2}.local.",
},
}
@pytest.fixture(name="mock_smile")
def mock_smile():
"""Create a Mock Smile for testing exceptions."""
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock:
smile_mock.PlugwiseException = PlugwiseException
smile_mock.InvalidAuthentication = InvalidAuthentication
smile_mock.ConnectionFailedError = ConnectionFailedError
smile_mock.return_value.connect.return_value = True
yield smile_mock.return_value
async def test_form_flow_gateway(hass):
"""Test we get the form for Plugwise Gateway product type."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
assert result["step_id"] == "user_gateway"
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_P
|
ASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
|
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_stretch_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY2,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME2,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_username(hass):
"""Test we get the username data back."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
smile_mock.return_value.connect.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.gateway_id = "abcdefgh12345678"
smile_mock.return_value.smile_hostname = TEST_HOST
smile_mock.return_value.smile_name = "Adam"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_USERNAME: TEST_USERNAME2,
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME2,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result3["type"] == RESULT_TYPE_FORM
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
smile_mock.return_value.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.connect.side_effect
|
sparkica/simex-service
|
service.py
|
Python
|
gpl-2.0
| 1,442 | 0.021498 |
from flask import Flask
from fla
|
sk.ext import restful
from flask.ext.restful import Resource, reqparse
from lxml import html
import urllib2
import json
app = Flask(__name__)
api = restful.Api(app)
parser = reqparse.RequestParser()
parser.add_argument('url', type=str, location='form')
parser.add_argument('xpath', type=str, location='form')
parser.add_argument('attribute', type=str, location='form')
class SimpleExtractor(Resource):
def post(self, **kwargs):
args = parser.parse_args()
source_url = args['url']
element_xpath =
|
args['xpath']
element_attribute = args['attribute']
result = self.parse_html(source_url, element_xpath, element_attribute)
results = {'elements': [{'value': result }]}
return json.dumps(results)
def get(self):
results = {'elements': [{'value':result}]}
return json.dumps(results)
def parse_html(self, source_url, element_xpath="/title", element_attribute=None):
request = urllib2.urlopen(source_url)
page = request.read()
tree = html.fromstring(page)
elements = tree.xpath(element_xpath)
if len(elements) == 0:
return ''
elem_value = elements[0].attrib[element_attribute] if element_attribute else elements[0].text
return elem_value
class BaseExtractor(Resource):
def get(self):
return {'value':'A simple extraction service'}
api.add_resource(BaseExtractor, '/')
api.add_resource(SimpleExtractor, '/extract')
if __name__ == '__main__':
app.run(debug=True)
|
TU-NHM/plutof-taxonomy-module
|
apps/taxonomy/tests/act_tests.py
|
Python
|
gpl-3.0
| 2,683 | 0.003354 |
from django.test import TestCase
from apps.taxonomy.models import Act
from apps.taxonomy.tests import factories
from apps.taxonomy.tests.base import TaxonomyBaseTestMixin
class TestActCreation(TestCase):
def setUp(self):
super(TestActCreation, self).setUp()
factories.TaxonRankFactory(id=0)
def test_creates_act_for_new_taxon(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.post_created()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="new_taxon").count(), 1)
def test_create_edit_name_act(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.epithet = "new epithet"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="edit_name").count(), 1)
def test_create_change_parent_act(self):
taxonnode = TaxonomyBaseTestMixin.create_working_taxonnode()
taxonnode_new_parent = TaxonomyBaseTestMixin.create_working_taxonnode(taxonnode.tree)
taxonnode.post_changed(parent=taxonnode_new_parent)
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_parent").count(), 1)
def test_not_create_change_parent_act_when_did_not_change(self):
taxonnode = TaxonomyBaseTestMixin.create_working_taxonnode()
taxonnode_parent = TaxonomyBaseTestMixin.create_working_taxonnode(taxonnode.tree)
factories.EdgeFactory(ancestor=taxonnode_parent, descendant=taxonnode)
taxonnode.post_changed(parent=taxonnode_parent)
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_parent").count(), 0)
def test_create_change_to_synonym_act(self):
valid_name = factories.TaxonNodeFactory()
taxonnode = factories.TaxonNodeFactory(tree=valid_name.tree)
taxonnode.valid_name = valid_name
taxonnode.synonym_type = "synony
|
m"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="marked_as_synonym").count(), 1)
def test_create_change_to_basionym_act(self):
valid_name = factories.TaxonNodeFactory()
taxonnode = factories.TaxonNodeFactory(tree=valid_name.tree)
taxonnode.valid_name = valid_name
taxonnode.synonym_type = "basionym"
taxonnode.save()
self.a
|
ssertEqual(Act.objects.filter(taxon_node=taxonnode, type="marked_as_basionym").count(), 1)
def test_create_change_nomen_status_act(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.nomenclatural_status = "established"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_nomen_status").count(), 1)
|
compatibleone/accords-platform
|
tools/codegen/OCCI/Backend.py
|
Python
|
apache-2.0
| 544 | 0.011029 |
class Backend(object):
'''
Backend type with a plugi
|
n and zero or more parameters (Parameter functionality is TBD.
Links to categories handled by this backend
'''
def __init__(self, plugin, params):
self._plugin = plugin
self._params = params
self._categories = []
@property
def plugin(self):
return self._plugin
@property
def params(self):
return self._pa
|
rams
def add_category(self, category):
self._categories.append(category)
|
kyouko-taiga/tango
|
tango/transpilers/cpp.py
|
Python
|
apache-2.0
| 8,789 | 0.001252 |
import hashlib
from tango.ast import *
from tango.builtin import Int, Double, String
from tango.types import FunctionType, NominalType, TypeUnion
def transpile(module, header_stream, source_stream):
transpiler = Transpiler(header_stream, source_stream)
transpiler.visit(module)
def compatibilize(name):
result = str(str(name.encode())[2:-1]).replace('\\', '')
for punct in '. ()[]<>-:':
result = result.replace(punct, '')
if result[0].isdigit():
result = '_' + result
return result
operator_translations = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
}
class Functor(object):
def __init__(self, function_type):
self.function_type = function_type
@property
def type_signature(self):
# FIXME This discriminator isn't good enough, as different signatures
# may have the same string representation, since their `__str__`
# implementation doesn't use full names.
discriminator = hashlib.sha1(str(self.function_type).encode()).hexdigest()[-8:]
return compatibilize('Sig' + str(self.function_type) + discriminator)
class Transpiler(Visitor):
def __init__(self, header_stream, source_stream):
self.header_stream = header_stream
self.source_stream = source_stream
self.indent = 0
self.containers = {}
self.functions = {}
self.functors = {}
self.types = {}
def write_header(self, data, end='\n'):
print(' ' * self.indent + data, file=self.header_stream, end=end)
def write_source(self, data, end='\n'):
print(' ' * self.indent + data, file=self.source_stream, end=end)
def visit_ModuleDecl(self, node):
self.write_source('#include "tango.hh"')
self.write_source('')
self.write_source('int main(int argc, char* argv[]) {')
self.indent += 4
self.generic_visit(node)
self.write_source('return 0;')
self.indent -= 4
self.write_source('}')
def visit_ContainerDecl(self, node):
# Write a new variable declaration.
var_type = self.translate_type(node.__info__['type'])
var_name = compatibilize(node.__info__['scope'].name + '_' + node.name)
declaration = var_type + ' ' + var_name
# If the container's has an initial value, write it as well.
if node.initial_value:
declaration += ' = ' + self.translate_expr(node.initial_value)
self.write_source(declaration + ';')
def visit_Call(self, node):
self.write_source(self.translate_expr(node) + ';')
def visit_If(self, node):
assert not node.pattern.parameters, 'TODO pattern matching in if expressions'
condition = self.translate_expr(node.pattern.expression)
self.write_source('if (' + condition + ') {')
self.indent += 4
self.visit(node.body)
self.indent -= 4
self.write_source('}')
if isinstance(node.else_clause, Block):
self.write_source('else {')
self.indent += 4
self.visit(node.else_clause)
self.indent -= 4
self.write_source('}')
elif isinstance(node.else_clause, If):
self.write_source('else')
self.visit(node.else_clause)
def translate_type(self, type_instance):
if isinstance(type_instance, NominalType):
return compatibilize(type_instance.scope.name + '_' + type_instance.name)
if isinstance(type_instance, FunctionType):
# Register a new functor for the parsed function type.
functor = self.functors.get(type_instance)
if functor is None:
functor = Functor(type_instance)
self.functors[type_instance] = functor
return 'std::shared_ptr<' + functor.type_signature + '>'
assert False, 'cannot translate {}'.format(type_instance)
def translate_expr(self, node):
if isinstance(node, Literal):
if node.__info__['type'] == String:
return '"' + node.value + '"'
return node.value
if isinstance(node, Identifier):
# If the identifier is `true` or `false`, we write it as is.
if node.name in ['true', 'false']:
return node.name
# If the identifier isn't a keyword, first, we retrive the entity
# the identifier is denoting.
decls = node.__info__['scope'][node.name]
# If the identifier denotes a simple container, we return its full
# name (i.e. scope + name).
if isinstance(decls[0], ContainerDecl):
return compatibilize(node.__info__['scope'].name + '_' + node.name)
# If the identifier denotes a function declaration, we have to
# know which overload and/or specialization it refers to, so as to
# create a different full name for each case.
if isinstance(decls[0], FunctionDecl):
# If the identifier has a single type non generic type, we can
# use it as is to discriminate the identifier.
node_type = node.__info__['type']
if not isinstance(node_type, TypeUnion) and not node_type.is_generic:
discriminating_type = node_type
# If the identifier was used as the callee of a function call,
# we can expect the type solver to add a `specialized_type`
# key in the node's metadata.
elif 'specialized_type' in node.__info__:
discriminating_type = node.__info__['specialized_type']
# It should be illegal to use an overloaded or generic
# identifier outside of a function call.
else:
assert False, (
"ambiguous use of '{}' wasn't handled by the type disambiguator"
|
.format(node))
# FIXME This discriminator isn't good enough, as different
# signatures may have the same string representation, since
# their `__str__` implementation doesn't use full names.
discriminator = hashlib.sha1(str(discriminating_type).encode()).hexdigest()[-8:]
return compatibilize(node.__info__['scope'].name +
|
'_' + node.name + discriminator)
if isinstance(node, PrefixedExpression):
return '{}.{}({})'.format(
self.translate_type(node.operand.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.operand))
if isinstance(node, BinaryExpression):
return '{}.{}({}, {})'.format(
self.translate_type(node.left.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.left),
self.translate_expr(node.right))
if isinstance(node, Call):
callee_name = self.translate_expr(node.callee)
return '(*({}))({})'.format(
callee_name,
', '.join(map(self.translate_expr, node.arguments)))
if isinstance(node, CallArgument):
return self.translate_expr(node.value)
assert False, 'cannot translate {}'.format(node)
def find_function_implementation(node):
scope = node.callee.__info__['scope']
while scope is not None:
for decl in node.callee.__info__['scope'][node.callee.name]:
# When the object denoted by the identifier is a declaration, it
# means we have to instantiate that declaration.
if isinstance(decl, FunctionDecl):
function_type = decl.__info__['type']
# We select the first non-generic function declaration that
# that matches the signature candidate of the call node.
if function_type == node.__info__['signature_candidate']:
return decl
assert not function_type.is_generic, 'TODO: {} is generic'.format(function_type)
|
michaelwisely/django-competition
|
src/competition/views/competition_views.py
|
Python
|
bsd-3-clause
| 1,178 | 0.000849 |
from django.views.generic import ListView, DetailView
from django.core.exceptions import ObjectDoesNotExist
from competition.models.competition_model import Competition
class CompetitionListView(ListView):
"""Lists every single competition"""
|
context_object_name = 'competitions'
model = Competition
template_name = 'competition/competition/competition_list.html'
paginate_by = 10
class CompetitionDetailView(DetailView):
"""Shows details about a particular competition"""
context_object_name = 'competition'
model = Competition
slug_url_kwarg = 'comp_slug'
template_name = 'competition/competition/competition_de
|
tail.html'
def get_context_data(self, **kwargs):
context = super(CompetitionDetailView, self).get_context_data(**kwargs)
competition = self.object
user = self.request.user
context['user_registered'] = competition.is_user_registered(user)
context['user_team'] = None
try:
if not user.is_anonymous():
context['user_team'] = competition.team_set.get(members=user.pk)
except ObjectDoesNotExist:
pass
return context
|
eLBati/purchase-workflow
|
framework_agreement/model/pricelist.py
|
Python
|
agpl-3.0
| 3,558 | 0.000281 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2013, 2014 Camptocamp SA
#
# This program is free sof
|
tware: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PU
|
RPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.osv import orm, fields
# Using new API seem to have side effect on
# other official addons
class product_pricelist(orm.Model):
"""Add framework agreement behavior on pricelist"""
_inherit = "product.pricelist"
def _plist_is_agreement(self, cr, uid, pricelist_id, context=None):
"""Check that a price list can be subject to agreement.
:param pricelist_id: the price list to be validated
:returns: a boolean (True if agreement is applicable)
"""
p_list = self.browse(cr, uid, pricelist_id, context=context)
return p_list.type == 'purchase'
def price_get(self, cr, uid, ids, prod_id, qty,
partner=None, context=None):
"""Override of price retrival function in order to support framework agreement.
If it is a supplier price list agreement will be taken in account
and use the price of the agreement if required.
If there is not enough available qty on agreement,
standard price will be used.
This is maybe a faulty design and we should use on_change override
"""
if context is None:
context = {}
agreement_obj = self.pool['framework.agreement']
res = super(product_pricelist, self).price_get(
cr, uid, ids, prod_id, qty, partner=partner, context=context)
if not partner:
return res
for pricelist_id in res:
if (pricelist_id == 'item_id' or not
self._plist_is_agreement(cr, uid,
pricelist_id, context=context)):
continue
now = datetime.strptime(fields.date.today(),
DEFAULT_SERVER_DATE_FORMAT)
date = context.get('date') or context.get('date_order') or now
prod = self.pool['product.product'].browse(cr, uid, prod_id,
context=context)
agreement = agreement_obj.get_product_agreement(
cr, uid,
prod.product_tmpl_id.id,
partner,
date,
qty=qty,
context=context
)
if agreement is not None:
currency = agreement_obj._get_currency(
cr, uid, partner, pricelist_id,
context=context
)
res[pricelist_id] = agreement.get_price(qty, currency=currency)
return res
|
ulisespereira/PereiraBrunel2016
|
figure7/plotting.py
|
Python
|
gpl-2.0
| 5,736 | 0.043061 |
import numpy as np
import matplotlib.pyplot as plt
from stimulus import *
from myintegrator import *
from functions import *
import matplotlib.gridspec as gridspec
import cPickle as pickle
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-----------------Stimulation of Populations------------------------
#-------------------------------------------------------------------
# settin`g up the simulation
#times = 100
#delta = 50
#period = 30
patterns=np.identity(n)
patterns=[patterns[:,i] for i in range(n)]
mystim=stimulus(patterns,lagStim,delta,period,times)
mystim.inten=amp
#integrator
npts=int(np.floor(delay/dt)+1) # points delay
tmax=times*(lagStim+n*(period+delta))+100.+mystim.delay_begin
thetmax=tmax+40000
#t = np.linspace(0,thetmax,100000)
u,uI,connectivity,WEI,t = pickle.load(open('dyn_stimulation_SA.p','rb'))
#-----------------------------------------------------------------------------------------
#-------------------------------- Dynamics-----------------------------------------------
#----------------------------------------------------------------------------------------
#initial conditions
tmaxdyn=500
mystim.inten=0.
theintegrator=myintegrator(delay,dt,n,tmaxdyn)
theintegrator.fast=False
#integration
u_ret,uI_ret,connectivity_ret,WEI_ret,t_ret = pickle.load(open('dyn_retrieval_SA.p','rb'))
u_ret_PA,uI_ret_PA,connectivity_ret_PA,WEI_ret_PA,t_ret_PA = pickle.load(open('dyn_retrieval_PA.p','rb'))
#-------------------------------------------------------------------
#-----------------Stimulation of Populations------------------------
#-------------------------------------------------------------------
rc={'axes.labelsize': 32, 'font.size': 30, 'legend.fontsize': 25.0, 'axes.titlesize': 35}
plt.rcParams.update(**rc)
plt.rcParams['image.cmap'] = '
|
jet'
fig = plt.figure(figsize=(19, 11))
gs = gridspec.GridSpec(2, 2)#height_ratios=[3,3,2])
gs.update(wspace=0.44,hspace=0.03)
gs0 = gridspec.GridSpec(2, 2)
gs0.update(wspace=0.05,hspace=0.4,left=0.54,right=1.,top=0.88,bottom
|
=0.1106)
#gs1.update(wspace=0.05,hspace=0.4,left=0.1245,right=1.,top=0.21,bottom=0.05)
# Excitatory and Inhibitory weights
ax1A = plt.subplot(gs[0,0])
ax1B = plt.subplot(gs[1,0])
#sequence
axSA = plt.subplot(gs0[1,0])
axPA = plt.subplot(gs0[1,1])
#stimulation
ax2B= plt.subplot(gs0[0,0])
ax2C= plt.subplot(gs0[0,1])
colormap = plt.cm.Accent
ax2B.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
ax2B.plot(t,phi(u[:,:],theta,uc),lw=3)
mystim.inten=.1
elstim=np.array([sum(mystim.stim(x)) for x in t])
ax2B.plot(t,elstim,'k',lw=3)
ax2B.fill_between(t,np.zeros(len(t)),elstim,alpha=0.5,edgecolor='k', facecolor='darkgrey')
ax2B.set_ylim([0,1.2])
ax2B.set_xlim([0,600])
ax2B.set_yticks([0.5,1])
ax2B.set_xticks([0,200,400])
ax2B.set_xticklabels([0.,.2,.4])
ax2B.set_xlabel('Time (s)')
ax2B.set_ylabel('Rate')
ax2B.set_title('(B)',x=1.028,y=1.04)
ax2C.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
ax2C.plot(t,phi(u[:,:],theta,uc),lw=3)
mystim.inten=.1
elstim=np.array([sum(mystim.stim(x)) for x in t])
ax2C.plot(t,elstim,'k',lw=3)
ax2C.fill_between(t,np.zeros(len(t)),elstim,alpha=0.5,edgecolor='k', facecolor='darkgrey')
ax2C.set_xlim([89475,90075])
ax2C.set_xticks([89500,89700,89900])
ax2C.set_xticklabels([89.5,89.7,89.9])
ax2C.set_ylim([0,1.2])
ax2C.set_yticks([])
ax2C.set_xlabel('Time (s)')
#ax2C.set_ylabel('Rate')
#----------------------------------------------------------------------
#------------Synaptic Weights------------------------------------------
#----------------------------------------------------------------------
for i in range(10):
ax1A.plot(t,connectivity[:,i,i],'c',lw=3)
for i in range(0,9):
ax1A.plot(t,connectivity[:,i+1,i],'y',lw=3)
for i in range(8):
ax1A.plot(t,connectivity[:,i+2,i],'g',lw=3)
for i in range(9):
ax1A.plot(t,connectivity[:,i,i+1],'r',lw=3)
for i in range(8):
ax1A.plot(t,connectivity[:,i,i+2],'b',lw=3)
ax1A.set_xticks([])
ax1A.axvline(x=tmax,ymin=0,ymax=2.,linewidth=2,ls='--',color='gray',alpha=0.7)
#ax1A.set_xticklabels([0,50,100,150])
ax1A.set_ylim([0,1.8])
ax1A.set_xlim([0,250000])
ax1A.set_yticks([0,0.5,1.,1.5])
#ax1A.set_xlabel('Time (s)')
ax1A.set_ylabel('Synaptic Weights')
ax1A.set_title('(A)',y=1.04)
#------------------------------------------------------------------------
#-------------Homeostatic Variable --------------------------------------
#------------------------------------------------------------------------
ax1B.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
ax1B.plot(t,WEI[:],lw=3)
ax1B.axvline(x=tmax,ymin=0,ymax=2.,linewidth=2,ls='--',color='gray',alpha=0.7)
ax1B.set_ylim([0.,3.4])
ax1B.set_yticks([0.,1.,2.,3.])
ax1B.set_xlim([0,250000])
ax1B.set_xticks([0,50000,100000,150000,200000,250000])
ax1B.set_xticklabels([0,50,100,150,200,250])
ax1B.set_xlabel('Time (s)')
ax1B.set_ylabel(r'$W_{EI}$')
#plot sequence
axSA.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
axSA.plot(t_ret,phi(u_ret[:,:],theta,uc),lw=5)
axSA.set_ylim([0,1.2])
axSA.set_xlim([0,370])
axSA.set_xticks([0,100,200,300])
axSA.set_yticks([0.5,1])
axSA.set_xlabel('Time (ms)')
axSA.set_ylabel('Rate')
#axSA.set_title('(C)',y=1.04)
axSA.set_title('(C)',x=1.028,y=1.04)
# plot PA
axPA.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
axPA.plot(t_ret_PA,phi(u_ret_PA[:,:],theta,uc),lw=5)
axPA.set_ylim([0,1.2])
axPA.set_xlim([0,370])
axPA.set_xticks([0,100,200,300])
axPA.set_yticks([])
axPA.set_xlabel('Time (ms)')
#plt.show()
plt.savefig('fig6.pdf', bbox_inches='tight')
|
jnsebgosselin/WHAT
|
gwhat/common/styles.py
|
Python
|
gpl-3.0
| 1,792 | 0.001117 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
# Standard library imports :
import platform
# Third party imports :
from PyQt5.QtGui import QIcon, QFont, QFontDatabase
from PyQt5.QtCore import QSize
class StyleDB(object):
def __init__(self):
# ---- frame
self.frame = 22
self.HLine = 52
self.VLine = 53
self.sideBarWidth = 275
# ----- colors
self.red = '#C83737'
self.lightgray = '#E6E6E6'
self.rain = '#0000CC'
self.snow = '0.7'
self.wlvl = '#0000CC' # '#000099'
if platform.system() == 'Windows':
self.font1 = QFont('Segoe UI', 11) # Calibri, Cambria
self.font_console = QFont('Segoe UI', 9)
self.font_menubar = QFont('Segoe UI', 10)
elif platform.system() == 'Linux':
self.font1 = QFont('Ubuntu', 11)
self.font_console = QFont('Ubuntu', 9)
self.font_menubar = QFont('Ubuntu', 10)
# database = QFontDatabase()
# print database.families()
if platform.system() == 'Windows':
self.fontfamily = "Segoe UI" # "Cambria" #"Calibri" #"Segoe UI""
elif platform.system() == 'Lin
|
ux':
self.fontfamily = "Ubuntu"
# self.fontSize1.setPointSize(11)
# 17 = QtGui.QFrame.Box | QtGui.QFrame.Plain
# 22 = QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain
# 20 = QtGui.QFrame.HLine | QtGui.QFr
|
ame.Plain
# 52 = QtGui.QFrame.HLine | QtGui.QFrame.Sunken
# 53 = QtGui.QFrame.VLine | QtGui.QFrame.Sunken
|
chrisglass/buildout-django_base_project
|
myproject/settings.py
|
Python
|
bsd-3-clause
| 5,070 | 0.001578 |
# Django settings for myproject project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.database', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on al
|
l operating systems.
# On Unix systems, a val
|
ue of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '!+inmeyi&8(l8o^60a*i#xf6a!%!@qp-0+kk2%+@aui2x5!x=5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'myproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'project',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
moiseslorap/RIT
|
Intro to Software Engineering/Release 2/HealthNet/HealthNet/wsgi.py
|
Python
|
mit
| 395 | 0 |
"""
WSGI config for HealthNet pro
|
ject.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/w
|
sgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HealthNet.settings")
application = get_wsgi_application()
|
netantho/MozDef
|
tests/conftest.py
|
Python
|
mpl-2.0
| 2,263 | 0.028281 |
import pytest
import tempfile
import os
import ConfigParser
def getConfig(optionname,thedefault,section,configfile):
"""read an option from a config
|
file or set a default
send 'thedefault' as the data class you want to get a string back
i.e. 'True' will return a string
True will return a bool
1 will return an int
"""
#getConfig('something','adefaultvalue')
retvalue=thedefault
opttype=type(thedefault)
if os.path.isfile(configfile):
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
if config.has_option(sect
|
ion,optionname):
if opttype==bool:
retvalue=config.getboolean(section,optionname)
elif opttype==int:
retvalue=config.getint(section,optionname)
elif opttype==float:
retvalue=config.getfloat(section,optionname)
else:
retvalue=config.get(section,optionname)
return retvalue
@pytest.fixture
def options():
options=dict()
configFile='setup.cfg'
if pytest.config.inifile:
configFile=str(pytest.config.inifile)
options["esserver"]=getConfig('esserver','localhost:9200','mozdef',configFile)
options["loginput"]=getConfig('loginput','localhost:8080','mozdef',configFile)
options["webuiurl"]=getConfig('webuiurl','http://localhost/','mozdef',configFile)
options["kibanaurl"]=getConfig('kibanaurl','http://localhost:9090/','mozdef',configFile)
if pytest.config.option.verbose > 0:
options["verbose"]=True
print('Using options: \n\t%r' % options)
else:
options["verbose"]=False
return options
@pytest.fixture()
def cleandir():
newpath = tempfile.mkdtemp()
os.chdir(newpath)
def pytest_report_header(config):
if config.option.verbose > 0:
return ["reporting verbose test output"]
#def pytest_addoption(parser):
#parser.addoption("--esserver",
#action="store",
#default="localhost:9200",
#help="elastic search servers to use for testing")
#parser.addoption("--mozdefserver",
#action="store",
#default="localhost:8080",
#help="mozdef server to use for testing")
|
stellaf/sales_rental
|
sale_rental/models/product.py
|
Python
|
gpl-3.0
| 2,194 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
# Copyright 2016 Sodexis (http://sodexis.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api, _
from openerp.exceptions import ValidationError
class ProductProduct(models.Model):
_inherit = 'product.product'
# Link rental service -> rented HW product
rented_product_id = fields.Many2one(
'product.product', string='Related Rented Product',
domain=[('type', 'in', ('product', 'consu'))])
# Link rented HW product -> rental service
rental_service_ids = fields.One2many(
|
'product.product', 'rented_product_id',
string='Related Rental Services')
@api.one
@api.constrains('rented_product_id', 'must_have_dates', 'type', 'uom_id')
def _check_rental(self):
if self.rented_product_id and self.type != 'service':
raise ValidationError(_(
"The r
|
ental product '%s' must be of type 'Service'.")
% self.name)
if self.rented_product_id and not self.must_have_dates:
raise ValidationError(_(
"The rental product '%s' must have the option "
"'Must Have Start and End Dates' checked.")
% self.name)
# In the future, we would like to support all time UoMs
# but it is more complex and requires additionnal developments
day_uom = self.env.ref('product.product_uom_day')
if self.rented_product_id and self.uom_id != day_uom:
raise ValidationError(_(
"The unit of measure of the rental product '%s' must "
"be 'Day'.") % self.name)
@api.multi
def _need_procurement(self):
# Missing self.ensure_one() in the native code !
res = super(ProductProduct, self)._need_procurement()
if not res:
for product in self:
if product.type == 'service' and product.rented_product_id:
return True
# TODO find a replacement for soline.rental_type == 'new_rental')
return res
|
CloudifySource/cloudify-aws
|
setup.py
|
Python
|
apache-2.0
| 1,176 | 0 |
########
# Copyright (c) 2013 G
|
igaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law
|
or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
__author__ = 'Ganesh'
from setuptools import setup
version = '0.3'
setup(
name='cloudify-aws',
version=version,
author='ran',
author_email='ganeshpandi.g@cloudenablers.com',
packages=['cloudify_aws'],
license='LICENSE',
description='the cloudify amazon provider',
package_data={'cloudify_aws': ['cloudify-config.yaml',
'cloudify-config.defaults.yaml']},
install_requires=[
"scp",
"fabric",
"jsonschema",
"IPy", 'boto'
]
)
|
christophmark/bayesloop
|
bayesloop/fileIO.py
|
Python
|
mit
| 947 | 0.002112 |
#!/usr/bin/env python
"""
The following functions save or load instances of all `Study` types using the Python package `dill`.
"""
from __future__ import division, print_function
import dill
def save(filename, study):
"""
Save an instance
|
of a bayesloop study class to file.
Args:
filename(str): Path + filename to store bayesloop study
study: Instance of study class (Study, HyperStudy, etc.)
"""
with open(filename, 'wb') as f:
dill.dump(study, f, protocol=dill.HIGHEST_PROTOCOL)
print('+ Successfully saved current study.')
def load(filename):
"""
Load an instance of a bayesloop study class that was saved using the bayesloop.s
|
ave() function.
Args:
filename(str): Path + filename to stored bayesloop study
Returns:
Study instance
"""
with open(filename, 'rb') as f:
S = dill.load(f)
print('+ Successfully loaded study.')
return S
|
pvarenik/PyCourses
|
model/group.py
|
Python
|
gpl-2.0
| 592 | 0.005068 |
__author__ = 'pvarenik'
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
|
def __repr__(self):
return "%s,%s,%s,%s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.i
|
d) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
|
brianmckinneyrocks/django-social-auth
|
contrib/tests/test_core.py
|
Python
|
bsd-3-clause
| 5,521 | 0.00163 |
# -*- coding: utf-8 -*-
import urlparse
from selenium import webdriver
from django.test import TestCase
from django.conf import settings
class BackendsTest(TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
def tearDown(self):
self.driver.quit()
def url(self, path):
return urlparse.urljoin(settings.TEST_DOMAIN, path)
def test_twitter_backend(self):
# We grab the Twitter testing user details from settings file
TEST_TWITTER_USER = getattr(settings, 'TEST_TWITTER_USER', None)
TEST_TWITTER_PASSWORD = getattr(settings, 'TEST_TWITTER_PASSWORD', None)
self.assertTrue(TEST_TWITTER_USER)
self.assertTrue(TEST_TWITTER_PASSWORD)
self.driver.get(self.url('/login/twitter/'))
# We log in
username_field = self.driver.find_element_by_id('username_or_email')
username_field.send_keys(TEST_TWITTER_USER)
password_field = self.driver.find_element_by_id('password')
password_field.send_keys(TEST_TWITTER_PASSWORD)
password_field.submit()
# The application might be already allowed
try:
self.driver.find_element_by_id('allow').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_google_oauth_backend(self):
TEST_GOOGLE_USER = getattr(settings, 'TEST_GOOGLE_USER', None)
TEST_GOOGLE_PASSWORD = getattr(settings, 'TEST_GOOGLE_PASSWORD', None)
self.assertTrue(TEST_GOOGLE_USER)
self.assertTrue(TEST_GOOGLE_PASSWORD)
self.driver.get(self.url('/login/google-oauth/'))
# We log in
username_field = self.driver.find_element_by_id('Email')
username_field.send_keys(TEST_GOOGLE_USER)
password_field = self.driver.find_element_by_id('Passwd')
password_field.send_keys(TEST_GOOGLE_PASSWORD)
password_field.submit()
# The application might be already allowed
try:
self.driver.find_element_by_id('allow').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_google_oauth2_backend(self):
TEST_GOOGLE_USER = getattr(settings, 'TEST_GOOGLE_USER', None)
TEST_GOOGLE_PASSWORD = getattr(settings, 'TEST_GOOGLE_PASSWORD', None)
self.assertTrue(TEST_GOOGLE_USER)
self.assertTrue(TEST_GOOGLE_PASSWORD)
self.driver.get(self.url('/login/google-oauth2/'))
# We log in
username_field = self.driver.find_element_by_id('Email')
username_field.send_keys(TEST_GOOGLE_USER)
password_field = self.driver.find_element_by_id('Passwd')
password_field.send_keys(TEST_GOOGLE_PASSWORD)
password_field.submit()
# The application might be already allowed
try:
self.driver.find_element_by_id('submit_approve_access').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_facebook_backend(self):
TEST_FACEBOOK_USER = getattr(settings, 'TEST_FACEBOOK_USER', None)
TEST_FACEBOOK_PASSWORD = getattr(settings, 'TEST_FACEBOOK_PASSWORD', None)
self.assertTrue(TEST_FACEBOOK_USER)
self.assertTrue
|
(TEST_FACEBOOK_PASSWORD)
self.driver.get(self.url('/login/facebook/'))
# We log in
username_field = self.driver.find_element_by_id('email')
username_field.send_keys(TEST_FACEBOOK_USER)
password_field = self.driver.find_elemen
|
t_by_id('pass')
password_field.send_keys(TEST_FACEBOOK_PASSWORD)
password_field.submit()
try:
self.driver.find_element_by_name('grant_clicked').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_linkedin_backend(self):
TEST_LINKEDIN_USER = getattr(settings, 'TEST_LINKEDIN_USER', None)
TEST_LINKEDIN_PASSWORD = getattr(settings, 'TEST_LINKEDIN_PASSWORD', None)
self.assertTrue(TEST_LINKEDIN_USER)
self.assertTrue(TEST_LINKEDIN_PASSWORD)
self.driver.get(self.url('/login/linkedin/'))
# We log in
username_field = self.driver.find_element_by_id('session_key-oauthAuthorizeForm')
username_field.send_keys(TEST_LINKEDIN_USER)
password_field = self.driver.find_element_by_id('session_password-oauthAuthorizeForm')
password_field.send_keys(TEST_LINKEDIN_PASSWORD)
password_field.submit()
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
|
Khan/reviewboard
|
reviewboard/attachments/admin.py
|
Python
|
mit
| 582 | 0 |
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from reviewboard.attachments.model
|
s import FileAttachment
class FileAttachmentAdmin(admin.ModelAdmin):
list_display = ('file', 'caption', 'mimetype',
'review_request_id')
list_display_links = ('file', 'caption')
search_fields = ('caption', 'mimetype')
def review_request_id(self, obj):
return obj.review_request.get().id
review_request_id.short_description = _('Review request ID')
admin.site.register(FileAttachment, Fil
|
eAttachmentAdmin)
|
SKIRT/PTS
|
evolve/genomes/nucleobases.py
|
Python
|
agpl-3.0
| 1,754 | 0.001711 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.evolve.genomes.nucleobases
# -----------------------------------------------------------------
# Import other evolve modules
from ..core.genome import GenomeBase, G1DBase
from ..core import constants
# -----------------
|
------------------------------------------------
class N
|
ucleoBases(G1DBase):
"""
NucleoBases genome
"""
__slots__ = ["nbases"]
# -----------------------------------------------------------------
def __init__(self, nbases):
"""
The initializator of the NucleoBases genome representation
"""
# Call the constructor of the base class
super(NucleoBases, self).__init__(nbases)
# Set nbases
self.nbases = nbases
# Set function slots
self.initializator.set(constants.CDefG1DBinaryStringInit)
self.mutator.set(constants.CDefG1DBinaryStringMutator)
self.crossover.set(constants.CDefG1DBinaryStringCrossover)
# -----------------------------------------------------------------
def __repr__(self):
"""
Return a string representation of the genome
"""
ret = GenomeBase.__repr__(self)
ret += "- G1DBinaryString\n"
ret += "\tNumber of bases:\t %s\n" % (self.getListSize(),)
ret += "\tBases:\t\t" + "".join(self.genomeList) + "\n\n"
return ret
# -----------------------------------------------------------------
|
SeedScientific/polio
|
datapoints/migrations/0039_auto__chg_field_region_region_code.py
|
Python
|
agpl-3.0
| 15,288 | 0.008242 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Region.region_code'
db.alter_column('region', 'region_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=55))
def backwards(self, orm):
# Changing field 'Region.region_code'
db.alter_column('region', 'region_code', self.gf('django.db.models.fields.CharField')(max_length=10, unique=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.aggregationexpecteddata': {
'Meta': {'object_name': 'AggregationExpectedData', 'db_table': "'aggregation_expected_data'"},
'aggregation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.AggregationType']"}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '55', 'populate_from': "('aggregation_type', 'content_type')"})
},
u'datapoints.aggregationtype': {
'Meta': {'object_name': 'AggregationType', 'db_table': "'aggregation_type'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'display_name_w_sub': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'datapoints.campaign': {
'Meta': {'ordering': "('-start_date',)", 'unique_together': "(('office', 'start_date'),)", 'object_name': 'Campaign', 'db_table': "'campaign'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'get_full_name'"}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'datapoints.datapoint': {
'Meta': {'ordering': "['reg
|
ion', 'campaign']", 'unique_together': "(('indicator', 'region', 'campaign'),)", 'object_name': 'DataPoint', 'db_table': "'datapoint'"},
'campaign': ('django.db.mode
|
ls.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'source_datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.SourceDataPoint']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
u'datapoints.indicator': {
'Meta': {'ordering': "('name',)", 'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'popul
|
CKehl/pylearn2
|
pylearn2/scripts/tutorials/convolutional_network/autoencoder.py
|
Python
|
bsd-3-clause
| 201 | 0.004975 |
from pylearn2.models.mlp import M
|
LP
class Autoencoder(MLP):
"""
An MLP whose output domain is the same as its input domain.
"""
def get_target_source(self):
retu
|
rn 'features'
|
tpeek/Copy-n-Haste
|
CopyHaste/cnh_profile/migrations/0002_auto_20150810_1822.py
|
Python
|
mit
| 764 | 0.002618 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrati
|
ons
class Migration(migrations.Migration):
dependencies = [
('cnh_profile', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='cnhprofile',
name='website_url',
),
migrations.AddField(
model_name='cnhprofile',
name='we
|
bsite',
field=models.URLField(help_text=b'What is your website URL?', blank=True),
),
migrations.AlterField(
model_name='cnhprofile',
name='nickname',
field=models.CharField(help_text=b'What is your nickname', max_length=16, null=True, blank=True),
),
]
|
jpshort/odoo
|
marcos_addons/marcos_stock/__openerp__.py
|
Python
|
agpl-3.0
| 2,134 | 0.001406 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any l
|
ater version.
#
# T
|
his program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Marcos Stock",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Your Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'stock', 'stock_account', 'account', 'purchase'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'wizard/inventory_import_view.xml',
'wizard/stock_invoice_onshipping_view.xml',
'templates.xml',
'wizard/stock_return_picking_view.xml',
'invoice_link/stock_view.xml',
'invoice_link/account_invoice_view.xml'
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
}
|
wallnerryan/quantum_migrate
|
quantum/agent/linux/external_process.py
|
Python
|
apache-2.0
| 3,644 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from oslo.config import cfg
from quantum.agent.linux import ip_lib
from quantum.agent.linux import utils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
]
cfg.CONF.register_opts(OPTS)
class ProcessManager(object):
"""An external process manager for Quantum spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, root_helper='sudo', namespace=None):
self.conf = conf
self.uuid = uuid
self.root_helper = root_helper
self.namespace = namespace
def enable(self, cmd_callback):
if not self.active:
cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))
if self.namespace:
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd)
else:
# For normal sudo prepend the env vars before command
utils.execute(cmd, self.root_helper)
def disable(self):
pid = self.pid
if self.active:
cmd = ['kill', '-9', pid]
if self.namespace:
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd)
else:
utils.execute(cmd, self.root_helper)
elif pid:
LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'command'), {'uuid': self.uuid, 'pid': pid})
else:
LOG.debug(_('No process started for %s'), self.uuid)
def get_pid_file_name(self, ensure_pids_dir=False):
"""Returns the fil
|
e name for a given kind of config file."""
pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids))
if ensure_pids_dir and not os.path.isdir(pids_dir):
os.makedirs(pids_d
|
ir, 0755)
return os.path.join(pids_dir, self.uuid + '.pid')
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
file_name = self.get_pid_file_name()
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
return int(f.read())
except IOError, e:
msg = _('Unable to access %s')
except ValueError, e:
msg = _('Unable to convert value in %s')
LOG.debug(msg, file_name)
return None
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmd = ['cat', '/proc/%s/cmdline' % pid]
try:
return self.uuid in utils.execute(cmd, self.root_helper)
except RuntimeError, e:
return False
|
google/eng-edu
|
ml/guides/text_classification/train_sequence_model.py
|
Python
|
apache-2.0
| 5,062 | 0 |
"""Module to train sequence model.
Vectorizes training and validation texts into sequences and uses that for
training a sequence model - a sepCNN model. We use sequence model for text
classification when the ratio of number of samples to number of words per
sample for the given dataset is very large (>~15K).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import tensorflow as tf
import numpy as np
import build_model
import load_data
import vectorize_data
import explore_data
FLAGS = None
# Limit on the number of features. We use the top 20K features.
TOP_K = 20000
def train_sequence_model(data,
learning_rate=1e-3,
epochs=1000,
batch_size=128,
blocks=2,
filters=64,
dropout_rate=0.2,
embedding_dim=200,
kernel_size=3,
pool_size=3):
"""Trains s
|
equence model
|
on the given dataset.
# Arguments
data: tuples of training and test texts and labels.
learning_rate: float, learning rate for training model.
epochs: int, number of epochs.
batch_size: int, number of samples per batch.
blocks: int, number of pairs of sepCNN and pooling blocks in the model.
filters: int, output dimension of sepCNN layers in the model.
dropout_rate: float: percentage of input to drop at Dropout layers.
embedding_dim: int, dimension of the embedding vectors.
kernel_size: int, length of the convolution window.
pool_size: int, factor by which to downscale input at MaxPooling layer.
# Raises
ValueError: If validation data has label values which were not seen
in the training data.
"""
# Get the data.
(train_texts, train_labels), (val_texts, val_labels) = data
# Verify that validation labels are in the same range as training labels.
num_classes = explore_data.get_num_classes(train_labels)
unexpected_labels = [v for v in val_labels if v not in range(num_classes)]
if len(unexpected_labels):
raise ValueError('Unexpected label values found in the validation set:'
' {unexpected_labels}. Please make sure that the '
'labels in the validation set are in the same range '
'as training labels.'.format(
unexpected_labels=unexpected_labels))
# Vectorize texts.
x_train, x_val, word_index = vectorize_data.sequence_vectorize(
train_texts, val_texts)
# Number of features will be the embedding input dimension. Add 1 for the
# reserved index 0.
num_features = min(len(word_index) + 1, TOP_K)
# Create model instance.
model = build_model.sepcnn_model(blocks=blocks,
filters=filters,
kernel_size=kernel_size,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate,
pool_size=pool_size,
input_shape=x_train.shape[1:],
num_classes=num_classes,
num_features=num_features)
# Compile model with learning parameters.
if num_classes == 2:
loss = 'binary_crossentropy'
else:
loss = 'sparse_categorical_crossentropy'
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])
# Create callback for early stopping on validation loss. If the loss does
# not decrease in two consecutive tries, stop training.
callbacks = [tf.keras.callbacks.EarlyStopping(
monitor='val_loss', patience=2)]
# Train and validate model.
history = model.fit(
x_train,
train_labels,
epochs=epochs,
callbacks=callbacks,
validation_data=(x_val, val_labels),
verbose=2, # Logs once per epoch.
batch_size=batch_size)
# Print results.
history = history.history
print('Validation accuracy: {acc}, loss: {loss}'.format(
acc=history['val_acc'][-1], loss=history['val_loss'][-1]))
# Save model.
model.save('rotten_tomatoes_sepcnn_model.h5')
return history['val_acc'][-1], history['val_loss'][-1]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data',
help='input data directory')
FLAGS, unparsed = parser.parse_known_args()
# Using the Rotten tomatoes movie reviews dataset to demonstrate
# training sequence model.
data = load_data.load_rotten_tomatoes_sentiment_analysis_dataset(
FLAGS.data_dir)
train_sequence_model(data)
|
lig/pystardict
|
pystardict.py
|
Python
|
gpl-3.0
| 19,916 | 0.001308 |
import gzip
import hashlib
import os
import re
import warnings
from struct import unpack
import six
class _StarDictIfo(object):
"""
The .ifo file has the following format:
StarDict's dict ifo file
version=2.4.2
[options]
Note that the current "version" string must be "2.4.2" or "3.0.0". If it's not,
then StarDict will refuse to read the file.
If version is "3.0.0", StarDict will parse the "idxoffsetbits" option.
[options]
---------
In the example above, [options] expands to any of the following lines
specifying information about the dictionary. Each option is a keyword
followed by an equal sign, then the value of that option, then a
newline. The options may be appear in any order.
Note that the dictionary must have at least a bookname, a wordcount and a
idxfilesize, or the load will fail. All other information is optional. All
strings should be encoded in UTF-8.
Available options:
bookname= // required
wordcount= // required
synwordcount= // required if ".syn" file exists.
idxfilesize= // required
idxoffsetbits= // New in 3.0.0
author=
email=
website=
description= // You can use <br> for new line.
date=
sametypesequence= // very important.
"""
def __init__(self, dict_prefix, container):
ifo_filename = '%s.ifo' % dict_prefix
try:
_file = open(ifo_filename)
except Exception as e:
raise Exception('ifo file opening error: "{}"'.format(e))
_file.readline()
# skipping ifo header
_line = _file.readline().split('=')
if _line[0] == 'version':
self.version = _line[1]
else:
raise Exception('ifo has invalid format')
_config = {}
for _line in _file:
_line_splited = _line.split('=')
_config[_line_splited[0]] = _line_splited[1]
_file.close()
self.bookname = _config.get('bookname', None).strip()
if self.bookname is None:
raise Exception('ifo has no bookname')
self.wordcount = _config.get('wordcount', None)
if self.wordcount is None:
raise Exception('ifo has no wordcount')
self.wordcount = int(self.wordcount)
if self.version == '3.0.0':
try:
#_syn = open('%s.syn' % dict_prefix) # not used
self.synwordcount = _config.get('synwordcount', None)
if self.synwordcount is None:
raise Exception(
'ifo has no synwordcount but .syn file exists')
self.synwordcount = int(self.synwordcount)
except IOError:
pass
self.idxfilesize = _config.get('idxfilesize', None)
if self.idxfilesize is None:
raise Exception('ifo has no idxfilesize')
self.idxfilesize = int(self.idxfilesize)
self.idxoffsetbits = _config.get('idxoffsetbits', 32)
self.idxoffsetbits = int(self.idxoffsetbits)
self.author = _config.get('author', '').strip()
self.email = _config.get('email', '').strip()
self.website =
|
_config.get('website', '').strip()
self.description = _config.get('description', '').strip()
self.date = _config.get('date', '').strip()
self.sametypesequence = _config.get('sametypesequence', '').strip()
class _StarDictIdx(object):
"""
The .idx file is just a word list.
The word list is a sorted list of word entries.
Each entry in the word list contains three fields,
|
one after the other:
word_str; // a utf-8 string terminated by '\0'.
word_data_offset; // word data's offset in .dict file
word_data_size; // word data's total size in .dict file
"""
def __init__(self, dict_prefix, container):
self._container = container
idx_filename = '%s.idx' % dict_prefix
idx_filename_gz = '%s.gz' % idx_filename
try:
file = open_file(idx_filename, idx_filename_gz)
except Exception as e:
raise Exception('idx file opening error: "{}"'.format(e))
self._file = file.read()
""" check file size """
if file.tell() != container.ifo.idxfilesize:
raise Exception('size of the .idx file is incorrect')
file.close()
""" prepare main dict and parsing parameters """
self._idx = {}
idx_offset_bytes_size = int(container.ifo.idxoffsetbits / 8)
idx_offset_format = {4: 'L', 8: 'Q', }[idx_offset_bytes_size]
idx_cords_bytes_size = idx_offset_bytes_size + 4
""" parse data via regex """
record_pattern = br'([\d\D]+?\x00[\d\D]{' + str(
idx_cords_bytes_size).encode('utf-8') + br'})'
matched_records = re.findall(record_pattern, self._file)
""" check records count """
if len(matched_records) != container.ifo.wordcount:
raise Exception('words count is incorrect')
""" unpack parsed records """
for matched_record in matched_records:
c = matched_record.find(b'\x00')
if c == 0:
continue
record_tuple = unpack(
'!%sc%sL' % (c + 1, idx_offset_format), matched_record)
word, cords = record_tuple[:c], record_tuple[c + 1:]
self._idx[b''.join(word)] = cords
def __getitem__(self, word):
"""
returns tuple (word_data_offset, word_data_size,) for word in .dict
@note: here may be placed flexible search realization
"""
return self._idx[word.encode('utf-8')]
def __contains__(self, k):
"""
returns True if index has a word k, else False
"""
return k.encode('utf-8') in self._idx
def __eq__(self, y):
"""
returns True if hashlib.md5(x.idx) is equal to hashlib.md5(y.idx), else False
"""
return hashlib.md5(self._file).hexdigest() == hashlib.md5(y._file).hexdigest()
def __ne__(self, y):
"""
returns True if hashlib.md5(x.idx) is not equal to hashlib.md5(y.idx), else False
"""
return not self.__eq__(y)
def iterkeys(self):
"""
returns iterkeys
"""
if not self._container.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
for key in six.iterkeys(self._idx):
yield key.decode('utf-8')
def keys(self):
"""
returns keys
"""
if six.PY3:
return self.iterkeys()
if not self._container.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
return [key.decode('utf-8') for key in self._idx.keys()]
class _StarDictDict(object):
"""
The .dict file is a pure data sequence, as the offset and size of each
word is recorded in the corresponding .idx file.
If the "sametypesequence" option is not used in the .ifo file, then
the .dict file has fields in the following order:
==============
word_1_data_1_type; // a single char identifying the data type
word_1_data_1_data; // the data
word_1_data_2_type;
word_1_data_2_data;
...... // the number of data entries for each word is determined by
// word_data_size in .idx file
word_2_data_1_type;
word_2_data_1_data;
......
==============
It's important to note that each field in each word indicates its
own length, as described below. The number of possible fields per
word is also not fixed, and is determined by simply reading data until
you've read word_data_size bytes for that word.
Suppose the "sametypesequence" option is used in the .idx file, and
the option is set like this:
sametypesequence=tm
Then the .dict file will look like this:
==============
word_1_data_1_data
word_1_data_2_data
word_2_data_1_data
word_2_data_2_data
......
==============
The
|
aronparsons/spacewalk
|
backend/server/action/kickstart_guest.py
|
Python
|
gpl-2.0
| 4,321 | 0.001389 |
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
from spacewalk.server.rhnLib import InvalidAction, ShadowAction
from spacewalk.server.action.utils import SubscribedChannel, \
ChannelPackage, \
PackageInstallScheduler, \
NoActionInfo, \
PackageNotFound
from spacewalk.server.rhnChannel import subscribe_to_tools_channel
__rhnexport__ = ['initiate', 'schedule_virt_guest_pkg_install', 'add_tools_channel']
_query_initiate_guest = rhnSQL.Statement("""
select ksd.label as profile_name, akg.kickstart_host, kvt.label as virt_type,
akg.mem_kb, akg.vcpus, akg.disk_path, akg.virt_bridge, akg.cobbler_system_name,
akg.disk_gb, akg.append_string,
akg.guest_name, akg.ks_session_id from rhnActionKickstartGuest akg,
rhnKSData ksd, rhnKickstartSession ksess,
rhnKickstartDefaults ksdef, rhnKickstartVirtualizationType kvt
where akg.action_id = :action_id
and ksess.kickstart_id = ksd.id
and ksess.id = akg.ks_session_id
and ksdef.kickstart_id = ksd.id
and ksdef.virtualization_type = kvt.id
""")
def schedule_virt_guest_pkg_install(server_id, action_id, dry_run=0):
"""
ShadowAction that schedules a package installation action for the
rhn-virtualization-guest package.
"""
log_debug(3)
virt_host_package_name = "rhn-virtualization-guest"
tools_channel = SubscribedChannel(server_id, "rhn-tools")
found_tools_channel = tools_channel.is_subscribed_to_channel()
if not found_tools_channel:
raise InvalidAction("System not subscribed to the RHN Tools channel.")
rhn_v12n_package = ChannelPackage(server_id, virt_host_package_name)
if not rhn_v12n_package.exists():
raise InvalidAction("Could not find the rhn-virtualization-guest package.")
try:
install_scheduler = PackageInstallScheduler(server_id, action_id, rhn_v12n_package)
if (not dry_run):
install_scheduler.schedule_package_install()
else:
log_debug(4, "dry run requested")
except NoActionInfo, nai:
raise InvalidAction(str(nai)), None, sys.exc_info()[2]
except PackageNotFound, pnf:
raise InvalidAction(str(pnf)), None, sys.exc_info()[2]
except Exception, e:
|
raise InvalidAction(str(e)), None, sys.exc_info()[2]
log_debug(3, "Completed scheduling install of rhn-virtualization-guest!")
raise ShadowAction("Scheduled installation of RHN Virtualization Guest packages.")
def initiate(server_id, action_id, dry_run=0):
log_debug(3)
h = rhnSQL.prepare(_query_initiate_guest)
h.execute(action_id=action_id)
row = h.fetchone_dict()
if not row
|
:
raise InvalidAction("Kickstart action without an associated kickstart")
kickstart_host = row['kickstart_host']
virt_type = row['virt_type']
name = row['guest_name']
boot_image = "spacewalk-koan"
append_string = row['append_string']
vcpus = row['vcpus']
disk_gb = row['disk_gb']
mem_kb = row['mem_kb']
ks_session_id = row['ks_session_id']
virt_bridge = row['virt_bridge']
disk_path = row['disk_path']
cobbler_system_name = row['cobbler_system_name']
if not boot_image:
raise InvalidAction("Boot image missing")
return (kickstart_host, cobbler_system_name, virt_type, ks_session_id, name,
mem_kb, vcpus, disk_gb, virt_bridge, disk_path, append_string)
def add_tools_channel(server_id, action_id, dry_run=0):
log_debug(3)
if (not dry_run):
subscribe_to_tools_channel(server_id)
else:
log_debug(4, "dry run requested")
raise ShadowAction("Subscribed guest to tools channel.")
|
cocoaaa/ml_gesture
|
feature_selection.py
|
Python
|
mit
| 14,963 | 0.026064 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 17:55:05 2015
@author: LLP-admin
"""
import os
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
#encoding
from sklearn.preprocessing import LabelEncoder
from load_data import (divideByClass, splitXY, piped_standardize)
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from report_tools import *
from itertools import chain, combinations
def g_pow
|
erset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
"""Returns the generator for powerset of the interable"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in xrange(len(s)+1))
######################################################################
########################################################################
#memo = {};
def getPwrSet(L):
"""
Given a list, return a list of all possible subsets (subli
|
sts).
For example, given L = [1,2,3], it returns [[], [1], [2], [3], [1,2], [1,3], [2,3], [1,2,3]].
This algorithm is memoized. Don't forget the memo (a dictionary) right above the definition
of this function.
"""
if frozenset(L) in memo:
pwrSet = memo[frozenset(L)];
else:
#Base case: empty set
if len(L) == 0:
print "this should be printed only once if memo is working"
pwrSet = [L];
else:
last_ele = L[-1];
prevSet = getPwrSet(L[0:-1])
newSet = [ele + [last_ele] for ele in prevSet];
pwrSet = prevSet + newSet;
memo[frozenset(L)] = pwrSet;
# print 'Done creating powerSets...'
return pwrSet
###Test for getPwrSet#####
#lists = [ [1], [2], [1,2], [1,3], [1,2,3]];
#L = ['A','B','C']
#print getPwrSet(L)
#print '\nlength: ', len(getPwrSet(L))
######################################################################
######################################################################
def makeStdDataSets2(filepath, nTr = 3, nTest = 10):
"""
Inputs
-nTr: number of training sessions, i.e. number of training instancers per class.
-nTest: number of test instances per class
Returns
-standardized train_x, test_x and label-encoded (from classes to integers) train_y, test_y
Note: test_set is constructed by instances that follow directly the training instances.
"""
dict_dataPerClass = divideByClass(filepath);
sampleDF = dict_dataPerClass.values()[0];
columns = sampleDF.columns;
batch = pd.DataFrame(columns = columns)
test_set = pd.DataFrame(columns = columns);
for dataPerClass in dict_dataPerClass.itervalues():
# assert( not(dataPerClass.isnull().any().any()) ) ; print 'No None in this class dataset!'
batch = batch.append(dataPerClass.iloc[0:nTr]);
#Now, need to prepare the test data set.
test_set = test_set.append( dataPerClass.iloc[nTr:nTr+nTest] )
#split the batch into features and labels
batch_x, train_y = splitXY(batch)
rawTest_x, rawTest_y = splitXY(test_set)
#Done creating training and test data sets for this session.
#Standardize the train data. Apply the mean and std parameter to scale the test data accordingly.
train_x, test_x = piped_standardize(batch_x, rawTest_x);
#Make sure the number of features in train_x and test_x are same
assert(len(train_x.columns) == len(test_x.columns));
#Label encoding
# batch_y.index = range(0, len(batch_y))
le = LabelEncoder()
le.fit(train_y);
train_y = le.transform(train_y)
test_y = le.transform(rawTest_y)
return train_x, train_y, test_x, test_y
def selectFeatureSet_RF(data_x, data_y, nFeatures):
"""Use Random Forest to find the best numFeatures of features, based on the given data_x."""
rf_filter = RandomForestClassifier(max_features = 'auto')
rf_filter.fit(data_x, data_y);
rankings = rf_filter.feature_importances_;
selectedBool = np.argsort(rankings)[-nFeatures:]
# selectedBool = sorted(range(len(rankings)), key = lambda x: rankings[x])[-nFeatures:];
return data_x.columns[selectedBool]
def evalFeatureSet(train_x, train_y, test_x, test_y, selectedFeatures, classifier):
if len(selectedFeatures) == 0:
score = 0.0
train_x = train_x[selectedFeatures];
test_x = test_x[selectedFeatures];
#Don't need to modify even after more filtering is applied later
#train the classifier on this batch
clf = classifier;
clf.fit(train_x, train_y);
#test the classifier on the fixed test set
score = clf.score(test_x, test_y);
return (frozenset(selectedFeatures), score)
def get_FS_report(filepath, classifier, nTr = 3, nTest = 10):
"""Get the report of featureSet size vs. %accuracy using Random Forest as the feature selection filter."""
#1. Get standardized train and test data
train_x, train_y, test_x, test_y = makeStdDataSets2(filepath, nTr, nTest);
#Total number of features is the number of columns in train_x ( this should equal that of test_x)
_, total = train_x.shape
# 2. select features with varying number of features
FS_report = {};
for nFeatures in range(1, total +1):
selectedFeatures = selectFeatureSet_RF(train_x, train_y, nFeatures);
featureSet, score = evalFeatureSet(train_x, train_y, test_x, test_y, selectedFeatures, classifier)
FS_report[featureSet] = score;
# print "\nfeature SET: ", featureSet
# print "score: ", score
return FS_report
def plot_FS_report(FS_report, clfName, fname):
plt.figure();
# plt.xlim([0,24]);#plt.xticks(np.arange(0, 24, 1.0));
# plt.ylim([0,1.0]);#plt.yticks(np.arange(0, 1.0, 0.1));
plt.xlabel("number of best features selected")
plt.ylabel("% accuracy")
plt.title("Report on: "+ fname+ \
"\nClassifier: "+ clfName);
for k,v in FS_report.iteritems():
plt.plot(len(k),v, 'bo')
plt.hold
plt.show()
def get_PCA_FS_report(filepath, classifier, nTr = 3, nTest = 10):
"""Get the report of featureSet size vs. %accuracy using Random Forest as the feature selection filter.
PCA is applied after feature selection"""
#1. Get standardized train and test data
all_train_x, train_y, all_test_x, test_y = makeStdDataSets2(filepath, nTr, nTest);
#Total number of features is the number of columns in train_x ( this should equal that of test_x)
_, total = all_train_x.shape
# 2. select features with varying number of features
PCA_report = {};
for nFeatures in range(1, total +1):
selectedFeatures = selectFeatureSet_RF(all_train_x, train_y, nFeatures);
# print selectedFeatures
#
#Select only the top-nFeatures features
train_x = all_train_x[selectedFeatures]
test_x = all_test_x[selectedFeatures]
#Run PCA
pca = PCA(n_components = nFeatures);
PCA_train_x = pca.fit_transform(train_x)
PCA_test_x = pca.transform(test_x)
#classifier initialization, training and testing
clf = classifier
clf.fit(PCA_train_x, train_y);
score = clf.score(PCA_test_x, test_y);
PCA_report[frozenset(selectedFeatures)] = score;
# print "\nfeature SET: ", len(selectedFeatures)
# print "score: ", score
return PCA_report
def get_PCA_report(filepath, classifier, nTr = 3, nTest = 10):
"""Get the report of featureSet size vs. %accuracy using Random Forest as the feature selection filter.
PCA is applied after feature selection"""
#1. Get standardized train and test data
all_train_x, train_y, all_test_x, test_y = makeStdDataSets2(filepath, nTr, nTest);
#Total number of features is the number of columns in train_x ( this should equal that of test_x)
_, t
|
Bielicki/lcda
|
excel_upload/apps.py
|
Python
|
gpl-3.0
| 98 | 0 |
from django.app
|
s import AppConfig
class ExcelUploadConfig(AppConfig):
na
|
me = 'excel_upload'
|
robacklin/sigrok
|
libsigrokdecode/decoders/z80/__init__.py
|
Python
|
gpl-3.0
| 1,313 | 0.009139 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Daniel Elstner <daniel.kitta@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more
|
details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
The Zilog Z80 is an 8-bit microprocessor compatible with the Intel 8080.
In addition to the 8-bit data bus, this decoder requires the input signals
/M1 (machine cycle), /RD (read) and
|
/WR (write) to do its work. An explicit
clock signal is not required. However, the Z80 CPU clock may be used as
sampling clock, if applicable.
Notes on the Z80 opcode format and descriptions of both documented and
"undocumented" opcodes are available here:
http://www.z80.info/decoding.htm
http://clrhome.org/table/
'''
from .pd import Decoder
|
katyhuff/moose
|
python/MooseDocs/MooseApplicationSyntax.py
|
Python
|
lgpl-2.1
| 13,523 | 0.002884 |
import os
import re
import copy
import collections
import logging
import MooseDocs
import collections
import subprocess
import yaml
log = logging.getLogger(__name__)
class PagesHelper(object):
"""
A helper class for checking if a markdown file is include in the 'pages.yml' file.
"""
def __init__(self, pages):
self.pages = MooseDocs.yaml_load(pages)
self.raw = yaml.dump(self.p
|
ages, default_flow_style=False)
def check(self, filename):
return filename in self.raw
@staticmethod
def create(root):
"""
Generated nested 'pages.yml' files.
"""
# Define the pages.yml file
pages = os.path.join(root, 'pages.yml')
content = []
if not os.path.exists(root):
os.makedirs(root)
# Loop through the contents of the
|
directory
for item in os.listdir(root):
full = os.path.join(root, item)
txt = None
if os.path.isdir(full):
txt = '- {}: !include {}\n'.format(item, os.path.join(full, 'pages.yml'))
PagesHelper.create(full)
elif full.endswith('.md'):
txt = '- {}: {}\n'.format(item[:-3], full)
if txt:
if txt.startswith('- Overview:'):
content.insert(0, txt)
else:
content.append(txt)
# Write the contents
with open(pages, 'w') as fid:
log.info('Writing pages file: {}'.format(pages))
for line in content:
fid.write(line)
class MooseApplicationSyntax(object):
"""
An object for handling the registered object and action syntax for a specific set of directories.
A compiled MOOSE application contains all included libraries (i.e., framework, modules, and other applications), thus
when an application is executed with --yaml in includes the complete syntax.
To allow for documentation to be generated to only include the objects and syntax specific to an application the syntax
defined in the application source directory must be separated from that of the entire library. This object builds maps to
the registered objects and syntax specific to the application.
Args:
yaml[MooseYaml]: The MooseYaml object obtained by running the application with --yaml option.
paths[list]: Valid source directory to extract syntax.
doxygen[str]: The URL to the doxygen page.
Optional Args (only needed when check() method is called, see generate.py)
pages[list]: The .yml file containing the website layout (mkdocs 'pages' configuration option)
name[str]: The name of the syntax group (i.e., the key used in the 'locations' configuration for MooseMarkdown)
install[str]: The install directory for the markdown (see MooseMarkdown config)
generate[bool]: When True stub pages are generated if they do not exist
"""
def __init__(self, yaml_data, paths=[], doxygen=None, pages='pages.yml', name=None, install=None, stubs=False, pages_stubs=False, **kwargs):
# Store the input variables
self._yaml_data = yaml_data
self.paths = paths
self.install = install
self.stubs = stubs
self.pages_stubs = pages_stubs
self.doxygen = doxygen
self.name = name
if pages:
self.pages = PagesHelper(pages)
# The databases containing the system/object/markdown/source information for this directory
self._systems = set()
self._objects = dict()
self._filenames = dict()
self._syntax = set()
self._markdown = list() # A list of markdown files, used for updating pages.yml
# Update the syntax maps
for path in paths:
if (not path):
log.critical("Missing or invalid source/include directory.")
raise Exception("A directory with a value of None was supplied, which is not allowed.")
elif not os.path.exists(path):
log.critical("Unknown source directory supplied: {}".format(os.path.abspath(path)))
raise IOError(os.path.abspath(path))
self._updateSyntax(path)
for s in self._syntax:
nodes = self._yaml_data[s]
for node in nodes:
name = node['name'].split('/')[-1]
if name not in self._objects:
self._systems.add(node['name'].rstrip('/*'))
else:
name = node['name'].rsplit('/', 1)[0]
self._systems.add(name)
def systems(self):
"""
Return a set of MOOSE systems defined in the supplied directories.
"""
return self._systems
def hasSystem(self, name):
"""
Returns True when the supplied name is a system in this object.
"""
return name in self._systems
def objects(self):
"""
Returns a set of MOOSE objects defined in the supplied directories.
"""
return self._objects
def hasObject(self, name):
"""
Returns True when the supplied name is an object stored in the syntax object.
"""
return name in self._objects
def filenames(self, name):
"""
Return the filename(s), *h (and *.C) for the given object name.
"""
return self._filenames[self._objects[name]]
def check(self):
"""
Check that the application documentation exists, create stubs if it does not.
"""
for node in self._yaml_data.get():
self._checkNode(node)
if self.pages_stubs:
self.pages.create(self.install)
for md in self._markdown:
if not self.pages.check(md):
log.error('The markdown file {} was not found in the pages.yml'.format(md))
def _checkNode(self, node):
"""
Check a YAML node.
Args:
node[str]: The syntax connected to this object.
"""
full_name = node['name']
obj_name = node['name'].split('/')[-1]
if full_name.endswith('*') or full_name.endswith('<type>'):
return
if self.hasSystem(full_name):
self._checkSystem(node)
if self.hasObject(obj_name):
self._checkObject(node, obj_name)
if node['subblocks']:
for child in node['subblocks']:
self._checkNode(child)
def _checkSystem(self, node):
"""
Check the status of the documentation for a system.
Args:
node[str]: The syntax connected to this system.
"""
# The full name of the object
name = node['name']
stub = '<!-- MOOSE System Documentation Stub: Remove this when content is added. -->'
# Determine the filename
if node['subblocks']:
filename = os.path.join(self.install, name.rstrip('*').strip('/'), 'Overview.md')
else:
filename = os.path.join(self.install, name.rstrip('*').strip('/') + '.md')
if not os.path.exists(filename):
log.error("No documentation for {}. Documentation for this system should be created in: {}".format(name, os.path.abspath(filename)))
if self.stubs:
self._markdown.append(filename)
stub += '\n# {} System\n'.format(name.split('/')[-1])
stub += '!parameters {}\n\n'.format(name)
has_subobjects = False
has_subsystems = False
if node['subblocks']:
for child in node['subblocks']:
if self.hasObject(child['name'].split('/')[-1]):
has_subobjects = True
if self.hasSystem(child['name']):
has_subsystems = True
if has_subobjects:
stub += '!subobjects {} {}\n\n'.format(self.name, name)
if has_subsystems:
stub += '!subsystems {} {}\n\n'.format(self.name, name)
# Write the stub file
|
geokala/cloudify-agent
|
cloudify_agent/api/utils.py
|
Python
|
apache-2.0
| 10,561 | 0 |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import json
import copy
import tempfile
import os
import getpass
import pkg_resources
from jinja2 import Template
from cloudify.utils import setup_logger
import cloudify_agent
from cloudify_agent import VIRTUALENV
from cloudify_agent.api import defaults
logger = setup_logger('cloudify_agent.api.utils')
class _Internal(object):
"""
Contains various internal utility methods. Import this at your own
peril, as backwards compatibility is not guaranteed.
"""
CLOUDIFY_DAEMON_NAME_KEY = 'CLOUDIFY_DAEMON_NAME'
CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY = 'CLOUDIFY_DAEMON_STORAGE_DIREC
|
TORY'
CLOUDIFY_DAEMON_USER_KEY = 'CLOUDIFY_DAEMON_USER'
@classmethod
def get_daemon_name(cls):
"""
Returns the name of the currently running daemon.
"""
return os.environ[cls.CLOUDIFY_DAEMON_NAME_KEY]
@classmethod
def get_daemon_storage_dir(cls):
"""
Returns the storage directory the
|
current daemon is stored under.
"""
return os.environ[cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY]
@classmethod
def get_daemon_user(cls):
"""
Return the user the current daemon is running under
"""
return os.environ[cls.CLOUDIFY_DAEMON_USER_KEY]
@staticmethod
def get_storage_directory(username=None):
"""
Retrieve path to the directory where all daemon
registered under a specific username will be stored.
:param username: the user
"""
return os.path.join(get_home_dir(username), '.cfy-agent')
@staticmethod
def generate_agent_name():
"""
Generates a unique name with a pre-defined prefix
"""
return '{0}-{1}'.format(
defaults.CLOUDIFY_AGENT_PREFIX,
uuid.uuid4())
@staticmethod
def daemon_to_dict(daemon):
"""
Return a json representation of the daemon by copying the __dict__
attribute value. Also notice that this implementation removes any
attributes starting with the underscore ('_') character.
:param daemon: the daemon.
:type daemon: cloudify_agent.api.pm.base.Daemon
"""
try:
getattr(daemon, '__dict__')
except AttributeError:
raise ValueError('Cannot save a daemon with '
'no __dict__ attribute.')
# don't use deepcopy here because we this will try to copy
# the internal non primitive attributes
original = daemon.__dict__
result = copy.copy(original)
for attr in original:
if attr.startswith('_'):
result.pop(attr)
return result
internal = _Internal()
def get_agent_stats(name, celery):
"""
Query for agent stats based on agent name.
:param name: the agent name
:param celery: the celery client to use
:return: agents stats
:rtype: dict
"""
destination = 'celery@{0}'.format(name)
inspect = celery.control.inspect(
destination=[destination])
stats = (inspect.stats() or {}).get(destination)
return stats
def get_home_dir(username=None):
"""
Retrieve the home directory of the given user. If no user was specified,
the currently logged user will be used.
:param username: the user.
"""
if os.name == 'nt':
if username is None:
return os.path.expanduser('~')
else:
return os.path.expanduser('~{0}'.format(username))
else:
import pwd
if username is None:
if 'SUDO_USER' in os.environ:
# command was executed via sudo
# get the original user
username = os.environ['SUDO_USER']
else:
username = getpass.getuser()
return pwd.getpwnam(username).pw_dir
def render_template_to_file(template_path, file_path=None, **values):
"""
Render a 'jinja' template resource to a temporary file.
:param template_path: relative path to the template.
:param file_path: absolute path to the desired output file.
:param values: keyword arguments passed to jinja.
"""
template = get_resource(template_path)
rendered = Template(template).render(**values)
return content_to_file(rendered, file_path)
def resource_to_tempfile(resource_path):
"""
Copy a resource into a temporary file.
:param resource_path: relative path to the resource.
:return path to the temporary file.
"""
resource = get_resource(resource_path)
return content_to_file(resource)
def get_resource(resource_path):
"""
Loads the resource into a string.
:param resource_path: relative path to the resource.
"""
return pkg_resources.resource_string(
cloudify_agent.__name__,
os.path.join('resources', resource_path)
)
def get_absolute_resource_path(resource_path):
"""
Retrieves the absolute path in the file system of a resource of the
package.
:param resource_path: the relative path to the resource
"""
return pkg_resources.resource_filename(
cloudify_agent.__name__,
os.path.join('resources', resource_path)
)
def content_to_file(content, file_path=None):
"""
Write string to a temporary file.
:param content:
:param file_path: absolute path to the desired output file.
"""
if not file_path:
file_path = tempfile.NamedTemporaryFile(mode='w', delete=False).name
with open(file_path, 'w') as f:
f.write(content)
f.write(os.linesep)
return file_path
def get_executable_path(executable):
"""
Lookup the path to the executable, os agnostic
:param executable: the name of the executable
"""
if os.name == 'posix':
return '{0}/bin/{1}'.format(VIRTUALENV, executable)
else:
return '{0}\\Scripts\\{1}'.format(VIRTUALENV, executable)
def get_cfy_agent_path():
"""
Lookup the path to the cfy-agent executable, os agnostic
:return: path to the cfy-agent executable
"""
return get_executable_path('cfy-agent')
def get_pip_path():
"""
Lookup the path to the pip executable, os agnostic
:return: path to the pip executable
"""
return get_executable_path('pip')
def get_celery_path():
"""
Lookup the path to the celery executable, os agnostic
:return: path to the celery executable
"""
return get_executable_path('celery')
def get_python_path():
"""
Lookup the path to the python executable, os agnostic
:return: path to the python executable
"""
return get_executable_path('python')
def env_to_file(env_variables, destination_path=None, posix=True):
"""
Write environment variables to a file.
:param env_variables: environment variables
:param destination_path: destination path of a file where the
environment variables will be stored. the
stored variables will be a bash script you can
then source.
:param posix: false if the target of the generated file will be a
windows machine
"""
if not env_variables:
return None
if not destination_path:
destination_path = tempfile.mkstemp(suffix='env')[1]
if posix:
linesep = '\n'
else:
linesep = '\r\n'
with open(destination_path, 'w') as f:
if posix:
f
|
yochow/autotest
|
client/tests/isic/isic.py
|
Python
|
gpl-2.0
| 831 | 0.008424 |
import os
from autotest_lib.client.bin import test, utils
class isic(test.test):
version = 2
# http://www.packetfactory.net/Projects/ISIC/isic-0.06.tgz
# + http://www.stardust.webpages.pl/files/crap/isic-gcc41-fix.patch
def initialize(self):
self.job.require_gcc()
self.job.setup_dep(['libnet'])
def setup(self, tarball = 'isic-0.06.tar.bz2'):
t
|
arball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
utils.extract_tarball_to_dir(tarball, self.srcdir)
os.chdir(self.srcdir)
utils.system('patch -p1 < ../build-fixes.patch')
utils.system('PREFIX=%s /deps/libnet/libnet/ ./configure' %self.autodir)
utils.system('make')
def execute(self, args = '-s rand -d 127.0.0.1
|
-p 10000000'):
utils.system(self.srcdir + '/isic ' + args)
|
reyoung/Paddle
|
python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py
|
Python
|
apache-2.0
| 2,170 | 0 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import six
import numpy as np
from op_test import OpTest
class TestSequenceUnpadOp(OpTest):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 5)
self.dtype = "float32"
def compute(self):
assert len(self.length) == self.x_shape[0]
x = np.random.random(self.x_shape).astype(self.dtype)
out_lod = [self.length]
out = x[0, 0:self.length[0]]
for i in six.moves.
|
xrange(1, x.shape[0]):
out = np.append(out, x[i, 0:self.length[i]], axis=0)
out_shape = (sum(self.length), )
if len(self.x_shape) == 2:
out_shape = out_shape + (1, )
else:
out_shape = out_shape + self.x_shape[2:]
self.i
|
nputs = {
'X': x,
'Length': np.array(self.length).astype('int64').reshape(-1, 1)
}
self.outputs = {'Out': (out.reshape(out_shape), out_lod)}
def setUp(self):
self.op_type = 'sequence_unpad'
self.init()
self.compute()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSequenceUnpadOp2(TestSequenceUnpadOp):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 5, 4, 3)
self.dtype = "float32"
class TestSequenceUnpadOp3(TestSequenceUnpadOp):
def init(self):
self.length = [5, 2, 3, 4]
self.x_shape = (4, 5, 3, 3, 6)
self.dtype = "float64"
if __name__ == '__main__':
unittest.main()
|
carvalhodj/qunews
|
rabbitmq/receive.py
|
Python
|
apache-2.0
| 1,031 | 0.00291 |
import pika
import sys
credentials = pika.PlainCredentials('qunews', 'qunews')
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, 'qunews_host', credentials))
channel = connection.channel()
channel.exchange_declare(exchange='qunews_data',
type='topic')
result = channel.queue_de
|
clare()
queue_name = result.method.queue
binding_keys = sys.argv[1:]
if not binding_keys:
sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
sys.exit(1)
for binding_key in binding_keys:
channel.queue_bind(exchange='qunews_data',
queue=queue_name,
routing_key=binding_key)
print(' [*] Waiting for logs. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [x] %r:%r"
|
% (method.routing_key, body))
if body[0:2] == 'ef':
print("MAC comeca com 'ef'")
channel.basic_consume(callback,
queue=queue_name,
no_ack=True)
channel.start_consuming()
|
djkonro/client-python
|
kubernetes/client/models/v1_container_state_waiting.py
|
Python
|
apache-2.0
| 3,830 | 0.000522 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ContainerStateWaiting(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, message=None, reason=None):
"""
V1ContainerStateWaiting - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'message': 'str',
'reason': 'str'
}
self.attribute_map = {
'message': 'message',
'reason': 'reason'
}
self._message = message
self._reason = reason
@property
def message(self):
"""
Gets the message of this V1ContainerStateWaiting.
Message regarding why the container is not yet running.
:return: The message of this V1ContainerStateWaiting.
:rtype: str
"""
return self._message
@me
|
ssage.setter
def message(self, message):
"""
Sets the message of this V1ContainerStateWaiting.
Message regarding why the container is not yet running.
:param message: The message of this V1ContainerStateWaiting.
:type: str
"""
self._message = message
@property
def reason(self):
"""
Gets the reason of this V1ContainerStateWaiting.
(brief) reason the container i
|
s not yet running.
:return: The reason of this V1ContainerStateWaiting.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this V1ContainerStateWaiting.
(brief) reason the container is not yet running.
:param reason: The reason of this V1ContainerStateWaiting.
:type: str
"""
self._reason = reason
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ContainerStateWaiting):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
yceruto/django-formapi
|
formapi/__init__.py
|
Python
|
mit
| 348 | 0 |
VERSION = (0, 0, 1, 'dev')
# Dynamically calculate the version based on VERSION tuple
if len(VERSION) > 2 and VERSION[2] is not None:
|
if isinstance(VERSION[2], int):
str_version = "%s.%s.%s" % VERSION[:3]
|
else:
str_version = "%s.%s_%s" % VERSION[:3]
else:
str_version = "%s.%s" % VERSION[:2]
__version__ = str_version
|
skmezanul/seahub
|
seahub/profile/models.py
|
Python
|
apache-2.0
| 3,686 | 0.003256 |
from django.conf import settings
from django.db import models
from django.core.cache import cache
from django.dispatch import receiver
from seahub.base.fields import LowerCaseCharField
from seahub.profile.settings import EMAIL_ID_CACHE_PREFIX, EMAIL_ID_CACHE_TIMEOUT
from registration.signals import user_registered
class ProfileManager(models.Manager):
def add_or_update
|
(self, username, nickname, intro, lang_code=None):
"""Add or update user profile.
"""
try:
profile = self.get(user=username)
profile.nickname = nickname
profile.intro = intro
profile.lang_code = lang_code
except Profile.DoesNotExist:
profile = self.model(user=user
|
name, nickname=nickname,
intro=intro, lang_code=lang_code)
profile.save(using=self._db)
return profile
def get_profile_by_user(self, username):
"""Get a user's profile.
"""
try:
return super(ProfileManager, self).get(user=username)
except Profile.DoesNotExist:
return None
def get_user_language(self, username):
"""Get user's language from profile. Return default language code if
user has no preferred language.
Arguments:
- `self`:
- `username`:
"""
try:
profile = self.get(user=username)
if profile.lang_code is not None:
return profile.lang_code
else:
return settings.LANGUAGE_CODE
except Profile.DoesNotExist:
return settings.LANGUAGE_CODE
def delete_profile_by_user(self, username):
self.filter(user=username).delete()
class Profile(models.Model):
user = models.EmailField(unique=True)
nickname = models.CharField(max_length=64, blank=True)
intro = models.TextField(max_length=256, blank=True)
lang_code = models.TextField(max_length=50, null=True, blank=True)
objects = ProfileManager()
def set_lang_code(self, lang_code):
self.lang_code = lang_code
self.save()
class DetailedProfileManager(models.Manager):
def add_detailed_profile(self, username, department, telephone):
d_profile = self.model(user=username, department=department,
telephone=telephone)
d_profile.save(using=self._db)
return d_profile
def add_or_update(self, username, department, telephone):
try:
d_profile = self.get(user=username)
d_profile.department = department
d_profile.telephone = telephone
except DetailedProfile.DoesNotExist:
d_profile = self.model(user=username, department=department,
telephone=telephone)
d_profile.save(using=self._db)
return d_profile
def get_detailed_profile_by_user(self, username):
"""Get a user's profile.
"""
try:
return super(DetailedProfileManager, self).get(user=username)
except DetailedProfile.DoesNotExist:
return None
class DetailedProfile(models.Model):
user = LowerCaseCharField(max_length=255, db_index=True)
department = models.CharField(max_length=512)
telephone = models.CharField(max_length=100)
objects = DetailedProfileManager()
########## signal handler
@receiver(user_registered)
def clean_email_id_cache(sender, **kwargs):
from seahub.utils import normalize_cache_key
user = kwargs['user']
key = normalize_cache_key(user.email, EMAIL_ID_CACHE_PREFIX)
cache.set(key, user.id, EMAIL_ID_CACHE_TIMEOUT)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.