repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
---|---|---|---|---|---|---|---|---|
davidbradway/fusefit | webui/app/views.py | Python | mit | 2,160 | 0.008333 |
import os
# We'll render HTML templates and access data sent by POST
# using the request object from flask. Redirect and url_for
# will be used to redirect the user once the upload is done
# and send_from_directory will help us to send/show on the
# browser the file that the user just uploaded
from flask import Flask, render_template, request, flash, redirect, url_for, send_from_directory
from app import app
from werkzeug.utils import secure_filename
# For a given file, return whether it's an allowed type or not
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
@app.route('/', methods=['GET','POST'])
@app.route('/index', methods=['GET','POST'])
def index():
if request.method == 'POST':
# check if the post request has the file part
filename = []
for upfile in ['filewohr','filewhr']:
if upfile not in request.files:
flash('No file part')
return redirect(request.url)
# Get the name of the uploaded file
file = request.files[upfile]
# if user does not select file, browser also
# submits an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
# Check if the file is one of the allowed types/extensions
if file and allowed_file(file.filename):
# Make the filename safe, remove unsupported chars
filename.append(secure_filename(file.filename))
# Move the file from the temporary folder to the upload folder
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename[-1]))
else:
flash('Not allowed file')
return redirect(request.url)
# Render the file template
return render_template('file.html',
folder = app.config['UPLOAD_FOLDER'],
filenamewohr = filename[0],
filenamewhr = filename[1],
scroll = 'results')
return render_template('index.html')
|
Open365/Open365 | lib/EyeosApi/Logout.py | Python | agpl-3.0 | 1,097 | 0.002735 |
import time
from lib.DomainObjects.EyeosCard import EyeosCard
from lib.Errors.EyeosAPIError import EyeosAPIError
from lib.EyeosApi.EyeosApiCall import EyeosApiCall
from lib.Settings import Settings
from lib.Wrappers.Logger import Logger
class Logout:
def __init__(self, injected_proxy_ip=None, injected_eyeos_api_call=None):
self.settings = Settings().getSettings()
self.proxy_ip = injected_proxy_ip or self.settings['general']['public_hostname']
self.logger = Logger(__name__)
self.eyeos_api_call = injected_eyeos_api_call or EyeosApiCall()
def logout(self, card):
self.logger.info("Retrieving a valid card...")
data = {
'timestamp': int(time.time())
}
logout_url = "https://{0}/relay/presence/v1/routingKey/logout/userEvent/logout".format(self.proxy_ip)
self.logger.debug('POST request to: {0}'.format(logout_url))
req = self.eyeos_api_call.post(logout_url, verify=False, data=data, card=card)
if req.status_code != 200:
raise ValueError("Error logging out with user")
|
PieterMostert/Lipgloss | model/lipgloss/lp_recipe_problem.py | Python | gpl-3.0 | 17,317 | 0.009875 |
# LIPGLOSS - Graphical user interface for constructing glaze recipes
# Copyright (C) 2017 Pieter Mostert
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# version 3 along with this program (see LICENCE.txt). If not, see
# <http://www.gnu.org/licenses/>.
# Contact: pi.mostert@gmail.com
import tkinter
from tkinter import messagebox # eliminate
from view.pretty_names import prettify # eliminate
from functools import partial
import shelve
import copy
from .core_data import CoreData
from .recipes import restr_keys
from .pulp2dim import *
from pulp import *
import time
solver = GLPK(msg=0)
#solver = PULP_CBC_CMD(msg=0)
#solver = None
# Based on https://scaron.info/blog/linear-programming-in-python-with-cvxopt.html,
# it seems the glpk solver provided by cvxopt is much faster than the one provided by pulp.
# Unfortunately I can't get cvxopt to run on Windows 7 with Python 3.6, so I'm sticking with
# pulp for now.
class LpRecipeProblem(LpProblem):
def __init__(self, name, max_or_min, core_data):
'''Basic LP problem constraints that always hold'''
#super().__init__()
LpProblem.__init__(self, name, max_or_min)
#CoreData.__init__(self)
self.ingredient_dict = core_data.ingredient_dict
self.oxide_dict = core_data.oxide_dict
self.other_dict = core_data.other_dict
self.ingredient_analyses = core_data.ingredient_analyses
self.other_attr_dict = core_data.other_attr_dict
self.lp_var = {} # self.lp_var is a dictionary for the variables in the linear programming problem
# Create variables used to normalize:
for total in ['ingredient_total', 'fluxes_total', 'ox_mass_total', 'ox_mole_total']:
self.lp_var[total] = pulp.LpVariable(total, 0, None, pulp.LpContinuous)
for ox in self.oxide_dict:
self.lp_var['mole_'+ox] = pulp.LpVariable('mole_'+ox, 0, None, pulp.LpContinuous)
self.lp_var['mass_'+ox] = pulp.LpVariable('mass_'+ox, 0, None, pulp.LpContinuous)
# Relate mole percent and unity:
self += self.lp_var['mole_'+ox] * self.oxide_dict[ox].molar_mass == self.lp_var['mass_'+ox]
self += self.lp_var['fluxes_total'] == sum(self.oxide_dict[ox].flux * self.lp_var['mole_'+ox] for ox in self.oxide_dict)
self += self.lp_var['ox_mass_total'] == sum(self.lp_var['mass_'+ox] for ox in self.oxide_dict)
self += self.lp_var['ox_mole_total'] == sum(self.lp_var['mole_'+ox] for ox in self.oxide_dict)
for i in self.other_attr_dict:
self.lp_var['other_attr_'+i] = pulp.LpVariable('other_attr_'+i, 0, None, pulp.LpContinuous)
# May move the next section out of __init__
for index in self.ingredient_dict:
ing = 'ingredient_'+index
self.lp_var[ing] = pulp.LpVariable(ing, 0, None, pulp.LpContinuous)
# Relate ingredients, oxides and other attributes:
self.update_ingredient_analyses()
self += self.lp_var['ingredient_total'] == sum(self.lp_var['ingredient_'+index] for index in self.ingredient_dict), 'ing_total'
for index in self.other_dict:
ot = 'other_'+index
coefs = self.other_dict[index].numerator_coefs
linear_combo = [(self.lp_var[key], coefs[key]) for key in coefs]
self.lp_var[ot] = pulp.LpVariable(ot, 0, None, pulp.LpContinuous)
# Relate this variable to the other variables:
self += self.lp_var[ot] == LpAffineExpression(linear_combo), ot
def update_ingredient_analyses(self):
"To be run when the composition of any ingredient is changed. May be better to do this for a specific ingredient"
for ox in self.oxide_dict:
self.constraints[ox] = sum(self.ingredient_analyses[j][ox] * self.lp_var['ingredient_'+j]/100 \
for j in self.ingredient_dict if ox in self.ingredient_analyses[j]) \
== self.lp_var['mass_'+ox]
for i in self.other_attr_dict:
self.constraints['other_attr_'+i] = sum(self.ingredient_dict[j].other_attributes[i] * self.lp_var['ingredient_'+j]/100 \
for j in self.ingredient_dict if i in self.ingredient_dict[j].other_attributes) \
== self.lp_var['other_attr_'+i]
def remove_ingredient(self, i, core_data):
try:
core_data.remove_ingredient(i)
except:
pass
## self._variables.remove(self.lp_var['ingredient_'+i])
# The commented-out line above doesn't work in general since self.lp_var['ingredient_'+i] is regarded as
# being equal to all entries of self._variables, so it removes the first entry. Instead, we need to use 'is'.
for k, j in enumerate(self._variables):
if j is self.lp_var['ingredient_'+i]:
del self._variables[k]
try:
del self.constraints['ingredient_'+i+'_lower'] # Is this necessary?
del self.constraints['ingredient_'+i+'_upper'] # Is this necessary?
except:
pass
self.constraints['ing_total'] = self.lp_var['ingredient_total'] == \
sum(self.lp_var['ingredient_'+j] for j in self.ingredient_dict)
self.update_ingredient_analyses()
def add_ingredient(self, i, core_data):
pass
def update_other_restrictions(self):
"To be run when CoreData.other_dict is changed. May be better to do this for a specific other restriction"
for i in self.other_dict:
ot = 'other_'+i
coefs = self.other_dict[i].numerator_coefs
linear_combo = [(self.lp_var[key], coefs[key]) for key in coefs]
self.constraints[ot] = self.lp_var[ot] == LpAffineExpression(linear_combo)
def remove_other_restriction(self, i, core_data):
try:
core_data.remove_other_restriction(i)
except:
pass
## self._variables.remove(self.lp_var['other_'+i])
# The commented-out line above doesn't work in general since self.lp_var['other_'+i] is regarded as
# being equal to all entries of self._variables, so it removes the first entry. Instead, we need to use 'is'.
ot = 'other_'+i
try:
del self.constraints[ot]
except:
pass
for k, j in enumerate(self._variables):
if j is self.lp_var[ot]:
del self._variables[k]
try:
del self.constraints[ot+'_lower'] # Is this necessary?
del self.constraints[ot+'_upper'] # Is this necessary?
except:
pass
##Proposed rearrangement: Move variables and constraints relating to ingredients and other restrictions
##from LpRecipeProblem.__init__ to LpRecipeProblem.calc_restrictions.
##Add default bounds to recipe initialization
##Change format of normalizations
def calc_restrictions(self, recipe, restr_dict): # first update recipe.
# Should be able to construct a reduced restr_dict from recipe
t0 = time.process_time()
# First, test for obvious errors
if sum(self.oxide_dict[ox].flux for ox in recipe.oxides) == 0:
messagebox.showerror(" ", 'No flux! You have to give a flux.')
return
# Run tests to see if the denominators of other restrictions are identically zero?
for key in recipe.restriction_keys:
if recipe.lower_bounds[key] > recipe.upper_bounds[key]:
res
|
savi-dev/quantum | quantum/plugins/ryu/nova/linux_net.py | Python | apache-2.0 | 2,805 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# <yamahata at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ryu.app.client import OFPClient
from nova import flags
from nova import log as logging
from nova.network import linux_net
from nova.openstack.common import cfg
from nova import utils
LOG = logging.getLogger(__name__)
ryu_linux_net_opt = cfg.StrOpt('linuxnet_ovs_ryu_api_host',
default='127.0.0.1:8080',
help='Openflow Ryu REST API host:port')
FLAGS = flags.FLAGS
FLAGS.register_opt(ryu_linux_net_opt)
def _get_datapath_id(bridge_name):
out, _err = utils.execute('ovs-vsctl', 'get', 'Bridge',
bridge_name, 'datapath_id', run_as_root=True)
return out.strip().strip('"')
def _get_port_no(dev):
out, _err = utils.execute('ovs-vsctl', 'get', 'Interface', dev,
'ofport', run_as_root=True)
return int(out.strip())
class LinuxOVSRyuInterfaceDriver(linux_net.LinuxOVSInterfaceDriver):
def __init__(self):
super(LinuxOVSRyuInterfaceDriver, self).__init__()
LOG.debug('ryu rest host %s', FLAGS.linuxnet_ovs_ryu_api_host)
self.ryu_client = OFPClient(FLAGS.linuxnet_ovs_ryu_api_host)
self.datapath_id = _get_datapath_id(
FLAGS.linuxnet_ovs_integration_bridge)
if linux_net.binary_name == 'nova-network':
for tables in [linux_net.iptables_manager.ipv4,
linux_net.iptables_manager.ipv6]:
tables['filter'].add_rule(
'FORWARD',
'--in-interface gw-+ --out-interface gw-+ -j DROP')
linux_net.iptables_manager.apply()
def plug(self, network, mac_address, gateway=True):
LOG.debug("network %s mac_adress %s gateway %s",
network, mac_address, gateway)
ret = super(LinuxOVSRyuInterfaceDriver, self).plug(
network, mac_address, gateway)
port_no = _get_port_no(self.get_dev(network))
self.ryu_client.create_port(network['uuid'], self.datapath_id, port_no)
return ret
|
swilly22/redisgraph-py | redisgraph/execution_plan.py | Python | bsd-2-clause | 5,416 | 0.001477 |
class Operation:
"""
Operation, single operation within execution plan.
"""
def __init__(self, name, args=None):
"""
Create a new operation.
Args:
name: string that represents the name of the operation
args: operation arguments
"""
self.name = name
self.args = args
self.children = []
def append_child(self, child):
if not isinstance(child, Operation) or self is child:
raise Exception("child must be Operation")
self.children.append(child)
return self
def child_count(self):
return len(self.children)
def __eq__(self, o: object) -> bool:
if not isinstance(o, Operation):
return False
return (self.name == o.name and self.args == o.args)
def __str__(self) -> str:
args_str = "" if self.args is None else f" | {self.args}"
return f"{self.name}{args_str}"
class ExecutionPlan:
"""
ExecutionPlan, collection of operations.
"""
def __init__(self, plan):
"""
Create a new execution plan.
Args:
plan: array of strings that represents the collection of operations
the output from GRAPH.EXPLAIN
"""
if not isinstance(plan, list):
raise Exception("plan must be an array")
self.plan = plan
self.structured_plan = self._operation_tree()
def _compare_operations(self, root_a, root_b):
"""
Compare execution plan operation tree
Return: True if operation trees are equal, False otherwise
"""
# compare current root
if root_a != root_b:
return False
# make sure root have the same number of children
if root_a.child_count() != root_b.child_count():
return False
# recursively compare children
for i in range(root_a.child_count()):
if not self._compare_operations(root_a.children[i], root_b.children[i]):
return False
return True
def __str__(self) -> str:
def aggregate_str(str_children):
return "\n".join([" " + line for str_child in str_children for line in str_child.splitlines()])
def combine_str(x, y):
return f"{x}\n{y}"
return self._operation_traverse(self.structured_plan, str, aggregate_str, combine_str)
def __eq__(self, o: object) -> bool:
""" Compares two execution plans
Return: True if the two plans are equal False otherwise
"""
# make sure 'o' is an execution-plan
if not isinstance(o, ExecutionPlan):
return False
# get root for both plans
root_a = self.structured_plan
root_b = o.structured_plan
# compare execution trees
return self._compare_operations(root_a, root_b)
def _operation_traverse(self, op, op_f, aggregate_f, combine_f):
"""
Traverse operation tree recursively applying functions
Args:
op: operation to traverse
op_f: function applied for each operation
aggregate_f: aggregation function applied for all children of a single operation
combine_f: combine function applied for the operation result and the children result
"""
# apply op_f for each operation
op_res = op_f(op)
if len(op.children) == 0:
return op_res # no children return
else:
# apply _operation_traverse recursively
children = [self._operation_traverse(child, op_f, aggregate_f, combine_f) for child in op.children]
# combine the operation result with the children aggregated result
return combine_f(op_res, aggregate_f(children))
def _operation_tree(self):
""" Build the operation tree from the string representation """
# initial state
i = 0
level = 0
stack = []
current = None
# iterate plan operations
while i < len(self.plan):
current_op = self.plan[i]
op_level = current_op.count(" ")
if op_level == level:
# if the operation level equal to the current level
# set the current operation and move next
args = current_op.split("|")
current = Operation(args[0].strip(), None if len(args) == 1 else args[1].strip())
i += 1
elif op_level == level + 1:
# if the operation is child of the current operation
# add it as child and set as current operation
args = current_op.split("|")
child = Operation(args[0].strip(), None if len(args) == 1 else args[1].strip())
current.append_child(child)
stack.append(current)
current = child
level += 1
i += 1
elif op_level < level:
# if the operation is not child of current operation
# go back to its parent operation
levels_back = level - op_level + 1
for _ in range(levels_back):
current = stack.pop()
level -= levels_back
else:
raise Exception("corrupted plan")
return stack[0]
|
webmedic/booker | src/gdata/blogger/client.py | Python | mit | 6,686 | 0.005085 |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the Blogger servers.
For documentation on the Blogger API, see:
http://code.google.com/apis/blogger/
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import gdata.client
import gdata.gauth
import gdata.blogger.data
import atom.data
import atom.http_core
# List user's blogs, takes a user ID, or 'default'.
BLOGS_URL = 'http://www.blogger.com/feeds/%s/blogs'
# Takes a blog ID.
BLOG_POST_URL = 'http://www.blogger.com/feeds/%s/posts/default'
# Takes a blog ID.
BLOG_PAGE_URL = 'http://www.blogger.com/feeds/%s/pages/default'
# Takes a blog ID and post ID.
BLOG_POST_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/%s/comments/default'
# Takes a blog ID.
BLOG_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/comments/default'
# Takes a blog ID.
BLOG_ARCHIVE_URL = 'http://www.blogger.com/feeds/%s/archive/full'
class BloggerClient(gdata.client.GDClient):
api_version = '2'
auth_service = 'blogger'
auth_scopes = gdata.gauth.AUTH_SCOPES['blogger']
def get_blogs(self, user_id='default', auth_token=None,
desired_class=gdata.blogger.data.BlogFeed, **kwargs):
return self.get_feed(BLOGS_URL % user_id, auth_token=auth_token,
desired_class=desired_class, **kwargs)
GetBlogs = get_blogs
def get_posts(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.BlogPostFeed, query=None,
**kwargs):
return self.get_feed(BLOG_POST_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetPosts = get_posts
def get_pages(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.BlogPageFeed, query=None,
**kwargs):
return self.get_feed(BLOG_PAGE_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetPages = get_pages
def get_post_comments(self, blog_id, post_id, auth_token=None,
desired_class=gdata.blogger.data.CommentFeed,
query=None, **kwargs):
return self.get_feed(BLOG_POST_COMMENTS_URL % (blog_id, post_id),
auth_token=auth_token, desired_class=desired_class,
query=query, **kwargs)
GetPostComments = get_post_comments
def get_blog_comments(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.CommentFeed,
query=None, **kwargs):
return self.get_feed(BLOG_COMMENTS_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetBlogComments = get_blog_comments
def get_blog_archive(self, blog_id, auth_token=None, **kwargs):
return self.get_feed(BLOG_ARCHIVE_URL % blog_id, auth_token=auth_token,
**kwargs)
GetBlogArchive = get_blog_archive
def add_post(self, blog_id, title, body, labels=None, draft=False,
auth_token=None, title_type='text', body_type='html', **kwargs):
# Construct an atom Entry for the blog post to be sent to the server.
new_entry = gdata.blogger.data.BlogPost(
title=atom.data.Title(text=title, type=title_type),
content=atom.data.Content(text=body, type=body_type))
if labels:
for label in labels:
new_entry.add_label(label)
if draft:
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
return self.post(new_entry, BLOG_POST_URL % blog_id, auth_token=auth_token, **kwargs)
AddPost = add_post
def add_page(self, blog_id, title, body, draft=False, auth_token=None,
title_type='text', body_type='html', **kwargs):
new_entry = gdata.blogger.data.BlogPage(
title=atom.data.Title(text=title, type=title_type),
content=atom.data.Content(text=body, type=body_type))
if draft:
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
return self.post(new_entry, BLOG_PAGE_URL % blog_id, auth_token=auth_token, **kwargs)
AddPage = add_page
def add_comment(self, blog_id, post_id, body, auth_token=None,
title_type='text', body_type='html', **kwargs):
new_entry = gdata.blogger.data.Comment(
content=atom.data.Content(text=body, type=body_type))
return self.post(new_entry, BLOG_POST_COMMENTS_URL % (blog_id, post_id),
auth_token=auth_token, **kwargs)
AddComment = add_comment
def update(self, entry, auth_token=None, **kwargs):
# The Blogger API does not currently support ETags, so for now remove
# the ETag before performing an update.
old_etag = entry.etag
entry.etag = None
response = gdata.client.GDClient.update(self, entry,
auth_token=auth_token, **kwargs)
entry.etag = old_etag
return response
Update = update
def delete(self, entry_or_uri, auth_token=None, **kwargs):
if isinstance(entry_or_uri, (str, atom.http_core.Uri)):
return gdata.client.GDClient.delete(self, entry_or_uri,
auth_token=auth_token, **kwargs)
# The Blogger API does not currently support ETags, so for now remove
# the ETag before performing a delete.
old_etag = entry_or_uri.etag
entry_or_uri.etag = None
response = gdata.client.GDClient.delete(self, entry_or_uri,
auth_token=auth_token, **kwargs)
# TODO: if GDClient.delete raises an exception, the entry's etag may be
# left as None. Should revisit this logic.
entry_or_uri.etag = old_etag
return response
Delete = delete
class Query(gdata.client.Query):
def __init__(self, order_by=None, **kwargs):
gdata.client.Query.__init__(self, **kwargs)
self.order_by = order_by
def modify_request(self, http_request):
gdata.client._add_query_param('orderby', self.order_by, http_request)
gdata.client.Query.modify_request(self, http_request)
ModifyRequest = modify_request
|
seatme/nucleon.amqp | nucleon/amqp/encoding.py | Python | lgpl-3.0 | 1,062 | 0.001883 |
from .spec import BASIC_PROPS_SET, encode_basic_properties
def encode_message(frame, headers, body, frame_size):
"""Encode message headers and body as a sequence of frames."""
for f in frame.encode():
yield f
props, headers = split_headers(headers, BASIC_PROPS_SET)
if headers:
props['headers'] = headers
yield encode_basic_properties(len(body), props)
for chunk in encode_body(body, frame_size):
yield chunk
def split_headers(user_headers, properties_set):
"""Split bitfield properties from named headers."""
props = {}
headers = {}
for key, value in user_headers.iteritems():
if key in properties_set:
props[key] = value
else:
headers[key] = value
return props, headers
def encode_body(body, frame_size):
"""Generate a sequence of chunks for body where each chunk is less than frame_size"""
limit = frame_size - 7 - 1 # spec is broken...
while body:
payload, body = body[:limit], body[limit:]
yield (0x03, payload)
|
indro/t2c | apps/external_apps/django_openid/admin.py | Python | mit | 658 | 0.009119 |
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from models import UserOpenidAssociation
class OpenIDInline(admin.StackedInline):
model = UserOpenidAssociation
class UserAdminWithOpenIDs(UserAdmin):
inlines = [OpenIDInline]
# Add OpenIDs to the user admin, but only if User has been registered
try:
admin.site.unregister(User)
admin.site.register(User, UserAdminWithOpenIDs)
except NotRegistered:
pass
#from models import Nonce, Association
#admin.site.register(Nonce)
#admin.site.register(Association)
|
instana/python-sensor | tests/apps/grpc_server/__init__.py | Python | mit | 709 | 0.002821 |
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2019
import os
import sys
import time
import threading
if 'GEVENT_TEST' not in os.environ and 'CASSANDRA_TEST' not in os.environ and sys.version_info >= (3, 5, 3):
# Background RPC application
#
# Spawn the background RPC app that the tests will throw
# requests at.
import tests.apps.grpc_server
from .stan_server import StanServicer
stan_servicer = StanServicer()
rpc_server_thread = threading.Thread(target=stan_servicer.start_server)
rpc_server_thread.daemon = True
rpc_server_thread.name = "Background RPC app"
print("Starting background RPC app...")
rpc_server_thread.start()
time.sleep(1)
|
genome/flow-core | flow/shell_command/fork/commands/service.py | Python | agpl-3.0 | 747 | 0.001339 |
from flow.commands.service import ServiceCommand
from flow.configuration.inject.broker import BrokerConfiguration
from flow.configuration.inject.redis_conf import RedisConfiguration
from flow.configuration.inject.service_locator import ServiceLocatorConfiguration
from flow.shell_command.fork.handler import ForkShellCommandMessageHandler
import logging
LOG = logging.getLogger(__name__)
class ForkShellCommand(ServiceCommand):
injector_modules = [
BrokerConfiguration,
RedisConfiguration,
ServiceLocatorConfiguration,
]
def _setup(self, *args, **kwargs):
self.handlers = [self.injector.get(ForkShellCommandMessageHandler)]
return ServiceCommand._setup(self, *args, **kwargs)
|
uni2u/neutron | neutron/plugins/openvswitch/common/config.py | Python | apache-2.0 | 4,210 | 0 |
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []
ovs_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("Integration bridge to use.")),
cfg.BoolOpt('enable_tunneling', default=False,
help=_("Enable tunneling support.")),
cfg.StrOpt('tunnel_bridge', default='br-tun',
help=_("Tunnel bridge to use.")),
cfg.StrOpt('int_peer_patch_port', default='patch-tun',
help=_("Peer patch port in integration bridge for tunnel "
"bridge.")),
cfg.StrOpt('tun_peer_patch_port', default='patch-int',
help=_("Peer patch port in tunnel bridge for integration "
"bridge.")),
cfg.StrOpt('local_ip', default='',
help=_("Local IP address of GRE tunnel endpoints.")),
cfg.ListOpt('bridge_mappings',
default=DEFAULT_BRIDGE_MAPPINGS,
help=_("List of <physical_network>:<bridge>. "
"Deprecated for ofagent.")),
cfg.BoolOpt('use_veth_interconnection', default=False,
help=_("Use veths instead of patch ports to interconnect the "
"integration bridge to physical bridges.")),
]
agent_opts = [
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
cfg.BoolOpt('minimize_polling',
default=True,
help=_("Minimize polling by monitoring ovsdb for interface "
"changes.")),
cfg.IntOpt('ovsdb_monitor_respawn_interval',
default=constants.DEFAULT_OVSDBMON_RESPAWN,
help=_("The number of seconds to wait before respawning the "
"ovsdb monitor after losing communication with it.")),
cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
help=_("Network types supported by the agent "
"(gre and/or vxlan).")),
cfg.IntOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT,
help=_("The UDP port to use for VXLAN tunnels.")),
cfg.IntOpt('veth_mtu',
help=_("MTU size of veth interfaces")),
cfg.BoolOpt('l2_population', default=False,
help=_("Use ML2 l2population mechanism driver to learn "
"remote MAC and IPs and improve tunnel scalability.")),
cfg.BoolOpt('arp_responder', default=False,
help=_("Enable local ARP responder if it is supported. "
"Requires OVS 2.1 and ML2 l2population driver. "
"Allows the switch (when supporting an overlay) "
"to respond to an ARP request locally without "
"performing a costly ARP broadcast into the overlay.")),
cfg.BoolOpt('dont_fragment', default=True,
help=_("Set or un-set the don't fragment (DF) bit on "
"outgoing
|
IP packet carrying GRE/VXLAN tunnel.")),
cfg.BoolOpt('enable_distributed_routing', default=False,
help=_("Make the l2 agent run in DVR mode.")),
]
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
|
LaoZhongGu/kbengine | kbe/src/lib/python/Tools/i18n/msgfmt.py | Python | lgpl-3.0 | 7,051 | 0.002979 |
#! /usr/bin/env python3
# Written by Martin v. Löwis <loewis@informatik.hu-berlin.de>
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation.
Usage: msgfmt.py [OPTIONS] filename.po
Options:
-o file
--output-file=file
Specify the output file to write to. If omitted, output will go to a
file named filename.mo (based off the input file name).
-h
--help
Print this message and exit.
-V
--version
Display version information and exit.
"""
import os
import sys
import ast
import getopt
import struct
import array
from email.parser import HeaderParser
__version__ = "1.1"
MESSAGES = {}
def usage(code, msg=''):
print(__doc__, file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(code)
def add(id, str, fuzzy):
"Add a non-fuzzy translation to the dictionary."
global MESSAGES
if not fuzzy and str:
MESSAGES[id] = str
def generate():
"Return the generated output."
global MESSAGES
# the keys are sorted in the .mo file
keys = sorted(MESSAGES.keys())
offsets = []
ids = strs = b''
for id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
ids += id + b'\0'
strs += MESSAGES[id] + b'\0'
output = ''
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
# translated string.
keystart = 7*4+16*len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412de, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index
7*4+len(keys)*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tostring()
output += ids
output += strs
return output
def make(filename, outfile):
ID = 1
STR = 2
# Compute .mo name from .po name and arguments
if filename.endswith('.po'):
infile = filename
else:
infile = filename + '.po'
if outfile is None:
outfile = os.path.splitext(infile)[0] + '.mo'
try:
lines = open(infile, 'rb').readlines()
except IOError as msg:
print(msg, file=sys.stderr)
sys.exit(1)
section = None
fuzzy = 0
# Start off assuming Latin-1, so everything decodes without failure,
# until we know the exact encoding
encoding = 'latin-1'
# Parse the catalog
lno = 0
for l in lines:
l = l.decode(encoding)
lno += 1
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
add(msgid, msgstr, fuzzy)
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and 'fuzzy' in l:
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid') and not l.startswith('msgid_plural'):
if section == STR:
add(msgid, msgstr, fuzzy)
if not msgid:
# See whether there is an encoding declaration
p = HeaderParser()
charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
if charset:
encoding = charset
section = ID
l = l[5:]
msgid = msgstr = b''
is_plural = False
# This is a message with plural forms
elif l.startswith('msgid_plural'):
if section != ID:
print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[12:]
msgid += b'\0' # separator of singular and plural
is_plural = True
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
if l.startswith('msgstr['):
if not is_plural:
print('plural without msgid_plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l.split(']', 1)[1]
if msgstr:
msgstr += b'\0' # Separator of the various plural forms
else:
if is_plural:
print('indexed msgstr required for plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
l = ast.literal_eval(l)
if section == ID:
msgid += l.encode(encoding)
elif section == STR:
msgstr += l.encode(encoding)
else:
print('Syntax error on %s:%d' % (infile, lno), \
'before:', file=sys.stderr)
print(l, file=sys.stderr)
sys.exit(1)
# Add last entry
if section == STR:
add(msgid, msgstr, fuzzy)
# Compute output
output = generate()
try:
open(outfile,"wb").write(output)
except IOError as msg:
print(msg, file=sys.stderr)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hVo:',
['help', 'version', 'output-file='])
except getopt.error as msg:
usage(1, msg)
outfile = None
# parse options
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print("msgfmt.py", __version__, file=sys.stderr)
sys.exit(0)
elif opt in ('-o', '--output-file'):
outfile = arg
# do it
if not args:
print('No input file given', file=sys.stderr)
print("Try `msgfmt --help' for more informati
|
on.", file=sys.stderr)
return
for filename in args:
make(filename, outfile)
if __name__ == '__main__':
main()
|
fafhrd91/mdl | mdl/registry.py | Python | apache-2.0 | 3,760 | 0 |
from __future__ import absolute_import
import zope.interface.interface
from zope.interface.adapter import AdapterLookup as _AdapterLookup
from zope.interface.adapter import AdapterRegistry as _AdapterRegistry
from zope.interface.registry import Components, ComponentLookupError
__all__ = ('Registry',)
NO_CONTRACTS = 0
USE_CONTRACTS = 1
USE_CONTRACTS_WARN = 2
class AdapterLookup(_AdapterLookup):
def lookup(self, required, provided, name=u'', default=None):
factory = super(AdapterLookup, self).lookup(
required, provided, name=name, default=default)
if factory is None or self._registry.level == NO_CONTRACTS:
return factory
contract = getattr(provided, '__contract__', None)
if contract is not None:
return contract.bind_adapter(factory, self._registry.logger)
return factory
class AdapterRegistry(_AdapterRegistry):
level = NO_CONTRACTS
logger = None
LookupClass = AdapterLookup
def __init__(self, bases=(), logger=None):
self.logger = logger
super(AdapterRegistry, self).__init__(bases=bases)
def enable_contracts(self, level):
self.level = level
class Registry(Components):
""" Registry """
def __init__(self, name='', bases=(),
use_contracts=NO_CONTRACTS, flavor=None, logger=None):
self._use_contracts = use_contracts
self._flavor = flavor
self._logger = logger
super(Registry, self).__init__(name, bases)
def _init_registries(self):
self.adapters = AdapterRegistry(logger=self._logger)
self.utilities = AdapterRegistry(logger=self._logger)
@property
def flavor(self):
return self._flavor
def enable_contracts(self, warn_only=False):
if warn_only:
self._use_contracts = USE_CONTRACTS_WARN
self.adapters.enable_contracts(USE_CONTRACTS_WARN)
else:
self._use_contracts = USE_CONTRACTS
self.adapters.enable_contracts(USE_CONTRACTS)
def _adapter_hook(self, interface, object, name='', default=None):
return self.queryAdapter(object, interface, name, default)
def install(self, use_contracts=False):
zope.interface.interface.adapter_hooks.append(self._adapter_hook)
if use_contracts:
self.enable_contracts()
def uninstall(self):
if self._adapter_hook in zope.interface.interface.adapter_hooks:
zope.interface.interface.adapter_hooks.remove(self._adapter_hook)
def queryAdapter(self, object, interface, name=u'', default=None):
if isinstance(object, (tuple, list)):
adapter = self.adapters.queryMultiAdapter(
object, interface, name, default)
else:
adapter = self.adapters.queryAdapter(
object, interface, name, default)
if self._use_contracts == NO_CONTRACTS:
return adapter
contract = getattr(interface, 'contract', None)
if contract and adapter is not None:
return contract(adapter, logger=self._logger)
return adapter
def getAdapter(self, object, interface, name=u''):
adapter = self.adapters.queryAdapter(object, interface, name)
if adapter is None:
raise ComponentLookupError(object, interface, name)
if self._use_contracts == NO_CONTRACTS:
return adapter
contract = getattr(interface, 'contract', None)
if contract:
return contract(adapter, logger=self._logger)
return adapter
def __enter__(self):
self.install()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.uninstall()
return False
|
luiscberrocal/django-test-tools | tests/mixins.py | Python | mit | 993 | 0.001007 |
import json
import os
class TestFixtureMixin(object):
def get_json_data(self, filename):
import environ
full_path = (environ.Path(__file__) - 1).root
fixture_path = None
max_levels = 4
current_level = 1
while fixture_path is None:
new_path = '{}{}{}'.format(full_path, os.sep, 'fixtures')
if os.path.exists(new_path):
fixture_path = new_path
else:
full_path = os.path.split(full_path)[0]
if current_level == max_levels:
break
current_level += 1
if fixture_path is None:
started_at = (environ.Path(__file__) - 1).root
raise ValueError('Could not find fixtures folder in {}'.format(started_at))
json_filename = '{}{}{}'.format(fixture_path, os.sep, filename)
with open(json_filename, 'r', encoding='utf-8') as jfile:
json_data = json.load(jfile)
return json_data
|
albertz/music-player | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsmetadata.py | Python | bsd-2-clause | 2,524 | 0.003566 |
from Foundation import *
from PyObjCTools.TestSupport import *
try:
unicode
except NameError:
unicode = str
class TestNSMetaData (TestCase):
def testConstants(self):
self.assertIsInstance(NSMetadataQueryDidStartGatheringNotification, unicode)
self.assertIsInstance(NSMetadataQueryGatheringProgressNotification, unicode)
self.assertIsInstance(NSMetadataQueryDidFinishGatheringNotification, unicode)
self.assertIsInstance(NSMetadataQueryDidUpdateNotification, unicode)
self.assertIsInstance(NSMetadataQueryResultContentRelevanceAttribute, unicode)
self.assertIsInstance(NSMetadataQueryUserHomeScope, unicode)
self.assertIsInstance(NSMetadataQueryLocalComputerScope, unicode)
self.assertIsInstance(NSMetadataQueryNetworkScope, unicode)
@min_os_level('10.7')
def testConstants10_7(self):
self.assertIsInstance(NSMetadataQueryLocalDocumentsScope, unicode)
self.assertIsInstance(NSMetadataQueryUbiquitousDocumentsScope, unicode)
self.assertIsInstance(NSMetadataQueryUbiquitousDataScope, unicode)
self.assertIsInstance(NSMetadataItemFSNameKey, unicode)
self.assertIsInstance(NSMetadataItemDisplayNameKey, unicode)
self.assertIsInstance(NSMetadataItemURLKey, unicode)
self.assertIsInstance(NSMetadataItemPathKey, unicode)
self.assertIsInstance(NSMetadataItemFSSizeKey, unicode)
self.assertIsInstance(NSMetadataItemFSCreationDateKey, unicode)
self.assertIsInstance(NSMetadataItemFSContentChangeDateKey, unicode)
self.assertIsInstance(NSMetadataItemIsUbiquitousKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemHasUnresolvedConflictsKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsDownloadedKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsDownloadingKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsUploadedKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsUploadingKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemPercentDownloadedKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemPercentUploadedKey, unicode)
def testMethods(self):
self.assertResultIsBOOL(NSMetadataQuery.startQuery)
self.assertResultIsBOOL(NSMetadataQuery.isStarted)
self.assertResultIsBOOL(NSMetadataQuery.isGathering)
self.assertResultIsBOOL(NSMetadataQuery.isStopped)
if __name__ == "__main__":
main()
|
fbradyirl/home-assistant | homeassistant/components/rest/binary_sensor.py | Python | apache-2.0 | 4,542 | 0.000881 |
"""Support for RESTful binary sensors."""
import logging
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorDevice,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from .sensor import RestData
_LOGGER = logging.getLogger(__name__)
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Binary Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): {cv.string: cv.string},
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(["POST", "GET"]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PAYLOAD): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the REST binary sensor."""
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
timeout = config.get(CONF_TIMEOUT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
device_class = config.get(CONF_DEVICE_CLASS)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = HTTPDigestAuth(username, password)
else:
auth = HTTPBasicAuth(username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
rest.update()
if rest.data is None:
raise PlatformNotReady
# No need to update the sensor now because it will determine its state
# based on the rest resource that has just been retrieved.
add_entities([RestBinarySensor(hass, rest, name, device_class, value_template)])
class RestBinarySensor(BinarySensorDevice):
"""Representation of a REST binary sensor."""
def __init__(self, hass, rest, name, device_class, value_template):
"""Initialize a REST binary sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._device_class = device_class
self._state = False
self._previous_data = None
self._value_template = value_template
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def available(self):
"""Return the availability of this sensor."""
return self.rest.data is not None
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self.rest.data is None:
return False
response = self.rest.data
if self._value_template is not None:
response = self._value_template.async_render_with_possible_json_value(
self.rest.data, False
)
try:
return bool(int(response))
except ValueError:
return {"true": True, "on": True, "open": True, "yes": True}.get(
response.lower(), False
)
def update(self):
"""Get the latest data from REST API and updates the state."""
self.rest.update()
|
pyaiot/pyaiot | utils/mqtt/mqtt-test-node.py | Python | bsd-3-clause | 5,525 | 0 |
import json
import logging
import asyncio
import random
import socket
from hbmqtt.client import MQTTClient, ClientException
from hbmqtt.mqtt.constants import QOS_1
logging.basicConfig(format='%(asctime)s - %(name)14s - '
'%(levelname)5s - %(message)s')
logger = logging.getLogger("mqtt_test_node")
MQTT_URL = 'mqtt://localhost:1886/'
NODE_ID = 'mqtt_test_node'
LED_VALUE = '0'
DELAY_CHECK = 30 # seconds
def pressure_value():
return '{}°hPa'.format(random.randrange(990, 1015, 1))
NODE_RESOURCES = {'name': {'delay': 0,
'value': lambda x=None: "MQTT test node"},
'os': {'delay': 0,
'value': lambda x=None: "riot"},
'ip': {'delay': 0,
'value': (lambda x=None:
socket.gethostbyname(
socket.gethostname()))},
'board': {'delay': 0, 'value': lambda x=None: "HP"},
'led': {'delay': 0,
'value': lambda x=None: LED_VALUE},
'temperature': {'delay': 5,
'value': (lambda x=None:
'{}°C'
.format(random.randrange(
20, 30, 1)))},
'pressure': {'delay': 10,
'value': (lambda x=None:
'{}hPa'
.format(random.randrange(
990, 1015, 1)))}
}
async def send_check(mqtt_client):
while True:
check_data = json.dumps({'id': NODE_ID})
asyncio.get_event_loop().create_task(publish(
mqtt_client, 'node/check', check_data))
await asyncio.sleep(DELAY_CHECK)
def send_values(mqtt_client):
for resource in NODE_RESOURCES:
topic = 'node/{}/{}'.format(NODE_ID, resource)
delay = NODE_RESOURCES[resource]['delay']
value = NODE_RESOURCES[resource]['value']
asyncio.get_event_loop().create_task(
publish_continuous(mqtt_client, topic, value, delay))
async def start_client():
"""Connect to MQTT broker and subscribe to node check resource."""
global LED_VALUE
mqtt_client = MQTTClient()
await mqtt_client.connect(MQTT_URL)
# Subscribe to 'gateway/check' with QOS=1
await mqtt_client.subscribe([('gateway/{}/discover'
.format(NODE_ID), QOS_1)])
await mqtt_client.subscribe([('gateway/{}/led/set'
.format(NODE_ID), QOS_1)])
asyncio.get_event_loop().create_task(send_check(mqtt_client))
asyncio.get_event_loop().create_task(send_values(mqtt_client))
while True:
try:
logger.debug("Waiting for incoming MQTT messages
|
from gateway")
# Blocked here until a message is received
message = await mqtt_client.deliver_message()
except ClientException as ce:
logger.error("Client exception: {}".format(ce))
break
except Exception as exc:
logger.error("General exception: {}".format(exc))
break
packet = message.publish_packet
topic_name = packet.variable_header.topic_name
data = packet.payload.data.decode()
logger.debug("Received message from gateway: {} => {}"
.format(topic_name, data))
if topic_name.endswith("/discover"):
if data == "resources":
topic = 'node/{}/resources'.format(NODE_ID)
value = json.dumps(list(NODE_RESOURCES.keys())).encode()
asyncio.get_event_loop().create_task(
publish(mqtt_client, topic, value))
else:
for resource in NODE_RESOURCES:
topic = 'node/{}/{}'.format(NODE_ID, resource)
value = NODE_RESOURCES[resource]['value']
msg = json.dumps({'value': value()})
asyncio.get_event_loop().create_task(
publish(mqtt_client, topic, msg))
elif topic_name.endswith("/led/set"):
LED_VALUE = data
topic = 'node/{}/led'.format(NODE_ID)
data = json.dumps({'value': data}, ensure_ascii=False)
asyncio.get_event_loop().create_task(
publish(mqtt_client, topic, data.encode()))
else:
logger.debug("Topic not supported: {}".format(topic_name))
async def publish(mqtt_client, topic, value):
if hasattr(value, 'encode'):
value = value.encode()
await mqtt_client.publish(topic, value, qos=QOS_1)
logger.debug("Published '{}' to topic '{}'".format(value.decode(), topic))
async def publish_continuous(mqtt_client, topic, value, delay=0):
while True:
data = json.dumps({'value': value()}, ensure_ascii=False)
await mqtt_client.publish(topic, data.encode('utf-8'), qos=QOS_1)
logger.debug("Published '{}' to topic '{}'".format(data, topic))
if delay == 0:
break
await asyncio.sleep(delay)
if __name__ == '__main__':
logger.setLevel(logging.DEBUG)
try:
asyncio.get_event_loop().run_until_complete(start_client())
except KeyboardInterrupt:
logger.info("Exiting")
asyncio.get_event_loop().stop()
|
BD2KGenomics/toil-old | src/toil/batchSystems/combinedBatchSystem.py | Python | mit | 4,039 | 0.013122 |
#!/usr/bin/env python
#Copyright (C) 2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import time
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
class CombinedBatchSystem(AbstractBatchSystem):
"""Takes two batch systems and a choice function to decide which to issue to.
"""
def __init__(self, config, batchSystem1, batchSystem2, batchSystemChoiceFn):
AbstractBatchSystem.__init__(self, config, 0, 0) #Call the parent constructor
self.batchSystem1 = batchSystem1
self.batchSystem2 = batchSystem2
self.batchSystemChoiceFn = batchSystemChoiceFn
def _jobIDForBatchSystem1(self, id):
return (1, id)
def _isJobIDForBatchSystem1(self, id):
return id[0] == 1
def _jobIDForBatchSystem2(self, id):
return (2, id)
def _isJobIDForBatchSystem2(self, id):
return id[0] == 2
def _strip(self, id):
return id[1]
def issueBatchJob(self, command, memory, cpu):
if self.batchSystemChoiceFn(command, memory, cpu):
return self._jobIDForBatchSystem1(self.batchSystem1.issueBatchJob(command, memory, cpu))
else:
return self._jobIDForBatchSystem2(self.batchSystem2.issueBatchJob(command, memory, cpu))
def killBatchJobs(self, jobIDs):
l, l2 = [], []
for jobID in jobIDs:
if self._isJobIDForBatchSystem1(jobID):
l.append(self._strip(jobID))
else:
assert self._isJobIDForBatchSystem2(jobID)
l2.append(self._strip(jobID))
self.batchSystem1.killBatchJobs(l)
self.batchSystem2.killBatchJobs(l2)
def getIssuedBatchJobIDs(self):
return [ self._jobIDForBatchSystem1(id) for id in self.batchSystem1.getIssuedBatchJobIDs() ] + [ self._jobIDForBatchSystem2(id) for id in self.batchSystem2.getIssuedBatchJobIDs() ]
def getRunningBatchJobIDs(self):
return [ self._jobIDForBatchSystem1(id) for id in self.batchSystem1.getRunningBatchJobIDs() ] + [ self._jobIDForBatchSystem2(id) for id in self.batchSystem2.getRunningBatchJobIDs() ]
def getUpdatedBatchJob(self, maxWait):
endTime = time.time() + maxWait
while 1:
updatedJob = self.batchSystem2.getUpdatedBatchJob(0) #Small positive values of wait seem to
if updatedJob != None:
return (self._jobIDForBatchSystem2(updatedJob[0]), updatedJob[1])
updatedJob = self.batchSystem1.getUpdatedBatchJob(0)
if updatedJob != None:
return (self._jobIDForBatchSystem1(updatedJob[0]), updatedJob[1])
remaining = endTime - time.time()
if remaining <= 0:
return None
time.sleep(0.01)
# FIXME: This should be a static method
def getRescueBatchJobFrequency(self):
return min(self.batchSystem1.getRescueBatchJobFrequency(), self.batchSystem2.getRescueBatchJobFrequency())
|
naelstrof/PugBot-Discord-Django | botapi/apache/override.py | Python | mit | 66 | 0 |
from botapi.settings import *
DEBUG = True
ALLOWED_HOSTS = ['*']
|
asmi92/odu-acm | raffleslave.py | Python | mit | 1,668 | 0.020983 |
import tweepy
from pymongo import MongoClient
class RaffleSlave:
"The class responsbile for a single raffle, a new instance for each individual raffle"
Params = None
api = None
alive = True
def __init__(self, hashtag, max, id, owner ):
self.Params = {}
self.Params['max'] = max
self.Params['hashtag'] = hashtag
self.Params[ '_id' ] = id
self.Params[ 'owner' ] = owner
auth = tweepy.OAuthHandler( '5Xr8HX71XetZYmGV86AmcEgVo', '85ql1GsrOLTRre0AqqprX9Xtm5SkMOWzJk9OVJPRiLM8bm72JA' )
auth.set_access_token( '832250876551110658-MLGfJUjJH6Ktwlf51AQQlSO9QPcp3ew', 'UvCcyNqwH3X7u2KfRWeYvlOWxN2k1ONfjrlpxRK1Shj33' )
self.api = tweepy.API( auth )
def update(self):
public_tweets = self.api.search( '@'+self.Params['owner']+' #'+self.Params['hashtag'] )
client = MongoClient()
db = client.raftl
tweetcollection = db.tweets
followers = self.api.followers_ids(self.Params['owner'])
for tweet in public_tweets:
tweetcollection.update_one( {'_id':tweet.id}, {'$set': {'_id':tweet.id, 'user_id':tweet.author.id, 'following':tweet.author.id in followers,'raffle_id':self.Params['_id'], 'body':tweet.text, 'username':tweet.author.screen_name, 'profile_img':tweet.author.profile_image_url_https } }, True )
#tweetcollection.update_one( {'_id':tweet.id}, {'$set': {'_id':tweet.id, 'user_id':tweet.author.id, 'following':True,'raffle_id':self.Params['_id'], 'body':tweet.text },'$unset':{'drawn':"" } }, True )
def getParams(self):
return self.Params
def checkAlive(self):
return self.alive
|
redhatrises/freeipa | ipaclient/remote_plugins/2_164/passwd.py | Python | gpl-3.0 | 2,428 | 0.000824 |
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Set a user's password
If someone other than a user changes that user's password (e.g., Helpdesk
resets it) then the password will need to be changed the first time it
is used. This is so the end-user is the only one who knows the password.
The IPA password policy controls how often a password may be changed,
what strength requirements exist, and the length of the password history.
EXAMPLES:
To reset your own password:
ipa passwd
To change another user's password:
ipa passwd tuser1
""")
register = Registry()
@register()
class passwd(Command):
__doc__ = _("Set a user's password.")
takes_args = (
parameters.Str(
'principal',
cli_name='user',
label=_(u'User name'),
default_from=DefaultFrom(lambda : None),
# FIXME:
# lambda: krb_utils.get_principal()
autofill=True,
no_convert=True,
),
parameters.Password(
'password',
label=_(u'New Password'),
confirm=True,
),
parameters.Password(
'current_password',
label=_(u'Current Password'),
default_from=DefaultFrom(lambda principal: None, 'principal'),
# FIXME:
# lambda principal: get_current_password(principal)
autofill=True,
),
)
takes_options = (
parameters.Password(
'otp',
required=False,
label=_(u'OTP'),
doc=_(u'One Time Password'),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
bool,
doc=_(u'True means the operation was successful'),
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
|
lavalamp-/ws-backend-community
|
lib/sqlalchemy/models/auth.py
|
Python
|
gpl-3.0
| 178 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import rest.models
from .base import from_django_model
WsAuthGroup = from_django_model(rest.models.WsAuthGroup)
|
FaustineMaffre/GossipProblem-PDDL-generator
|
neggoalsparser/parser.py
|
Python
|
mit
| 3,303 | 0.003633 |
""" Parser for the negative goals.
Allows parsing of sets of the form, e.g., {i-j-k : i!=j & j!=k & i!=k} U
{i-j : i<3 & j<=3}, meaning that i should not know whether j knows the secret
of k, for i, j, k distinct, and that i should not know the secret of j for
either i = 1, 2 and j = 1, 2, 3.
Also allows instantiated negative goals of the form {1-2-3, 1-3}, meaning that 1
should not know whether 2 knows the secret of 3 and the secret of 3 (equivalent
to {i-j-k : i=1 & j=2 & k=3} U {i-j : i=1 & j=3}).
"""
from pypeg2 import *
import re
import sys
""" Description of the grammar of negative goals.
Comp ::= = | != | <= | >= | < | >
Int ::= <integer>
AgtName ::= <lower-case letter> | AgtName<lower-case letter> | AgtName<digit>
Cst ::= AgtName Comp AgtName | AgtName Comp Int
Csts ::= Cst | Csts & Csts
Agts ::= AgtName | Agts-Agts
AgtsInst ::= Int | AgtsInst-AgtsInst
AgtsInsts ::= AgtsInst | AgtsInsts, AgtsInsts
Set ::= {Agts : Csts} | {AgtsInsts}
Sets ::= Set | Sets U Sets
"""
""" Comparison operator.
"""
class Comp(str):
grammar = re.compile(r'(=|!=|<=|>=|<|>)')
""" Integer.
"""
class Int(int):
grammar = attr('nb', re.compile(r'[1-9]\d*'))
""" Name of an agent: a lower case letter possibly followed by lower case
letters and numbers.
"""
class AgtName(str):
grammar = attr('name', re.compile(r'[a-z]([a-z]|[0-9])*'))
""" Simple constraint: a comparison between two agents or an agent name and an
integer.
"""
class Cst(List):
grammar = AgtName, Comp, [AgtName, Int]
def __repr__(self):
return self[0].name + ' ' + self[1] + ' ' + \
(self[2].name if type(self[2]) == AgtName else self[2].nb)
""" Conjunction of constraints, separared by '&'.
"""
class Csts(List):
grammar = csl(Cst, separator='&')
def __repr__(self):
return ' & '.join(str(cst) for cst in self)
""" Sequence of agents, separated by '-'.
"""
class Agts(List):
grammar = csl(AgtName, separator='-')
def __repr__(self):
return '-'.join(i.name for i in self)
""" Sequence of 'instantiated' agents (that is, integers), separated by '-'.
"""
class AgtsInst(List):
grammar = csl(Int, separator='-')
def __repr__(self):
return '-'.join(i.nb for i in self)
""" Several sequences of instantiated agents, separated by ','.
"""
class AgtsInsts(List):
grammar = csl(AgtsInst, separator=',')
def __repr__(self):
return ', '.join(str(ai) for ai in self)
""" Set: either agents followed by constraints (specified by ':'), or sequences
of instantiated agents, separated by ','.
"""
class Set(List):
grammar = '{', [(Agts, ':', Csts), AgtsInsts], '}'
def __repr__(self):
return '{' + str(self[0]) + \
(' : ' + str(self[1]) if type(self[0]) == Agts else '') + '}'
""" Union of sets, separated by 'U'.
"""
class Sets(List):
grammar = csl(Set, separator='U')
def __repr__(self):
return ' U '.join(str(s) for s in self)
""" Parses a string as Sets.
"""
def parseSet(s):
try:
res = parse(s, Sets)
except SyntaxError:
print('Error: syntax error in negative goals.')
sys.exit(1)
return res
# test1 = '{i1-j-k : i1>=1 & j<2} U {i-j: i!=j} U {i}'
# test2 = '{1-10-6}'
# test3 = test1 + ' U ' + test2
# ast = parse(test3, Sets)
# print(ast)
|
prontointern/django-contact-form
|
django_contact_form_project/contacts/tests/test_views.py
|
Python
|
mit
| 14,146 | 0.000141 |
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import Contact
class ContactViewTest(TestCase):
def setUp(self):
self.url = reverse('contact')
self.response = self.client.get(self.url)
def test_contact_view_is_accessible(self):
self.assertEqual(self.response.status_code, 200)
def test_contact_view_should_have_form_tag(self):
expected = '<form action="." method="post">'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_firstname_input(self):
expected = '<label>Firstname: '
self.assertContains(self.response, expected, status_code=200)
expected = '<input id="id_firstname" maxlength="100" name="firstname" '
expected += 'type="text" />'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_lastname_and_input(self):
expected = '<label>Last Name:</label>'
self.assertContains(self.response, expected, status_code=200)
expected = '<input id="id_lastname" maxlength="100" name="lastname" '
expected += 'type="text" />'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_email_and_input(self):
expected = '<label>Email:</label>'
self.assertContains(self.response, expected, status_code=200)
expected = '<input id="id_email" maxlength="100" name="email" '
expected += 'type="email" />'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_have_submit_button(self):
expected = '<input type="submit" value="Submit">'
self.assertContains(self.response, expected, status_code=200)
def test_contact_view_should_accessible_by_post(self):
response = self.client.post(self.url)
self.assertEqual(response.status_code, 200)
@patch('contacts.views.GeoIP')
def test_submit_contact_data_successfully(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': 'john@smith.com'
}
self.client.post(self.url, data=data)
contact = Contact.objects.get(firstname='John')
self.assertEqual(contact.firstname, 'John')
self.assertEqual(contact.lastname, 'Smith')
self.assertEqual(contact.email, 'john@smith.com')
self.assertEqual(contact.ip, '58.137.162.34')
self.assertEqual(contact.lat, '13.754')
self.assertEqual(contact.lng, '100.5014')
def test_submit_contact_data_without_firstname_should_not_save_data(self):
data = {
'firstname': '',
'lastname': 'Smith',
'email': 'john@smith.com'
}
self.client.post(self.url, data=data)
contact_count = Contact.objects.filter(lastname='Smith').count()
self.assertEqual(contact_count, 0)
def test_submit_contact_data_without_lastname_should_not_save_data(self):
data = {
'firstname': 'John',
'lastname': '',
'email': 'john@smith.com'
}
self.client.post(self.url, data=data)
contact_count = Contact.objects.all().count()
self.assertEqual(contact_count, 0)
def test_submit_contact_data_without_email_should_not_save_data(self):
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': ''
}
self.client.post(self.url, data=data)
contact_count = Contact.objects.filter(lastname='Smith').count()
self.assertEqual(contact_count, 0)
def test_submit_contact_data_without_firstname_should_get_error_message(
self):
data = {
'firstname': '',
'lastname': 'Smith',
'email': 'john@smith.com'
}
response = self.client.post(self.url, data=data)
expected = 'This field is required.'
self.assertContains(response, expected, status_code=200)
def test_submit_contact_data_without_email_should_get_error_message(
self
):
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': ''
}
response = self.client.post(self.url, data=data)
expected = 'This field is required.'
self.assertContains(response, expected, status_code=200)
def test_submit_contact_data_without_lastname_should_get_error_message(
self
):
data = {
'firstname': 'John',
'lastname': '',
'email': 'john@smith.com'
}
response = self.client.post(self.url, data=data)
expected = 'This field is required.'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_redirect_to_thank_you_page_successfully(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'John',
            'lastname': 'Smith',
'email': 'john@smith.com'
}
response = self.client.post(
self.url,
data=data
)
self.assertRedirects(
response,
'/thankyou/?firstname=John',
status_code=302,
target_status_code=200
)
@patch('contacts.views.GeoIP')
def test_redirected_page_should_contain_firstname(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country_code": "TH",
"isp": "CS LOXINFO PUBLIC COMPANY LIMITED",
"country": "Thailand",
"country_code3": "THA",
"region_code": "40"
}
data = {
'firstname': 'John',
'lastname': 'Smith',
'email': 'john@smith.com'
}
response = self.client.post(
self.url,
data=data,
follow=True
)
expected = 'Firstname: John'
self.assertContains(response, expected, status_code=200)
@patch('contacts.views.GeoIP')
def test_thank_you_page_should_contain_lastname(self, mock):
mock.return_value.getGeoIP.return_value = {
"longitude": 100.5014,
"latitude": 13.754,
"asn": "AS4750",
"offset": "7",
"ip": "58.137.162.34",
"area_code": "0",
"continent_code": "AS",
"dma_code": "0",
"city": "Bangkok",
"timezone": "Asia/Bangkok",
"region": "Krung Thep",
"country
|
SimonDevon/simple-python-shapes
|
name-draw1.py
|
Python
|
mit
| 605 | 0.006612 |
import turtle
import random
# Let's create our turtle and call him Simon!
simon = turtle.Turtle()
# We'll set the background to black
turtle.bgcolor("black")
# This is our list of colours
colors = ["red", "green", "blue"]
# We need to ask the user their name
name = turtle.textinput("Name", "What is your name?")
simon.penup()
for number in range(30): # We'll draw the name 30 times
simon.forward(number * 10)
simon.write(name, font=("Arial", 12, "bold")) # This writes the name and chooses a font
simon.right(92)
simon.pencolor(random.choice(colors)) # This chooses a random colour
|
manjaro/thus
|
thus/misc/validation.py
|
Python
|
gpl-3.0
| 5,581 | 0.000896 |
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Miscellaneous validation of user-entered data
#
# Copyright (C) 2005 Junta de Andalucía
# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd.
# Copyright (C) 2015 Manjaro (http://manjaro.org)
#
# Validation library.
# Created by Antonio Olmo <aolmo#emergya._info> on 26 jul 2005.
from gi.repository import Gtk
def check_grub_device(device):
"""Check that the user entered a valid boot device.
@return True if the device is valid, False if it is not."""
import re
import os
regex = re.compile(r'^/dev/([a-zA-Z0-9]+|mapper/[a-zA-Z0-9_]+)$')
if regex.search(device):
if not os.path.exists(device):
return False
return True
# (device[,part-num])
regex = re.compile(r'^\((hd|fd)[0-9]+(,[0-9]+)*\)$')
if regex.search(device):
return True
else:
return False
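# For example, "/dev/sda" is accepted when that device node exists, while
# GRUB-style specifiers such as "(hd0)" or "(hd0,1)" are accepted without an
# existence check.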
NAME_LENGTH = 1
NAME_BADCHAR = 2
NAME_BADHYPHEN = 3
NAME_BADDOTS = 4
def check(element, value):
if element == 'username':
return check_username(value)
if element == 'hostname':
return check_hostname(value)
def check_username(name):
""" Check the correctness o
|
f a proposed user name.
@return empty list (valid) or list of:
- C{NAME_LENGTH} wrong length.
- C{NAME_BADCHAR} contains invalid characters.
- C{NAME_BADHYPHEN} starts or ends with a hyphen.
- C{NAME_BADDOTS} contains consecutive/initial/final dots."""
import re
result = set()
    if len(name) < 1 or len(name) > 40:
result.add(NAME_LENGTH)
regex = re.compile(r'^[a-z0-9.\-]+$')
if not regex.search(name):
result.add(NAME_BADCHAR)
if name.startswith('-') or name.endswith('-'):
result.add(NAME_BADHYPHEN)
if '.' in name:
result.add(NAME_BADDOTS)
return sorted(result)
def check_hostname(name):
""" Check the correctness of a proposed host name.
@return empty list (valid) or list of:
- C{NAME_LENGTH} wrong length.
- C{NAME_BADCHAR} contains invalid characters.
- C{NAME_BADHYPHEN} starts or ends with a hyphen.
- C{NAME_BADDOTS} contains consecutive/initial/final dots."""
import re
result = set()
if len(name) < 1 or len(name) > 63:
result.add(NAME_LENGTH)
regex = re.compile(r'^[a-zA-Z0-9.-]+$')
if not regex.search(name):
result.add(NAME_BADCHAR)
if name.startswith('-') or name.endswith('-'):
result.add(NAME_BADHYPHEN)
if '..' in name or name.startswith('.') or name.endswith('.'):
result.add(NAME_BADDOTS)
return sorted(result)
# Based on setPasswordStrength() in Mozilla Seamonkey, which is tri-licensed
# under MPL 1.1, GPL 2.0, and LGPL 2.1.
def password_strength(password):
upper = lower = digit = symbol = 0
for char in password:
if char.isdigit():
digit += 1
elif char.islower():
lower += 1
elif char.isupper():
upper += 1
else:
symbol += 1
length = len(password)
if length > 5:
length = 5
if digit > 3:
digit = 3
if upper > 3:
upper = 3
if symbol > 3:
symbol = 3
strength = (
((length * 0.1) - 0.2) +
(digit * 0.1) +
(symbol * 0.15) +
(upper * 0.1))
if strength > 1:
strength = 1
if strength < 0:
strength = 0
return strength
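# For example, password_strength("abc") is roughly 0.1 (short, lowercase only),
# while password_strength("Abc123!x") is roughly 0.85, which
# human_password_strength() would report as a "Good password".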
def human_password_strength(password):
strength = password_strength(password)
length = len(password)
if length == 0:
hint = ''
color = ''
elif length < 6:
hint = _('Password is too short')
color = 'darkred'
elif strength < 0.5:
hint = _('Weak password')
color = 'darkred'
elif strength < 0.75:
hint = _('Fair password')
color = 'darkorange'
elif strength < 0.9:
hint = _('Good password')
color = 'darkgreen'
else:
hint = _('Strong password')
color = 'darkgreen'
return hint, color
def check_password(password, verified_password, password_ok,
password_false, password_error_label,
password_strength, icon_ok, icon_warning,
allow_empty=False):
complete = True
passw = password.get_text()
vpassw = verified_password.get_text()
if passw != vpassw:
complete = False
password_ok.hide()
if passw and (len(vpassw) / float(len(passw)) > 0.8):
txt = _("Passwords do not match")
txt = '<small><span foreground="darkred"><b>{0}</b></span></small>'.format(txt)
password_error_label.set_markup(txt)
password_error_label.show()
else:
password_error_label.hide()
if allow_empty:
password_strength.hide()
elif not passw:
password_strength.hide()
complete = False
else:
(txt, color) = human_password_strength(passw)
txt = '<small><span foreground="{0}"><b>{1}</b></span></small>'.format(color, txt)
password_strength.set_markup(txt)
password_strength.show()
if passw == vpassw:
password_ok.set_from_icon_name(icon_ok, Gtk.IconSize.LARGE_TOOLBAR)
password_ok.show()
password_false.hide()
else:
password_false.set_from_icon_name(icon_warning, Gtk.IconSize.LARGE_TOOLBAR)
password_false.show()
password_ok.hide()
return complete
|
googleads/googleads-adxseller-examples
|
python/v2.0/generate_report.py
|
Python
|
apache-2.0
| 2,740 | 0.006204 |
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieves a saved report, or a report for the specified ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: reports.generate
"""
__author__ = 'api.Dean.Lukies@gmail.com (Dean Lukies)'
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'--ad_client_id',
help='The ID of the ad client for which to generate a report')
argparser.add_argument(
'--report_id',
help='The ID of the saved report to generate')
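# Example invocations (the ad client id and saved report id are hypothetical):
#   python generate_report.py --ad_client_id ca-pub-1234567890123456
#   python generate_report.py --report_id 1234567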
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'adexchangeseller', 'v2.0', __doc__, __file__, parents=[argparser],
scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
# Process flags and read their values.
ad_client_id = flags.ad_client_id
saved_report_id = flags.report_id
try:
# Retrieve report.
if saved_report_id:
result = service.accounts().reports().saved().generate(
savedReportId=saved_report_id, accountId='myaccount').execute()
elif ad_client_id:
result = service.accounts().reports().generate(
accountId='myaccount',
startDate='2014-07-01', endDate='2014-08-01',
filter=['AD_CLIENT_ID==' + ad_client_id],
metric=['PAGE_VIEWS', 'AD_REQUESTS', 'AD_REQUESTS_COVERAGE',
'CLICKS', 'AD_REQUESTS_CTR', 'COST_PER_CLICK',
'AD_REQUESTS_RPM', 'EARNINGS'],
dimension=['DATE'],
sort=['+DATE']).execute()
else:
argparser.print_help()
sys.exit(1)
# Display headers.
for header in result['headers']:
print '%25s' % header['name'],
print
# Display results.
for row in result['rows']:
for column in row:
print '%25s' % column,
print
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
TIY-Durham/TIY-Overflow
|
api/overflow/urls.py
|
Python
|
gpl-3.0
| 1,416 | 0.000706 |
"""overflow URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# from rest_framework import routers
from rest_framework_nested import routers
from stackoverflow import views
router = routers.SimpleRouter()
router.register(r'questions', views.QuestionViewSet)
router.register(r'users', views.UserViewSet)
questions_router = routers.NestedSimpleRouter(router, r'questions', lookup='question')
questions_router.register(r'answers', views.AnswerViewSet)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls)),
url(r'^api/', include(questions_router.urls)),
url(r'^docs/', include('rest_framework_swagger.urls')),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
|
ytanay/thinglang
|
thinglang/parser/statements/break_statement.py
|
Python
|
mit
| 770 | 0.003896 |
from thinglang.compiler.opcodes import OpcodeJump
from thinglang.parser.blocks.conditional import Conditional
from thinglang.parser.blocks.loop import Loop
from thinglang.parser.nodes import BaseNode
class BreakStatement(BaseNode):
"""
Jumps to the end of the currently executing loop or conditional
"""
EMITTABLE = True
MUST_CLOSE = False
def __init__(self, raw, source_ref):
super().__init__([])
self.source_ref = source_ref
def compile(self, context): # TODO: assert no children
container = self.ascend(Loop)
if not container:
            raise Exception('Cannot break outside of loop')  # TODO: should be StructureError
context.append(OpcodeJump(context.jump_out[container]), self.source_ref)
|
khalibartan/pgmpy
|
pgmpy/factors/discrete/__init__.py
|
Python
|
mit
| 236 | 0 |
from .DiscreteFactor import State, DiscreteFactor
from .CPD import TabularCPD
from .JointProbabilityDistribution import JointProbabilityDistribution
__all__ = ['TabularCPD',
'DiscreteFactor',
'State'
]
|
utarsuno/urbtek
|
nexus_django/nexus_front_end/apps.py
|
Python
|
apache-2.0
| 103 | 0 |
from django.apps import AppConfig
class NexusFrontEndConfig(AppConfig):
    name = 'nexus_front_end'
|
sysadminmatmoz/ingadhoc
|
sale_exceptions_ignore_approve/__openerp__.py
|
Python
|
agpl-3.0
| 1,775 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Sale Exceptions Ingore Approve Directly",
'version': '8.0.1.0.0',
'category': 'Product',
'sequence': 14,
'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
    'summary': 'When ignoring a sale exception, approve the sale order directly',
"description": """
    Sale Exceptions Ignore Approve Directly
=======================================
    When ignoring a sale exception, approve the sale order directly
""",
"depends": [
"sale_exceptions",
],
'external_dependencies': {
},
"data": [
        'wizard/sale_exception_confirm_view.xml',
'views/sale_view.xml',
],
'demo': [
],
'test': [
],
"installable": True,
'auto_install': False,
'application': False,
}
|
PressLabs/silver
|
silver/tests/unit/test_invoice.py
|
Python
|
apache-2.0
| 8,323 | 0.00036 |
# Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from datetime import date, timedelta
from decimal import Decimal
from six.moves import zip
from django.test import TestCase
from silver.models import DocumentEntry, Proforma, Invoice
from silver.tests.factories import (ProformaFactory, InvoiceFactory,
DocumentEntryFactory, CustomerFactory)
class TestInvoice(TestCase):
def test_pay_invoice_related_proforma_state_change_to_paid(self):
proforma = ProformaFactory.create()
proforma.issue()
proforma.create_invoice()
assert proforma.related_document.state == Invoice.STATES.ISSUED
proforma.related_document.pay()
assert proforma.related_document.state == Invoice.STATES.PAID
assert proforma.state == Proforma.STATES.PAID
def test_clone_invoice_into_draft(self):
invoice = InvoiceFactory.create()
invoice.issue()
invoice.pay()
entries = DocumentEntryFactory.create_batch(3)
invoice.invoice_entries.add(*entries)
clone = invoice.clone_into_draft()
assert clone.state == Invoice.STATES.DRAFT
assert clone.paid_date is None
assert clone.issue_date is None
assert clone.related_document is None
assert (clone.series != invoice.series or
clone.number != invoice.number)
assert clone.sales_tax_percent == invoice.sales_tax_percent
assert clone.sales_tax_name == invoice.sales_tax_name
assert not clone.archived_customer
assert not clone.archived_provider
assert clone.customer == invoice.customer
assert clone.provider == invoice.provider
assert clone.currency == invoice.currency
assert clone._last_state == clone.state
assert clone.pk != invoice.pk
assert clone.id != invoice.id
assert not clone.pdf
assert clone.invoice_entries.count() == 3
assert invoice.invoice_entries.count() == 3
entry_fields = [entry.name for entry in DocumentEntry._meta.get_fields()]
for clone_entry, original_entry in zip(clone.invoice_entries.all(),
invoice.invoice_entries.all()):
for entry in entry_fields:
if entry not in ('id', 'proforma', 'invoice'):
assert getattr(clone_entry, entry) == \
getattr(original_entry, entry)
assert invoice.state == Invoice.STATES.PAID
def test_cancel_issued_invoice_with_related_proforma(self):
proforma = ProformaFactory.create()
proforma.issue()
if not proforma.related_document:
proforma.create_invoice()
proforma.related_document.cancel()
assert proforma.related_document.state == proforma.state == Invoice.STATES.CANCELED
def _get_decimal_places(self, number):
return max(0, -number.as_tuple().exponent)
def test_invoice_total_decimal_points(self):
invoice_entries = DocumentEntryFactory.create_batch(3)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
assert self._get_decimal_places(invoice.total) == 2
def test_invoice_total_before_tax_decimal_places(self):
invoice_entries = DocumentEntryFactory.create_batch(3)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
invoice.sales_tax_percent = Decimal('20.00')
assert self._get_decimal_places(invoice.total_before_tax) == 2
def test_invoice_tax_value_decimal_places(self):
invoice_entries = DocumentEntryFactory.create_batch(3)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
invoice.sales_tax_percent = Decimal('20.00')
assert self._get_decimal_places(invoice.tax_value) == 2
def test_invoice_total_with_tax_integrity(self):
invoice_entries = DocumentEntryFactory.create_batch(5)
invoice = InvoiceFactory.create(invoice_entries=invoice_entries)
invoice.sales_tax_percent = Decimal('20.00')
self.assertEqual(invoice.total, invoice.total_before_tax + invoice.tax_value)
def test_draft_invoice_series_number(self):
invoice = InvoiceFactory.create()
invoice.number = None
assert invoice.series_number == '%s-draft-id:%d' % (invoice.series,
invoice.pk)
invoice.series = None
assert invoice.series_number == 'draft-id:%d' % invoice.pk
def test_issues_invoice_series_number(self):
invoice = InvoiceFactory.create(state=Invoice.STATES.ISSUED)
assert invoice.series_number == '%s-%s' % (invoice.series,
invoice.number)
def test_invoice_due_today_queryset(self):
invoices = InvoiceFactory.create_batch(5)
invoices[0].due_date = date.today()
invoices[0].save()
invoices[1].due_date = date.today()
invoices[1].issue()
invoices[2].due_date = date.today() - timedelta(days=1)
invoices[2].issue()
invoices[2].pay()
invoices[3].due_date = date.today()
invoices[3].issue()
invoices[3].cancel()
invoices[4].due_date = date.today() + timedelta(days=1)
invoices[4].issue()
queryset = Invoice.objects.due_today()
assert queryset.count() == 1
assert invoices[1] in queryset
def test_invoice_due_this_month_queryset(self):
invoices = InvoiceFactory.create_batch(4)
invoices[0].due_date = date.today().replace(day=20)
invoices[0].issue()
invoices[1].due_date = date.today().replace(day=1)
invoices[1].issue()
invoices[2].due_date = date.today() - timedelta(days=31)
invoices[2].issue()
invoices[3].issue()
invoices[3].cancel()
queryset = Invoice.objects.due_this_month()
assert queryset.count() == 2
for invoice in invoices[:2]:
assert invoice in queryset
def test_invoice_overdue_queryset(self):
invoices = InvoiceFactory.create_batch(3)
invoices[0].due_date = date.today() - timedelta(days=1)
invoices[0].issue()
invoices[1].due_date = date.today() - timedelta(days=3)
        invoices[1].issue()
invoices[2].due_date = date.today() - timedelta(days=31)
invoices[2].issue()
invoices[2].pay()
queryset = Invoice.objects.overdue()
assert queryset.count() == 2
for invoice in invoices[:2]:
assert invoice in queryset
def test_invoice_overdue_since_last_month_queryset(self):
invoices = InvoiceFactory.create_batch(3)
invoices[0].due_date = date.today().replace(day=1)
invoices[0].issue()
invoices[1].due_date = date.today() - timedelta(days=31)
invoices[1].issue()
queryset = Invoice.objects.overdue_since_last_month()
assert queryset.count() == 1
assert invoices[1] in queryset
def test_customer_currency_used_for_transaction_currency(self):
customer = CustomerFactory.create(currency='EUR')
invoice = InvoiceFactory.create(customer=customer,
transaction_currency=None)
self.assertEqual(invoice.transaction_currency, 'EUR')
def test_invoice_currency_used_for_transaction_currency(self):
customer = CustomerFactory.create(currency=None)
invoice = InvoiceFactory.create(customer=customer,
cur
|
msullivan/advent-of-code
|
2021/14balt.py
|
Python
|
mit
| 1,674 | 0 |
#!/usr/bin/env python3
"""
Find characters deep in the expanded string, for fun.
"""
import sys
from collections import Counter
def real_step(s, rules):
out = ""
for i in range(len(s)):
out += s[i]
k = s[i:i+2]
if k in rules:
out += rules[k]
return out
def step(cnt, rules):
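    # One expansion step over pair counts: each pair "AB" with a rule AB -> C
    # becomes the two pairs "AC" and "CB"; pairs without a rule carry over unchanged.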
ncnt = Counter()
for k, v in cnt.items():
if k in rules:
c = rules[k]
ncnt[k[0] + c] += v
ncnt[c + k[1]] += v
else:
ncnt[k] += v
return ncnt
def size(s, n, rules):
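    # Length of the string after n expansion steps, derived from pair counts:
    # every character is the second element of exactly one pair, except s[0].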
cnt = Counter(s[i:i+2] for i in range(len(s)-1))
for _ in range(n):
cnt = step(cnt, rules)
lcnt = Counter(s[0])
for k, v in cnt.items():
        lcnt[k[1]] += v
return sum(lcnt.values())
def get_char(s, idx, iters, rules):
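    # Descend toward index idx without building the full string: split s in half
    # (keeping the overlap character), use size() to decide which half the index
    # falls into after the remaining steps, then expand only that half.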
for i in range(iters):
h = len(s) // 2
first = s[:h+1]
sz = size(first, iters - i, rules)
if idx < sz:
s = real_step(first, rules)
else:
s = real_step(s[h:], rules)
idx -= sz - 1
return s[idx]
def main(args):
data = [s.strip() for s in sys.stdin]
s = data[0]
    rules = dict(x.split(" -> ") for x in data[2:])
# Make sure it works
t = s
for i in range(4):
t = real_step(t, rules)
for idx in range(len(t)):
c = get_char(s, idx, 4, rules)
assert t[idx] == c
# find some random characters deep into it
print(size(s, 40, rules))
start = 7311752324710
out = ""
for i in range(10):
out += get_char(s, start + i, 40, rules)
print(out)
if __name__ == '__main__':
main(sys.argv)
|
damiendr/callipy
|
setup.py
|
Python
|
bsd-2-clause
| 1,056 | 0.004735 |
#!/usr/bin/env python
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
readme = path.join(here, 'README.md')
try:
from pypandoc import convert
long_description = convert(readme, 'rst')
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
with open(readme, 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name='callipy',
description='Calling IPython notebooks with arguments',
long_description=long_description,
version='0.3.2',
author='Damien Drix',
author_email='damien.drix+pypi@gmail.com',
url='https://github.com/damiendr/callipy',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: IPython',
],
py_modules=['callipy'],
install_requires=[
"runipy",
"ipython",
],
)
|
landlab/landlab
|
landlab/components/profiler/profiler.py
|
Python
|
mit
| 13,854 | 0.000217 |
# coding: utf8
# ! /usr/env/python
"""profiler.py component to create profiles with user-defined endpoints."""
from collections import OrderedDict
import numpy as np
from matplotlib import cm, colors, pyplot as plt
from landlab.components.profiler.base_profiler import _BaseProfiler
class Profiler(_BaseProfiler):
"""Extract and plot profiles set up using points within a grid.
The profile is constructed from the first to final point in ``endpoints``.
Endpoints are located at grid nodes. Two successive endpoints bound a
profile segment. A profile with one segment is a straight line. The
segments of a profile with multiple segments meet at endpoints. The grid
nodes along the profile are sampled, including the segment endpoints. The
extracted quantity of the node is retained. No interpolation is conducted
even for profile traces that reside between nodes.
The structure of the profile in a model grid is diagrammed below. The grid
contains nine columns and nine rows. The profile is constructed from three
endpoints that bound two segments. Here, ``o`` indicates a segment
endpoint, ``.`` and ``*`` are sample nodes of the first and second segment,
respectively. ``X`` are nodes not included in the profile. The first
segment begins in the lower-left and continues horizontally and almost
reaches the right boundary. The second segment is joined to the first in
the lower-right of the grid and it continues diagonally to the upper-left.
Segments have seven sample points each (nodes at endpoints are also
sampled). The segments share the second endpoint. Segment and sample
ordering is dictated by the ordering of endpoints. If the horizontal
segment is the first segment, the endpoints used to construct this profile
must be ordered: lower-left, lower-right, and then upper-left.::
X X X X X X X X X
X o X X X X X X X
X X * X X X X X X
X X X * X X X X X
X X X X * X X X X
X X X X X * X X X
X X X X X X * X X
X o . . . . . o X
X X X X X X X X X
The node IDs and distances along the profile are stored in a data structure
called ``data_structure``. It is a dictionary with keys indicating the
segment IDs that are enumerated along the profile.
By default, a unique color will be assigned to each segment. To change the
color, a user can change values stored in ``data_structure``. Additionally,
a ``cmap`` keyword argument can provide some user control over the color at
the instantiation of the component.
The data structure of the example above will look as follows:
.. code-block:: python
{0: {
'ids': [10, 11, 12, 13, 14, 15, 16],
'distances': [0, 1, 2, 3, 4, 5, 6]
'color': (0.27, 0, 0.33, 1)
},
1: {
'ids': [16, 24, 32, 40, 48, 56, 64],
'distances': [6, 7.41, 8.83, 10.24, 11.66, 13.07, 14.49]
'color': (0.13, 0.57, 0.55, 1)
}
}
Examples
--------
Create a model grid with the same dimensions as the diagram above.
>>> from landlab import RasterModelGrid
>>> from landlab.components import Profiler
>>> import numpy as np
>>> mg = RasterModelGrid((10, 10), 10)
>>> mg.at_node['topographic__elevation'] = mg.node_x * mg.node_y
Create a profile with three endpoints. This profile is laid out the same as
the diagram above.
>>> endpoints = [10, 16, 64]
>>> profiler = Profiler(mg, endpoints)
>>> profiler.run_one_step()
The keys of the data structure are the segment ids.
>>> profiler.data_structure.keys()
odict_keys([0, 1])
The data structure contains data of segment samples. Below is the first
segment.
>>> profiler.data_structure[0]['ids']
array([10, 11, 12, 13, 14, 15, 16])
>>> profiler.data_structure[0]['distances']
array([ 0., 10., 20., 30., 40., 50., 60.])
>>> np.round(profiler.data_structure[0]['color'], decimals=2)
array([ 0.27, 0. , 0.33, 1. ])
Note that the first node of the second segment is the same as the final
node of the first segment.
>>> profiler.data_structure[1]['ids']
array([16, 26, 35, 45, 54, 64])
Alternative to nodes, profiles can be instantiated with coordinates.
>>> profiler = Profiler(mg, [(10, 10), (70, 10), (10, 70)])
Endpoints can also be set with a combination of coordinates and nodes.
>>> profiler = Profiler(mg, [(10, 10), 16, (10, 70)])
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
None Listed
"""
_name = "Profiler"
_unit_agnostic = True
def __init__(self, grid, endpoints, cmap="viridis"):
"""Instantiate Profiler.
Parameters
----------
grid : RasterModelGrid
A landlab RasterModelGrid.
endpoints : list of node id integers or coordinate tuples
The endpoints that bound segments of the profile. Endpoints can be
node ids and/or tuples of coordinates (x, y, where these
coordinates are the measurement from the grid lower-left). The list
can be a mix of node ids and coordinate tuples. The profile begins
with the first element of `endpoints` and continues in the order of
this list.
cmap : str
A valid matplotlib cmap string. Default is "viridis".
"""
super().__init__(grid)
self._cmap = plt.get_cmap(cmap)
if not isinstance(endpoints, list) or len(endpoints) < 2:
msg = (
"`endpoints` must be a list of at least 2 node IDs or a "
"list of at least two tuples where each tuple contains the "
"x, y coordinates of endpoints."
)
raise ValueError(msg)
# Check if `endpoints` are within grid bounds while setting
# `_end_nodes`.
self._end_nodes = []
for point in endpoints:
node, _ = self._get_node_and_coords(point)
self._end_nodes.append(node)
@property
def data_structure(self):
"""OrderedDict defining the profile.
The node IDs and distances along the profile are stored in
``data_structure``. It is a dictionary with keys of the segment ID.
The value of each key is itself a dictionary of the segment attributes.
First, 'ids' contains a list of the node IDs of segment samples ordered
from the start to the end of the segment. It includes the endpoints.
Second, 'distances' contains a list of along-profile distances that
mirrors the list in 'ids'. Finally, 'color' is an RGBA tuple indicating
the color for the segment.
"""
return self._data_struct
def _create_profile_structure(self):
"""Create the data structure of the profile.
        The profile is processed by segment. Segments are bound by successive
endpoints. The cumulative distance along the profile is accumulated by
iteratively adding segment lengths.
"""
self._data_struct = OrderedDict()
grid = self._grid
endnodes = self._end_nodes
cum_dist = 0
for i_endpt in range(len(endnodes) - 1):
# Get the endpoints and samples of the segment.
start_node, start_xy = self._get_node_and_coords(endnodes[i_endpt])
end_node, end_xy = self._get_node_and_coords(endnodes[i_endpt + 1])
sample_nodes = self._get_sample_nodes(start_node, end_node)
# Calculate the along-profile distance of samples along the
# segment.
n_samples = len(sample_nodes)
sample_distances = np.empty(n_samples, dtype=float)
for i_sample, node in enumerate(sample_nodes):
sample_xy = grid.xy_of_node[node]
pt = self._project_point_onto_line(sample_xy, start_xy, end_xy)
d = grid.calc_distances_of_nodes_to_point(pt, node_subse
|
alessiamarcolini/deepstreet
|
utils/format_validation_filenames_util.py
|
Python
|
mit
| 408 | 0.002451 |
import os
with open("validation_classes.csv", "r") as f:
rows = f.readlines()
rows = rows[1:-1]
rows = [x for x in rows if x != "\n"]
path = "dataset/val/"
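# Prefix each validation image with its zero-padded class id,
# e.g. class 7 and file "img.png" become "00007_img.png".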
for row in rows:
rsplit = row.split(";")
filename = rsplit[0]
c = int(rsplit[1])
new_filename = format(c,'05d') + "_" + filename
if os.path.exists(path + filename):
os.rename(path + filename, path + new_filename)
| |
devfirefly/PyFlax
|
Tween.py
|
Python
|
mit
| 4,128 | 0.03125 |
import math
def linear(x):
return x
def quad(x):
return x*x
def quadout(x):
return 1 -quad(x)
def cubic(x):
return x*x*x
def cubicout(x):
return 1 - cubic(x)
def quint(x):
return x*x*x*x
def quintout(x):
return 1-quint(x)
def sine(x):
    return -math.cos(x * (math.pi * .5)) + 1
def sineout(x):
    return 1 - sine(x)
def cosine(x):
    return -math.sin(x * (math.pi * .5)) + 1
def cosineout(x):
    return 1 - cosine(x)
ease = {
"linear":linear,
"quad":quad,
"quad-out":quadout,
"cubic":cubic,
"cubic-out":cubicout,
"quint":quint,
"quint-out":quintout,
"sin
|
e":sine,
"sine-out":sineout,
"cosine":cosine,
"cosine-out":cosineout,
}
def findDistance(x,y):
if not x or not y:
return 0
else:
return max(x,y)-min(x,y)
class single:
def __init__(self,time,item,exp,mode="linear"):
self.progress = 0
self.rate = time > 0 and 1 / time or 0
        self.start = item
self.current = item
self.diff = exp-item
self.mode = mode
self.exp = exp
self.done = False
self.delay = 0
self.initt = 0
def get(self):
return self.current
def update(self,dt):
self.progress = self.progress + self.rate * dt
p = self.progress
x = p >= 1 and 1 or ease[self.mode](p)
self.current = self.start + x*self.diff
if p > 1:
self.done = True
class _to:
    def __init__(self, time, obj, var, mode="linear", done=None, parent=None):
        self.tweens = []
        self.var = var
        self.obj = obj
        self.done = False
        self.onComplete = done
        self.initt = 0
        self.delay = 0
        self._after = None
        self.parent = parent
#key val
for i,v in var.items():
if type(v) == int:
item = single(time,getattr(obj,i),v)
list.insert(self.tweens,len(self.tweens)+1,item)
elif type(v) == list:
t = getattr(obj,i)
if type(v) == list:
items = v
no = 0
for var in v:
                        item = single(time, t[no], var)
list.insert(self.tweens,len(self.tweens)+1,item)
no += 1
else:
print("The item: " + v +" for " + i + " is not a number or a list!")
def update(self,dt):
if self.initt > self.delay:
no = 0
items = []
for i,v in self.var.items():
self.tweens[no].update(dt)
setattr(self.obj,i,self.tweens[no].get())
if self.tweens[no].done:
items.insert(len(items)+1,i)
no = no +1
no = 0
for item in self.tweens:
if item.done:
self.tweens.remove(item)
no = no +1
for item in items:
self.var.pop(item, None)
if len(self.tweens) == 0:
self.done = True
if self._after:
self = self._after(self)
else:
if self.onComplete:
self.onComplete()
else:
self.initt += dt
pass
    def after(self, time, var, mode="linear"):
self._after = _to(time,self.obj,var,mode,False,self.parent)
list.insert(self.parent.tweens,len(self.parent.tweens)+1,self._after)
return self._after
    def set_delay(self, t):
        self.delay = t
def stop(self):
list.remove(self.parent.tweens,self)
pass
class Tween():
def __init__(self):
self.tweens = []
pass
# VAR HAS TO BE DICT WITH STR:EXPVAL
def to(self,time,obj,var,mode="Linear",func=None):
mode = mode or "linear"
t = _to(time,obj,var,mode,func,self)
list.insert(self.tweens,len(self.tweens)+1,t)
return
def update(self,dt):
for tween in self.tweens:
tween.update(dt)
if tween.done:
self.tweens.remove(tween)
pass
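# A minimal usage sketch (the Dot class is hypothetical, defined only to give the
# tween a numeric attribute to animate):
if __name__ == "__main__":
    class Dot(object):
        x = 0
    dot = Dot()
    tween = Tween()
    tween.to(2.0, dot, {"x": 100})
    for _ in range(150):          # roughly 2.5 seconds of 60 fps frames
        tween.update(1.0 / 60)
    print(dot.x)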
|
hxsf/OnlineJudge
|
problem/views.py
|
Python
|
mit
| 12,819 | 0.001668 |
# coding=utf-8
import zipfile
import re
import os
import hashlib
import json
import logging
from django.shortcuts import render
from django.db.models import Q, Count
from django.core.paginator import Paginator
from rest_framework.views import APIView
from django.conf import settings
from account.models import SUPER_ADMIN
from account.decorators import super_admin_required
from utils.shortcuts import (serializer_invalid_response, error_response,
success_response, paginate, rand_str, error_page)
from .serizalizers import (CreateProblemSerializer, EditProblemSerializer, ProblemSerializer,
ProblemTagSerializer, CreateProblemTagSerializer)
from .models import Problem, ProblemTag
from .decorators import check_user_problem_permission
logger = logging.getLogger("app_info")
def problem_page(request, problem_id):
"""
    Front-end problem detail page
"""
try:
problem = Problem.objects.get(id=problem_id, visible=True)
except Problem.DoesNotExist:
return error_page(request, u"题目不存在")
return render(request, "oj/problem/problem.html", {"problem": problem, "samples": json.loads(problem.samples)})
class ProblemTagAdminAPIView(APIView):
"""
    Get the list of all problem tags
"""
def get(self, request):
return success_response(ProblemTagSerializer(ProblemTag.objects.all(), many=True).data)
class ProblemAdminAPIView(APIView):
@super_admin_required
def post(self, request):
"""
        Problem creation JSON API endpoint
---
request_serializer: CreateProblemSerializer
response_serializer: ProblemSerializer
"""
serializer = CreateProblemSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
problem = Problem.objects.create(title=data["title"],
description=data["description"],
input_description=data["input_description"],
output_description=data["output_description"],
test_case_id=data["test_case_id"],
source=data["source"],
samples=json.dumps(data["samples"]),
time_limit=data["time_limit"],
memory_limit=data["memory_limit"],
difficulty=data["difficulty"],
created_by=request.user,
hint=data["hint"],
visible=data["visible"])
for tag in data["tags"]:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
return success_response(ProblemSerializer(problem).data)
else:
return serializer_invalid_response(serializer)
@check_user_problem_permission
def put(self, request):
"""
        Problem editing JSON API endpoint
---
request_serializer: EditProblemSerializer
response_serializer: ProblemSerializer
"""
serializer = EditProblemSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
problem = Problem.objects.get(id=data["id"])
problem.title = data["title"]
problem.description = data["description"]
problem.input_description = data["input_description"]
problem.output_description = data["output_description"]
problem.test_case_id = data["test_case_id"]
problem.source = data["source"]
problem.time_limit = data["time_limit"]
problem.memory_limit = data["memory_limit"]
problem.difficulty = data["difficulty"]
problem.samples = json.dumps(data["samples"])
problem.hint = data["hint"]
problem.visible = data["visible"]
            # Remove the existing tag associations
problem.tags.remove(*problem.tags.all())
            # Re-add all of the tags
for tag in data["tags"]:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
problem.save()
return success_response(ProblemSerializer(problem).data)
else:
return serializer_invalid_response(serializer)
def get(self, request):
"""
        Paginated problem list JSON API endpoint
---
response_serializer: ProblemSerializer
"""
problem_id = request.GET.get("problem_id", None)
if problem_id:
try:
                # Regular admins can only fetch problems they created;
                # super admins can fetch all problems
problem = Problem.objects.get(id=problem_id)
if request.user.admin_type != SUPER_ADMIN:
problem = problem.get(created_by=request.user)
return success_response(ProblemSerializer(problem).data)
except Problem.DoesNotExist:
return error_response(u"题目不存在")
        # Fetch the problem list
problems = Problem.objects.all().order_by("-create_time")
if request.user.admin_type != SUPER_ADMIN:
problems = problems.filter(created_by=request.user)
visible = request.GET.get("visible", None)
if visible:
problems = problems.filter(visible=(visible == "true"))
keyword = request.GET.get("keyword", None)
if keyword:
problems = problems.filter(Q(title__contains=keyword) |
Q(description__contains=keyword))
return paginate(request, problems, ProblemSerializer)
class TestCaseUploadAPIView(APIView):
"""
    Upload test cases for a problem
"""
def _is_legal_test_case_file_name(self, file_name):
        # File names must start with a positive integer and end with .in or .out
regex = r"^[1-9]\d*\.(in|out)$"
        return re.compile(regex).match(file_name) is not None
def post(self, request):
if "file" not in request.FILES:
return error_response(u"文件上传失败")
f = request.FILES["file"]
        tmp_zip = "/tmp/" + rand_str() + ".zip"
try:
with open(tmp_zip, "wb") as test_case_zip:
for chunk in f:
test_case_zip.write(chunk)
except IOError as e:
logger.error(e)
return error_response(u"上传失败")
test_case_file = zipfile.ZipFile(tmp_zip, 'r')
name_list = test_case_file.namelist()
l = []
        # If the files were zipped directly, name_list looks like ["1.in", "1.out"]
        # If there is an extra test_case folder, name_list looks like ["test_case/", "test_case/1.in", "test_case/1.out"]
        # Only the first layout is supported for now, so first check which one this is
        # The first layout:
if "1.in" in name_list and "1.out" in name_list:
for file_name in name_list:
if self._is_legal_test_case_file_name(file_name):
name = file_name.split(".")
                    # For a .in file, check that the matching .out exists
if name[1] == "in":
if (name[0] + ".out") in name_list:
l.append(file_name)
else:
return error_response(u"测试用例文件不完整,缺少" + name[0] + ".out")
else:
                        # For a .out file, check that the matching .in exists
if (name[0] + ".in") in name_list:
l.append(file_name)
else:
return error_response(u"测试用例文件不完整,缺少" + name[0] + ".in")
problem_test_dir = rand_str()
test_case_dir = settings.TEST_CASE_DIR + problem_test_dir + "/"
            # We now have the list of valid test case files; extract them
os.mkdir(test_case_dir)
for name in l:
f = open(test_case_dir + name, "wb")
try:
f.write(test_case_file.read(n
|
xmbcrios/xmbcrios.repository
|
script.module.urlresolver/lib/urlresolver/plugins/limevideo.py
|
Python
|
gpl-2.0
| 3,459 | 0.005204 |
'''
Limevideo urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
from lib import jsunpack
from lib import captcha_lib
class LimevideoResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "limevideo"
domains = [ "limevideo.net" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
data.update({'method_free': 'Continue to Video'})
html = self.net.http_POST(url, data).content
r = re.findall(r'type="hidden" name="(.+?)" value="(.+?)">', html)
for name, value in r:
            data[name] = value
data.update(captcha_lib.do_captcha(html))
html = self.net.http_POST(url, data).content
sPattern = '<script type=(?:"|\')text/javascript(?:"|\')>(eval\('
        sPattern += 'function\(p,a,c,k,e,d\)(?!.+player_ads.+).+np_vid.+?)'
sPattern += '\s+?</script>'
r = re.search(sPattern, html, re.DOTALL + re.IGNORECASE)
if r:
sJavascript = r.group(1)
sUnpacked = jsunpack.unpack(sJavascript)
sPattern = '<embed id="np_vid"type="video/divx"src="(.+?)'
sPattern += '"custommode='
r = re.search(sPattern, sUnpacked)
if r:
return r.group(1)
else:
num = re.compile('false\|(.+?)\|(.+?)\|(.+?)\|(.+?)\|divx').findall(html)
for u1, u2, u3, u4 in num:
urlz = u4 + '.' + u3 + '.' + u2 + '.' + u1
pre = 'http://' + urlz + ':182/d/'
preb = re.compile('custommode\|(.+?)\|(.+?)\|182').findall(html)
for ext, link in preb:
r = pre + link + '/video.' + ext
return r
def get_url(self, host, media_id):
return 'http://www.limevideo.net/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/([0-9a-zA-Z]+)', url)
if r:
return r.groups()
else:
return False
return('host', 'media_id')
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?limevideo.net/' +
'[0-9A-Za-z]+', url) or
'limevideo' in host)
|
quxiaolong1504/dpark
|
tests/test_serialize.py
|
Python
|
bsd-3-clause
| 590 | 0.008475 |
import unittest
from dpark.serialize import dump_closure, load_closure
class TestSerialize(unittest.TestCase):
def testNameError(self):
def foo():
print x
dumped_func = dump_closure(foo)
func = load_closure(dumped_func)
self.assertRaises(NameError, func)
x = 10
def testNoneAsFreeVar(self):
y = None
x = 10
def foo():
return (x, y)
dumped_func = dump_closure(foo)
func = load_closure(dumped_func)
self.assertEqual(func(), (x, y))
|
cjaymes/pyscap
|
src/scap/model/xs/ExtensionType.py
|
Python
|
gpl-3.0
| 2,037 | 0.003927 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xs import *
from scap.model.xs.AnnotatedType import AnnotatedType
logger = logging.getLogger(__name__)
class ExtensionType(AnnotatedType):
MODEL_MAP = {
'elements': [
{'tag_name': 'group', 'list': 'tags', 'class': 'GroupType', 'min': 0},
{'tag_name': 'all', 'list': 'tags', 'class': 'AllType', 'min': 0},
{'tag_name': 'choice', 'list': 'tags', 'class': 'ChoiceElement', 'min': 0},
{'tag_name': 'sequence', 'list': 'tags', 'class': 'GroupType', 'min': 0},
            {'tag_name': 'attribute', 'list': 'tags', 'class': 'AttributeType', 'min': 0, 'max': None},
{'tag_name': 'attributeGroup', 'list': 'tags', 'class': 'AttributeGroupType', 'min': 0, 'max': None},
{'tag_name': 'anyAttribute', 'list': 'tags', 'class': 'WildcardType', 'min': 0},
],
'attributes': {
'base': {'type': 'QNameType', 'required': True},
}
}
def get_defs(self, schema, top_level):
logger.debug('Base: ' + self.base)
# TODO unable to map xmlns because ET doesn't retain it
base_ns, base_name = [self.base.partition(':')[i] for i in [0,2]]
top_level.set_super_module(base_ns)
top_level.set_super_class(base_name)
return super(ExtensionType, self).get_defs(schema, top_level)
|
djtaylor/cloudscape-DEPRECATED
|
python/cloudscape/engine/api/core/request.py
|
Python
|
gpl-3.0
| 15,200 | 0.010724 |
import os
import re
import json
import importlib
# Django Libraries
from django.http import HttpResponse, HttpResponseServerError
# CloudScape Libraries
from cloudscape.common import config
from cloudscape.common import logger
from cloudscape.common.vars import T_BASE
from cloudscape.engine.api.base import APIBase
from cloudscape.common.utils import JSONTemplate
from cloudscape.engine.api.auth.key import APIKey
from cloudscape.engine.api.auth.acl import ACLGateway
from cloudscape.common.utils import valid, invalid
from cloudscape.engine.api.auth.token import APIToken
from cloudscape.engine.api.app.auth.models import DBAuthEndpoints
from cloudscape.engine.api.app.user.models import DBUserDetails
# Configuration / Logger
CONF = config.parse()
LOG = logger.create('cloudscape.engine.api.core.request', CONF.server.log)
def dispatch(request):
"""
The entry point for all API requests. Called for all endpoints from the Django
URLs file. Creates a new instance of the EndpointManager class, and returns any
HTTP response to the client that opened the API request.
:param request: The Django request object
:type request: object
:rtype: object
"""
try:
# Return the response from the endpoint handler
return EndpointManager(request).handler()
# Critical server error
except Exception as e:
LOG.exception('Internal server error: %s' % str(e))
# Return a 500 error
return HttpResponseServerError('Internal server error, please contact your administrator.')
class EndpointManager:
"""
    The endpoint request manager class. Serves as the entry point for all API requests,
    both authentication requests and already-authenticated requests. Constructs
the base API class, loads API utilities, and performs a number of other functions
to prepare the API for the incoming request.
The EndpointManager class is instantiated by the dispatch method, which is called
by the Django URLs module file. It is initialized with the Django request object.
"""
def __init__(self, request):
self.request_raw = request
# Request properties
self.method = None
self.request = None
self.endpoint = None
self.action = None
self.path = None
# Request endpoint handler
self.handler_obj = None
# API parameters
self.api_name = None
self.api_mod = None
self.api_class = None
self.api_user = None
self.api_group = None
# API base object
self.api_base = None
# Request error
def _req_error(self, err):
err_response = {
            'message': 'An error occurred when processing the API request',
'endpoint': self.endpoint,
'error': err
}
LOG.error('%s:%s' % (self.endpoint, err))
return HttpResponse(json.dumps(err_response), content_type='application/json', status=400)
def _authenticate(self):
"""
Authenticate the API request.
"""
# Set the API user and group
self.api_user = self.request['api_user']
self.api_group = None if not ('api_group' in self.request) else self.request['api_group']
LOG.info('Authenticating API user: %s, group=%s' % (self.api_user, repr(self.api_group)))
# Authenticate key for token requests
if self.endpoint == 'auth/get':
auth_status = APIKey().validate(self.request)
if not auth_status['valid']:
return self._req_error(auth_status['content'])
            LOG.info('API key authentication successful for user: %s' % self.api_user)
# Authenticate token for API requests
else:
if not APIToken().validate(self.request):
return self._req_error('Failed to validate API token for user \'%s\'' % self.api_user)
            LOG.info('API token authentication successful for user: %s' % self.api_user)
# Check for a user account
if DBUserDetails.objects.filter(username=self.api_user).count():
# If no API group was supplied
if not self.api_group:
return self._req_error('User accounts must supply a group UUID when making a request using the <api_group> parameter')
# Make sure the group exists and the user is a member
is_member = False
for group in DBUserDetails.objects.get(username=self.api_user).get_groups():
if group['uuid'] == self.api_group:
is_member = True
break
# If the user is not a member of the group
if not is_member:
return self._req_error('User account <%s> is not a member of group <%s>' % (self.api_user, self.api_group))
# Validate the request
def _validate(self):
# Request body / method
self.request = json.loads(self.request_raw.body)
self.method = self.request_raw.META['REQUEST_METHOD']
# Make sure a request action is set
|
        if 'action' not in self.request:
return self._req_error('Request body requires an <action> parameter for endpoint pathing')
self.action = self.request['action']
# Get the request path
self.path = re.compile
|
('^\/(.*$)').sub(r'\g<1>', self.request_raw.META['PATH_INFO'])
# Set the request endpoint
self.endpoint = '%s/%s' % (self.path, self.action)
# Map the path to a module, class, and API name
self.handler_obj = EndpointMapper(self.endpoint, self.method).handler()
if not self.handler_obj['valid']:
return self._req_error(self.handler_obj['content'])
# Validate the request body
request_err = JSONTemplate(self.handler_obj['content']['api_map']).validate(self.request)
if request_err:
return self._req_error(request_err)
# Set the handler objects
self.api_name = self.handler_obj['content']['api_name']
self.api_mod = self.handler_obj['content']['api_mod']
self.api_class = self.handler_obj['content']['api_class']
self.api_utils = self.handler_obj['content']['api_utils']
def handler(self):
"""
The endpoint manager request handler. Performs a number of validation steps before
passing off the request to the API utility class.
1.) Looks for the base required request parameters
2.) Maps the endpoint and request action to an API utility and validates the request body
3.) Authenticates the user and API key/token
4.) Initializes any required Socket.IO connections for web clients
5.) Launches the API utility class to process the request
6.) Returns either an HTTP response with the status of the request
"""
# Parse the request
try:
validate_err = self._validate()
if validate_err:
return validate_err
except Exception as e:
LOG.exception('Exception while validating request: %s' % str(e))
return self._req_error('Internal server error, failed to validate the request')
# Authenticate the request
try:
auth_err = self._authenticate()
if auth_err:
return auth_err
except Exception as e:
LOG.exception('Exception while authenticating the request: %s' % str(e))
return self._req_error('Internal server error, failed to authenticate the request')
# Check the request against ACLs
acl_gateway = ACLGateway(self.request, self.endpoint, self.api_user)
# If the user is not authorized for this endpoint/object combination
if not acl_gateway.authorized:
return self._req_error(acl_gateway.auth_error)
# Set up the API base
|
darren-wang/ks3
|
keystone/server/eventlet.py
|
Python
|
apache-2.0
| 5,193 | 0.001541 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n
import pbr.version
# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()
from keystone.common import environment
from keystone.common import utils
from keystone import config
from keystone.i18n import _
from keystone.openstack.common import service
from keystone.openstack.common import systemd
from keystone.server import common
from keystone import service as keystone_service
CONF = cfg.CONF
class ServerWrapper(object):
"""Wraps a Server with some launching info & capabilities."""
def __init__(self, server, workers):
self.server = server
self.workers = workers
def launch_wit
|
h(self, launcher):
self.server.listen()
if self.workers > 1:
# Use multi-process launcher
launcher.launch_service(self.server, self.workers)
else:
# Use single process launcher
launcher.launch_service(self.server)
def create_server(conf, name, host, port, workers):
app = keystone_service.loadapp('config:%s' % conf, name)
server = environment.Server(app, host=host, port=
|
port,
keepalive=CONF.eventlet_server.tcp_keepalive,
keepidle=CONF.eventlet_server.tcp_keepidle)
if CONF.eventlet_server_ssl.enable:
server.set_ssl(CONF.eventlet_server_ssl.certfile,
CONF.eventlet_server_ssl.keyfile,
CONF.eventlet_server_ssl.ca_certs,
CONF.eventlet_server_ssl.cert_required)
return name, ServerWrapper(server, workers)
def serve(*servers):
logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
'in favor of running in a WSGI server (e.g. mod_wsgi). '
'Support for keystone under eventlet will be removed in '
'the "M"-Release.'))
if max([server[1].workers for server in servers]) > 1:
launcher = service.ProcessLauncher()
else:
launcher = service.ServiceLauncher()
for name, server in servers:
try:
server.launch_with(launcher)
except socket.error:
logging.exception(_('Failed to start the %(name)s server') % {
'name': name})
raise
# notify calling process we are ready to serve
systemd.notify_once()
for name, server in servers:
launcher.wait()
def _get_workers(worker_type_config_opt):
# Get the value from config, if the config value is None (not set), return
# the number of cpus with a minimum of 2.
worker_count = CONF.eventlet_server.get(worker_type_config_opt)
if not worker_count:
worker_count = max(2, processutils.get_worker_count())
return worker_count
def configure_threading():
monkeypatch_thread = not CONF.standard_threads
pydev_debug_url = utils.setup_remote_pydev_debug()
if pydev_debug_url:
# in order to work around errors caused by monkey patching we have to
# set the thread to False. An explanation is here:
# http://lists.openstack.org/pipermail/openstack-dev/2012-August/
# 000794.html
monkeypatch_thread = False
environment.use_eventlet(monkeypatch_thread)
def run(possible_topdir):
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
common.configure(
version=pbr.version.VersionInfo('keystone').version_string(),
config_files=config_files,
pre_setup_logging_fn=configure_threading)
paste_config = config.find_paste_config()
def create_servers():
public_worker_count = _get_workers('public_workers')
servers = []
servers.append(create_server(paste_config,
'main',
CONF.eventlet_server.public_bind_host,
CONF.eventlet_server.public_port,
public_worker_count))
return servers
_unused, servers = common.setup_backends(
startup_application_fn=create_servers)
serve(*servers)
|
apaksoy/automatetheboringstuff
|
practice projects/chap 08/multiclipboard with deletion chap 8/mcbd.py
|
Python
|
mit
| 2,608 | 0.003451 |
#! python3
'''
mcbd.py - Saves and loads pieces of text from/to the clipboard to/from a
shelf type file.
Usage: python3 mcbd.py save <keyword> - saves clipboard for keyword.
python3 mcbd.py <keyword> - loads to clipboard for keyword.
python3 mcbd.py list - loads all keywords to clipboard.
python3 mcbd.py delete <keyword> - deletes for keyword.
python3 mcbd.py delete - deletes all keywords.
'''
'''
Say you have the boring task of filling out many forms in a web page or
software with several text fields. The clipboard saves you from typing
the same text over and over again. But only one thing can be on the
clipboard at a time. If you have several different pieces of text that
you need to copy and paste, you have to keep highlighting and copying
the same few things over and over again. You can write a Python
program to keep track of multiple pieces of text.
'''
'''
Extend the multiclipboard program in this chapter so that it has a
delete <keyword> command line argument that will delete a keyword from
the shelf. Then add a delete command line argument that will delete all
keywords.
'''
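# Illustrative example session (added for clarity; 'email' is a made-up keyword):
#
#   $ python3 mcbd.py save email     # stores whatever is currently on the clipboard
#   clipboard saved under keyword: email
#   $ python3 mcbd.py email          # puts the stored text back on the clipboard
#   copied to clipboard for keyword: email
#   $ python3 mcbd.py delete email
#   deleted keyword: email
#   $ python3 mcbd.py delete         # removes every stored keyword
#   all keywords deleted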
import pyperclip
import shelve
import sys
import textwrap
def print_usage():
print(textwrap.dedent(
'''
Usage: python3 mcbd.py save <keyword> - saves clipboard for keyword.
python3 mcbd.py <keyword> - loads to clipboard for keyword.
python3 mcbd.py list - loads all keywords to clipboard.
python3 mcbd.py delete <keyword> - deletes for keyword.
python3 mcbd.py delete - deletes all keyw
|
ords.
'''))
mcbShelf = shelve.open('mcb') # file created if not already existing
# save or delete specified keywords
if len(sys.argv) == 3:
if sys.argv[1].lower() == 'save':
mcbShelf[sys.argv[2]] = pyperclip.paste()
print('clipboard saved under keyword:', sys.argv[2])
elif sys.argv[1].lower() == 'delete':
del mcbShelf[sys.argv[2]]
print('deleted keyword:', sys.argv[2])
# list or delete all keywords or fetch one
elif len(sys.argv) == 2:
if sys.
|
argv[1].lower() == 'list':
pyperclip.copy(str(list(mcbShelf.keys())))
print('all keywords copied to clipboard')
elif sys.argv[1].lower() == 'delete':
mcbShelf.clear()
print('all keywords deleted')
elif sys.argv[1] in mcbShelf:
pyperclip.copy(mcbShelf[sys.argv[1]])
print('copied to clipboard for keyword:', sys.argv[1])
else:
print('no such keyword:', sys.argv[1])
print_usage()
else:
print_usage()
mcbShelf.close()
|
ostree/plaso
|
plaso/lib/utils.py
|
Python
|
apache-2.0
| 3,494 | 0.013165 |
# -*- coding: utf-8 -*-
"""This file contains utility functions."""
import logging
import re
# Illegal Unicode characters for XML.
ILLEGAL_XML_RE = re.compile(
ur'[\x00-\x08\x0b-\x1f\x7f-\x84\x86-\x9f'
ur'\ud800-\udfff\ufdd0-\ufddf\ufffe-\uffff]')
def IsText(bytes_in, encoding=None):
"""Examine the bytes in and determine if they are indicative of a text.
  Parsers need a quick and at least semi-reliable method of discovering whether
  a particular byte stream is text or resembles text. This can be used in text
  parsers, for instance, to determine whether a file is a text file.
  The method assumes the byte sequence is either ASCII, UTF-8, UTF-16 or the
  supplied character encoding. Otherwise it assumes the byte sequence is not
  text, but a plain byte sequence.
Args:
bytes_in: The byte sequence passed to the method that needs examination.
encoding: Optional encoding to test, if not defined only ASCII, UTF-8 and
UTF-16 are tried.
Returns:
    Boolean value indicating whether or not the byte sequence is text.
"""
# TODO: Improve speed and accuracy of this method.
# Start with the assumption we are dealing with a text.
is_ascii = True
# Check if this is ASCII text string.
for char in bytes_in:
if not 31 < ord(char) < 128:
is_ascii = False
break
# We have an ASCII string.
if is_ascii:
return is_ascii
# Is this already a unicode text?
if isinstance(bytes_in, unicode):
return True
# Check if this is UTF-8
try:
_ = bytes_in.decode('utf-8')
return True
except UnicodeDecodeError:
pass
# TODO: UTF 16 decode is successful in too
# many edge cases where we are not really dealing with
# a text at all. Leaving this out for now, consider
#
|
re-enabling or making a better determination.
#try:
# _ = bytes_in.decode('utf-16-le')
# return True
#except UnicodeDecodeError:
# pass
if encoding:
try:
_ = bytes_in.decode(encoding)
return True
except UnicodeDecodeError:
pass
except LookupError:
logging.error(
u'String encoding not recognized: {0:s}'.format(encoding))
return False
def GetUni
|
codeString(string):
"""Converts the string to Unicode if necessary."""
if not isinstance(string, unicode):
return str(string).decode('utf8', 'ignore')
return string
def GetInodeValue(inode_raw):
"""Read in a 'raw' inode value and try to convert it into an integer.
Args:
inode_raw: A string or an int inode value.
Returns:
An integer inode value.
"""
if isinstance(inode_raw, (int, long)):
return inode_raw
if isinstance(inode_raw, float):
return int(inode_raw)
try:
return int(inode_raw)
except ValueError:
# Let's do one more attempt.
inode_string, _, _ = str(inode_raw).partition('-')
try:
return int(inode_string)
except ValueError:
return -1
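# Illustrative examples (added for clarity, not part of the original module):
# how GetInodeValue() normalizes the different 'raw' inode forms it accepts.
def _ExampleInodeValues():
  """Shows the expected conversions; never called, safe to delete."""
  assert GetInodeValue(15) == 15
  assert GetInodeValue(15.0) == 15
  assert GetInodeValue(u'15') == 15
  assert GetInodeValue(u'15-128-1') == 15   # only the part before the first dash
  assert GetInodeValue(u'bad-inode') == -1  # unparseable values fall back to -1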
def RemoveIllegalXMLCharacters(string, replacement=u'\ufffd'):
"""Removes illegal Unicode characters for XML.
Args:
string: A string to replace all illegal characters for XML.
replacement: A replacement character to use in replacement of all
found illegal characters.
Return:
A string where all illegal Unicode characters for XML have been removed.
If the input is not a string it will be returned unchanged."""
if isinstance(string, basestring):
return ILLEGAL_XML_RE.sub(replacement, string)
return string
|
harmy/kbengine
|
kbe/res/scripts/common/Lib/sre_constants.py
|
Python
|
lgpl-3.0
| 7,444 | 0.002284 |
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UN
|
I_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNI
|
CODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = d.items()
items.sort(key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print("done")
|
miguelgrinberg/heat
|
heat/engine/clients/os/manila.py
|
Python
|
apache-2.0
| 5,040 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine.clients import client_plugin
from heat.engine import constraints
from manilaclient import client as manila_client
from manilaclient import exceptions
MANILACLIENT_VERSION = "1"
class ManilaClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
service_types = [SHARE] = ['share']
def _create(self):
endpoint_type = self._get_client_option('manila', 'endpoint_type')
endpoint = self.url_for(service_type=self.SHARE,
endpoint_type=endpoint_type)
args = {
'service_catalog_url': endpoint,
'input_auth_token': self.auth_token
}
client = manila_client.Client(MANILACLIENT_VERSION, **args)
return client
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
def is_over_limit(self, ex):
return isinstance(ex, exceptions.RequestEntityTooLarge)
def is_conflict(self, ex):
return isinstance(ex, exceptions.Conflict)
@staticmethod
def _find_resource_by_id_or_name(id_or_name, resource_list,
resource_type_name):
"""The method is trying to find id or name in item_list
The method searches item with id_or_name in list and returns it.
If there is more than one value or no values then it raises an
exception
:param id_or_name: resource id or name
:param resource_list: list of resources
:param resource_type_name: name of resource type that will be used
for exceptions
:raises NotFound, NoUniqueMatch
:return: resource
|
or generate an exception otherwise
"""
search_result_by_id = [res for res in resource_list
if res.id == id_or_name]
|
if search_result_by_id:
return search_result_by_id[0]
else:
# try to find resource by name
search_result_by_name = [res for res in resource_list
if res.name == id_or_name]
match_count = len(search_result_by_name)
if match_count > 1:
message = ("Ambiguous {0} name '{1}'. Found more than one "
"{0} for this name in Manila."
).format(resource_type_name, id_or_name)
raise exceptions.NoUniqueMatch(message)
elif match_count == 1:
return search_result_by_name[0]
else:
message = ("{0} '{1}' was not found in Manila. Please "
"use the identity of existing {0} in Heat "
"template.").format(resource_type_name, id_or_name)
raise exceptions.NotFound(message=message)
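    # Illustrative behaviour sketch (added for clarity; the identifiers below are
    # made up):
    #   _find_resource_by_id_or_name('net-1', networks, 'share network')
    #     -> the item whose .id == 'net-1', if one exists
    #     -> otherwise the single item whose .name == 'net-1'
    #     -> NoUniqueMatch if several items share that name, NotFound if none match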
def get_share_type(self, share_type_identity):
return self._find_resource_by_id_or_name(
share_type_identity,
self.client().share_types.list(),
"share type"
)
def get_share_network(self, share_network_identity):
return self._find_resource_by_id_or_name(
share_network_identity,
self.client().share_networks.list(),
"share network"
)
def get_share_snapshot(self, snapshot_identity):
return self._find_resource_by_id_or_name(
snapshot_identity,
self.client().share_snapshots.list(),
"share snapshot"
)
def get_security_service(self, service_identity):
return self._find_resource_by_id_or_name(
service_identity,
self.client().security_services.list(),
'security service'
)
class ManilaShareBaseConstraint(constraints.BaseCustomConstraint):
# check that exceptions module has been loaded. Without this check
# doc tests on gates will fail
expected_exceptions = (exceptions.NotFound, exceptions.NoUniqueMatch)
def validate_with_client(self, client, resource_id):
getattr(client.client_plugin("manila"), self.resource_getter_name)(
resource_id)
class ManilaShareNetworkConstraint(ManilaShareBaseConstraint):
resource_getter_name = 'get_share_network'
class ManilaShareTypeConstraint(ManilaShareBaseConstraint):
resource_getter_name = 'get_share_type'
class ManilaShareSnapshotConstraint(ManilaShareBaseConstraint):
resource_getter_name = 'get_share_snapshot'
|
emop/webrobot
|
hello_baidu/libs/actions.py
|
Python
|
gpl-2.0
| 725 | 0.01931 |
class BaiduSearch(object):
def __init__(self):
pass
|
def __call__(self, client, api, **kw):
"""
client --
client
api --
"""
client.driver.get("http://www.baidu.com")
|
input = client.e("#kw")
input.clear()
input.send_keys(kw['keyword'])
submit = client.e("#su")
submit.click()
#path = client.real_path("screen.png")
client.screenshot_as_file("screen.png")
result_list = client.es(".result tr div.f13")
for item in result_list:
print item.text
print "kw:%s" % str(kw)
|
aivuk/formcreator
|
formcreator/blocks/__init__.py
|
Python
|
bsd-2-clause
| 647 | 0.003091 |
import os
from flask import render_template
from markdown import markdown
__all__ = ["DirContents", "Doc"]
class
|
DirContents(object)
|
:
def __init__(self, dir, name=''):
self.dir = dir
if name != '':
self.name = name
else:
self.name = dir
def get_contents(self):
if not os.path.isdir(self.dir):
os.mkdir(self.dir)
return os.listdir(self.dir)
def html(self):
return render_template('dir_contents.html', dir=self)
class Doc(object):
def __init__(self, text):
self.text = text
def __html__(self):
return markdown(self.text)
|
tadhg-ohiggins/regulations-parser
|
doc2xml.py
|
Python
|
cc0-1.0
| 5,854 | 0.001025 |
""" doc2xml.py
Converts docx files representing a proposed rule into the type of XML we'd
expect from the Federal Register.
Executing: python doc2xml.py file.docx
Writes XML to stdout
Installation:
* Install libxml2 via a package manager
* pip install -e git+https://github.com/savoirfairelinux/python-docx.git#egg=docx
Known limitations:
* Ignores images, tables, equations, similar
* Isn't aware of some bullets and other paragraph markers
* Uses bold and italics (along with string matching) to determine what
headers exist. If the docx uses custom style sheets in
|
stead, it won't
|
work
* Only processes the preamble data, not the CFR changes
""" # noqa
from __future__ import print_function
import re
import sys
from itertools import tee
from lxml import etree
import docx
h2_re = re.compile('[A-Z]\.')
h3_re = re.compile('\d\d?\.')
def text_subel(root, tag, text, **attrs):
"""Util method for allowing a one-liner"""
subel = etree.SubElement(root, tag, **attrs)
subel.text = text
return subel
def has_inline_label(par):
return len(par.runs) > 1 and par.runs[0].bold
def is_heading(par, level):
bold = all(run.bold for run in par.runs if run.text.strip())
italics = all(run.italic for run in par.runs if run.text.strip())
l2_marker = bool(h2_re.match(par.text.strip()))
l3_marker = bool(h3_re.match(par.text.strip()))
if level == 1:
return bold
elif level == 2:
return italics and l2_marker
elif level == 3:
return l3_marker
else:
return False
class Builder(object):
def __init__(self, paragraphs, xml_root):
self._paragraphs = iter(paragraphs) # always iterable
self.xml_root = xml_root
def takewhile(self, fn):
while fn(self.head_p):
yield next(self._paragraphs)
def dropwhile(self, fn):
while fn(self.head_p):
next(self._paragraphs)
return self
def skip_header(self):
def not_header(par):
return not (par.text.strip() and par.runs[0].bold and
not any(c.isdigit() for c in par.text))
self.dropwhile(not_header)
return self
def skip_whitespace(self):
self.dropwhile(lambda p: not p.text.strip())
return self
@property
def head_p(self): # peek; non-destructive
copy1, copy2 = tee(self._paragraphs)
self._paragraphs = copy2
return next(copy1)
def consume_text(self):
return next(self._paragraphs).text.strip()
def intro_header(self, parent, start_p):
label_to_tag = {
'AGENCY': 'AGY',
'ACTION': 'ACT',
'SUMMARY': 'SUM',
'DATES': 'DATES',
'ADDRESSES': 'ADD',
'FOR FURTHER INFORMATION CONTACT': 'FURINF',
}
label = next((l for l in label_to_tag if start_p.text.startswith(l)),
None)
if label:
sub_el = etree.SubElement(parent, label_to_tag[label])
text_subel(sub_el, 'HD', label + ':', SOURCE='HED')
else:
sub_el = etree.SubElement(parent, "UNKNOWN")
text_subel(sub_el, 'HD', start_p.runs[0].text, SOURCE='HED')
return sub_el
def intro_sections(self, preamb):
intro = self.takewhile(
lambda p: not p.text.startswith('SUPPLEMENTARY'))
current_section = None
for par in intro:
if has_inline_label(par):
current_section = self.intro_header(preamb, par)
sub_p = etree.SubElement(current_section, 'P')
text = ''.join(r.text for r in par.runs[1:])
# strip the beginning colon as it's part of the label
sub_p.text = text.lstrip(':').strip()
elif current_section is not None:
sub_p = etree.SubElement(current_section, 'P')
sub_p.text = par.text.strip()
def preamble(self):
preamb = etree.SubElement(self.xml_root, 'PREAMB')
text_subel(preamb, 'AGENCY', self.consume_text())
self.skip_whitespace()
if not self.head_p.text[:1].isdigit():
text_subel(preamb, 'SUBAGENCY', self.consume_text())
self.skip_whitespace()
for tag in ('CFR', 'DEPDOC', 'RIN', 'SUBJECT'):
text_subel(preamb, tag, self.consume_text())
self.skip_whitespace()
self.intro_sections(preamb)
return self
def suplinf(self):
suplinf = etree.SubElement(self.xml_root, 'SUPLINF')
text_subel(suplinf, 'HD', self.consume_text(), SOURCE='HED')
self.dropwhile(lambda p: not is_heading(p, 1))
non_cfr = self.takewhile(
lambda p: not p.text.startswith('List of Subjects'))
for par in non_cfr:
if not par.text.strip():
continue
elif is_heading(par, 1):
text_subel(suplinf, 'HD', par.text.strip(), SOURCE='HD1')
elif is_heading(par, 2):
text_subel(suplinf, 'HD', par.text.strip(), SOURCE='HD2')
elif is_heading(par, 3):
text_subel(suplinf, 'HD', par.text.strip(), SOURCE='HD3')
else:
text_subel(suplinf, 'P', par.text.strip())
def parse(filename):
"""Pulls out and prints some fields/paragraphs from an FR notice"""
builder = Builder(docx.Document(filename).paragraphs,
etree.Element('PRORULE'))
builder.skip_header()
builder.preamble()
builder.skip_whitespace()
builder.suplinf()
return builder.xml_root
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python doc2xml.py file.docx") # noqa
else:
print(etree.tounicode(parse(sys.argv[1]), pretty_print=True)) # noqa
|
CIRCL/AIL-framework
|
update/v2.2/Update.py
|
Python
|
agpl-3.0
| 4,172 | 0.006951 |
#!/usr/bin/env python3
# -*-coding:UTF-8 -*
import os
import re
import sys
import time
import redis
import datetime
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'packages'))
import Item
import Term
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib/'))
import ConfigLoader
def rreplace(s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
if __name__ == '__main__':
start_deb = time.time()
config_loader = Confi
|
gLoader.ConfigLoader()
r_serv_term_stats = config_loader.get_redis_conn("ARDB_Trending")
r_serv_
|
termfreq = config_loader.get_redis_conn("ARDB_TermFreq")
config_loader = None
r_serv_term_stats.flushdb()
#convert all regex:
all_regex = r_serv_termfreq.smembers('TrackedRegexSet')
for regex in all_regex:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(regex)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(regex)) )
new_term = regex[1:-1]
res = Term.parse_json_term_to_add({"term": new_term, "type": 'regex', "tags": tags, "mails": mails, "level": 1}, 'admin@admin.test')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('regex_{}'.format(regex))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Regex Removed: {}'.format(regex))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedRegexSet', regex)
all_tokens = r_serv_termfreq.smembers('TrackedSetTermSet')
for token in all_tokens:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(token)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(token)) )
res = Term.parse_json_term_to_add({"term": token, "type": 'word', "tags": tags, "mails": mails, "level": 1}, 'admin@admin.test')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('tracked_{}'.format(token))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Token Removed: {}'.format(token))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedSetTermSet', token)
all_set = r_serv_termfreq.smembers('TrackedSetSet')
for curr_set in all_set:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(curr_set)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(curr_set)) )
to_remove = ',{}'.format(curr_set.split(',')[-1])
new_set = rreplace(curr_set, to_remove, '', 1)
new_set = new_set[2:]
new_set = new_set.replace(',', '')
res = Term.parse_json_term_to_add({"term": new_set, "type": 'set', "nb_words": 1, "tags": tags, "mails": mails, "level": 1}, 'admin@admin.test')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('tracked_{}'.format(curr_set))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Set Removed: {}'.format(curr_set))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedSetSet', curr_set)
r_serv_termfreq.flushdb()
    # Set current ail version and update date.
    # Note: r_serv was not opened earlier in this script, so we create the main
    # ARDB_DB connection here (assumed to be the store for 'ail:version').
    config_loader = ConfigLoader.ConfigLoader()
    r_serv = config_loader.get_redis_conn("ARDB_DB")
    r_serv.set('ail:version', 'v2.2')
    # Set update date for this version
    r_serv.hset('ail:update_date', 'v2.2', datetime.datetime.now().strftime("%Y%m%d"))
|
dictation-toolbox/aenea
|
server/linux_wayland/qwerty.py
|
Python
|
lgpl-3.0
| 1,666 | 0.042017 |
from abstractKeyboardMapping import AbstractKeyboardMapping
import evdev
class Qwerty(AbstractKeyboardMapping):
def __init__(self):
        super(Qwerty, self).__init__()
def solo(self):
return { "!" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_1],
"@" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_2],
"#" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_3],
"$" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_4],
"%" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_5],
"^" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_6],
|
"&" : [evdev.ecodes.KEY_LEFTSHIFT, evd
|
ev.ecodes.KEY_7],
"*" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_8],
"(" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_9],
")" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_0],
"_" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_MINUS],
"+" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_EQUAL],
"{" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_LEFTBRACE],
"}" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_RIGHTBRACE],
":" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_SEMICOLON],
"\"" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_APOSTROPHE],
"|" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_BACKSLASH],
"<" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_COMMA],
">" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_DOT],
"?" : [evdev.ecodes.KEY_LEFTSHIFT, evdev.ecodes.KEY_SLASH],
}
def multi(self):
#no multi keys I think
return {}
|
blondegeek/pymatgen
|
pymatgen/apps/borg/hive.py
|
Python
|
mit
| 15,762 | 0.000444 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import abc
import os
import glob
import logging
import json
import warnings
from monty.io import zopen
from pymatgen.io.vasp.inputs import Incar, Potcar, Poscar
from pymatgen.io.vasp.outputs import Vasprun, Oszicar, Dynmat
from pymatgen.io.gaussian import GaussianOutput
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from monty.json import MSONable
"""
This module defines the various drones used to assimilate data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Co
|
pyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
logger = logging.getLogger(__name__)
class AbstractDrone(MSONable, metaclass=abc.ABCMeta):
"""
Abstract drone class that defines the various methods that must be
implemented by drones. Because of the quirky nature
|
of Python"s
multiprocessing, the intermediate data representations has to be in the
form of python primitives. So all objects that drones work with must be
MSONable. All drones must also implement the standard MSONable as_dict() and
from_dict API.
"""
@abc.abstractmethod
def assimilate(self, path):
"""
Assimilate data in a directory path into a pymatgen object. Because of
        the quirky nature of Python's multiprocessing, the object must support
pymatgen's as_dict() for parallel processing.
Args:
path: directory path
Returns:
An assimilated object
"""
return
@abc.abstractmethod
def get_valid_paths(self, path):
"""
Checks if path contains valid data for assimilation, and then returns
the valid paths. The paths returned can be a list of directory or file
paths, depending on what kind of data you are assimilating. For
example, if you are assimilating VASP runs, you are only interested in
directories containing vasprun.xml files. On the other hand, if you are
interested converting all POSCARs in a directory tree to cifs for
example, you will want the file paths.
Args:
path: input path as a tuple generated from os.walk, i.e.,
(parent, subdirs, files).
Returns:
List of valid dir/file paths for assimilation
"""
return
class VaspToComputedEntryDrone(AbstractDrone):
"""
VaspToEntryDrone assimilates directories containing vasp output to
ComputedEntry/ComputedStructureEntry objects. There are some restrictions
on the valid directory structures:
1. There can be only one vasp run in each directory.
2. Directories designated "relax1", "relax2" are considered to be 2 parts
of an aflow style run, and only "relax2" is parsed.
3. The drone parses only the vasprun.xml file.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. See
:class:`pymatgen.io.vasp.Vasprun`. If parameters is None,
a default set of parameters that are necessary for typical
post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
"""
def __init__(self, inc_structure=False, parameters=None, data=None):
self._inc_structure = inc_structure
self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
"potcar_symbols", "run_type"}
if parameters:
self._parameters.update(parameters)
self._data = data if data else []
def assimilate(self, path):
files = os.listdir(path)
if "relax1" in files and "relax2" in files:
filepath = glob.glob(os.path.join(path, "relax2",
"vasprun.xml*"))[0]
else:
vasprun_files = glob.glob(os.path.join(path, "vasprun.xml*"))
filepath = None
if len(vasprun_files) == 1:
filepath = vasprun_files[0]
elif len(vasprun_files) > 1:
# Since multiple files are ambiguous, we will always read
                # the one that is the last one alphabetically.
filepath = sorted(vasprun_files)[-1]
warnings.warn("%d vasprun.xml.* found. %s is being parsed." %
(len(vasprun_files), filepath))
try:
vasprun = Vasprun(filepath)
except Exception as ex:
logger.debug("error in {}: {}".format(filepath, ex))
return None
entry = vasprun.get_computed_entry(self._inc_structure,
parameters=self._parameters,
data=self._data)
# entry.parameters["history"] = _get_transformation_history(path)
return entry
def get_valid_paths(self, path):
(parent, subdirs, files) = path
if "relax1" in subdirs and "relax2" in subdirs:
return [parent]
if (not parent.endswith("/relax1")) and \
(not parent.endswith("/relax2")) and (
len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0 or (
len(glob.glob(os.path.join(parent, "POSCAR*"))) > 0 and
len(glob.glob(os.path.join(parent, "OSZICAR*"))) > 0)
):
return [parent]
return []
def __str__(self):
return " VaspToComputedEntryDrone"
def as_dict(self):
return {"init_args": {"inc_structure": self._inc_structure,
"parameters": self._parameters,
"data": self._data},
"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone):
"""
A simpler VaspToComputedEntryDrone. Instead of parsing vasprun.xml, it
parses only the INCAR, POTCAR, OSZICAR and KPOINTS files, which are much
smaller and faster to parse. However, much fewer properties are available
compared to the standard VaspToComputedEntryDrone.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries. Structure will be parsed from the CONTCAR.
"""
def __init__(self, inc_structure=False):
self._inc_structure = inc_structure
self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
"run_type"}
def assimilate(self, path):
files = os.listdir(path)
try:
files_to_parse = {}
if "relax1" in files and "relax2" in files:
for filename in ("INCAR", "POTCAR", "POSCAR"):
search_str = os.path.join(path, "relax1", filename + "*")
files_to_parse[filename] = glob.glob(search_str)[0]
for filename in ("CONTCAR", "OSZICAR"):
search_str = os.path.join(path, "relax2", filename + "*")
files_to_parse[filename] = glob.glob(search_str)[-1]
else:
for filename in (
"INCAR", "POTCAR", "CONTCAR", "OSZICAR", "POSCAR", "DYNMAT"
):
files = sorted(glob.glob(os.path.join(path, filename + "*")))
if len(files) < 1:
continue
if len(files) == 1 or filename == "INCAR" or \
filename == "POTCAR" or filename == "DYNMAT":
files_to_parse[filename] = files[-1]\
if filename ==
|
cyrozap/BBB-Network-Ammeter
|
server.py
|
Python
|
isc
| 5,648 | 0.011863 |
#!/usr/bin/env python
#
# BBB-Network-Ammeter
#
# Copyright (c) 2016, Forest Crossman <cyrozap@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from datetime import datetime
from lxml import etree
from flask import Flask, Response
from Adafruit_BBIO import ADC
app = Flask(__name__)
def get_current():
voltage = get_adc_voltage()
current = 109.2 * voltage + 5.3688
return current
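# Worked example (illustrative, assuming ADC.read() returns a normalized 0-1
# value, as the conversion in get_adc_voltage() implies): at mid-scale,
#   voltage = 0.5 * 1.8 = 0.9 V
#   current = 109.2 * 0.9 + 5.3688 = 103.6488 A (about 103.65 A)
def _example_current_from_raw(raw_value=0.5):
    """Mirror of the two functions above without touching the ADC hardware."""
    voltage = raw_value * 1.8
    return 109.2 * voltage + 5.3688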
def get_adc_voltage():
# Read a value from the ADC
value = ADC.read("P9_39") # AIN0
# Convert the number to a voltage
voltage = value * 1.8
return voltage
@app.route("/sample")
def sample():
voltage = get_adc_voltage()
return Response("{:.03f} V".format(voltage))
@app.route("/probe")
def probe():
'''Generate a response for probe requests'''
mtconnect_schema = "urn:mtconnect.org:MTConnectDevices:1.3"
schema_url = "http://www.mtconnect.org/schemas/MTConnectDevices_1.3.xsd"
xsi = "http://www.w3.org/2001/XMLSchema-instance"
MTConnectDevices = etree.Element("MTConnectDevices",
nsmap={
None: mtconnect_schema,
"xsi": xsi,
"m": mtconnect_schema,
}
)
MTConnectDevices.attrib["{{{pre}}}schemaLocation".format(pre=xsi)] = \
"{schema} {schema_url}".format(schema=mtconnect_schema, schema_url=schema_url)
creation_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
Header = etree.SubElement(MTConnectDevices, "Header",
creationTime=creation_time,
instanceId="0",
sender="mtcagent",
bufferSize="0",
version="0.1",
assetCount="1",
)
Devices = etree.SubElement(MTConnectDevices, "Devices")
Device = etree.SubElement(Devices, "Device",
id="dev",
iso841Class="6"
|
,
name="currentSensor",
sampleInterval=
|
"10",
uuid="0",
)
Description = etree.SubElement(Device, "Description",
manufacturer="RPI MILL",
)
DataItems_0 = etree.SubElement(Device, "DataItems")
DataItem_0 = etree.SubElement(DataItems_0, "DataItem",
category="EVENT",
id="avail",
type="MACHINE_ON",
)
Components_0 = etree.SubElement(Device, "Components")
Axes = etree.SubElement(Components_0, "Axes", id="ax", name="Axes")
Components_1 = etree.SubElement(Axes, "Components")
Linear = etree.SubElement(Components_1, "Linear", id="x1", name="X")
DataItems_1 = etree.SubElement(Linear, "DataItems")
DataItem_1 = etree.SubElement(DataItems_1, "DataItem",
category="SAMPLE",
id="current1",
name="current1",
nativeUnits="AMPERE",
subType="ACTUAL",
type="CURRENT",
units="AMPERE",
)
response = etree.tostring(MTConnectDevices,
pretty_print=True,
xml_declaration=True,
encoding='UTF-8'
)
return Response(response, mimetype='text/xml')
@app.route("/current")
def current():
mtconnect_schema = "urn:mtconnect.org:MTConnectStreams:1.3"
schema_url = "http://www.mtconnect.org/schemas/MTConnectStreams_1.3.xsd"
xsi = "http://www.w3.org/2001/XMLSchema-instance"
MTConnectStreams = etree.Element("MTConnectStreams",
nsmap={
None: mtconnect_schema,
"xsi": xsi,
"m": mtconnect_schema,
}
)
MTConnectStreams.attrib["{{{pre}}}schemaLocation".format(pre=xsi)] = \
"{schema} {schema_url}".format(schema=mtconnect_schema, schema_url=schema_url)
creation_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
Header = etree.SubElement(MTConnectStreams, "Header",
creationTime=creation_time,
instanceId="0",
sender="mtcagent",
bufferSize="0",
version="0.1",
assetCount="1",
)
Streams = etree.SubElement(MTConnectStreams, "Streams")
DeviceStream = etree.SubElement(Streams, "DeviceStream",
name="VMC-3Axis",
uuid="0",
)
ComponentStream = etree.SubElement(DeviceStream, "ComponentStream",
component="Rotary",
name="C",
componentId="c1",
)
Samples = etree.SubElement(ComponentStream, "Samples")
Current = etree.SubElement(Samples, "Current",
dataItemId="c2",
timestamp=datetime.utcnow().isoformat(),
name="Scurrent",
sequence="8403169415",
subType="ACTUAL",
)
Current.text = "{current:.03f}".format(current=get_current())
Events = etree.SubElement(ComponentStream, "Events")
MachineMode = etree.SubElement(Events, "MachineMode",
dataItemId="machineMode",
timestamp=datetime.utcnow().isoformat(),
name="Cmode",
sequence="18"
)
MachineMode.text = "ON"
response = etree.tostring(MTConnectStreams,
pretty_print=True,
xml_declaration=True,
encoding='UTF-8'
)
return Response(response, mimetype='text/xml')
if __name__ == "__main__":
ADC.setup()
app.run(host='0.0.0.0', debug=False)
|
ATIX-AG/foreman-ansible-modules
|
plugins/doc_fragments/foreman.py
|
Python
|
gpl-3.0
| 9,183 | 0.001198 |
# (c) 2019, Evgeni Golov <evgeni@redhat.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Foreman documentation fragment
DOCUMENTATION = '''
requirements:
- requests
options:
server_url:
description:
- URL of the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_SERVER_URL) will be used instead.
required: true
type: str
username:
description:
- Username accessing the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_USERNAME) will be used instead.
required: true
type: str
password:
description:
- Password of the user accessing the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_PASSWORD) will be used instead.
required: true
type: str
validate_certs:
description:
- Whether or not to verify the TLS certificates of the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_VALIDATE_CERTS) will be used instead.
default: true
type: bool
'''
NESTED_PARAMETERS = '''
options:
parameters:
description:
- Entity domain specific host parameters
required: false
type: list
elements: dict
suboptions:
name:
description:
- Name of the parameter
required: true
type: str
value:
description:
- Value of the parameter
required: true
type: raw
parameter_type:
description:
- Type of the parameter
default: 'string'
choices:
- 'string'
- 'boolean'
- 'integer'
- 'real'
- 'array'
- 'hash'
- 'yaml'
- 'json'
type: str
'''
OS_FAMILY = '''
options:
os_family:
description:
- The OS family the entity shall be assigned with.
required: false
choices:
- AIX
- Altlinux
- Archlinux
- Coreos
- Debian
- Freebsd
- Gentoo
- Junos
- NXOS
- Rancheros
- Redhat
- Solaris
- Suse
- Windows
- Xenserver
type: str
'''
TAXONOMY = '''
options:
organizations:
description: List of organizations the entity should be assigned to
type: list
elements: str
locations:
description: List of locations the entity should be assigned to
type: list
|
elements: str
'''
ENTITY_STATE = '''
options:
state:
description:
- State of the entity
default: present
choices:
- present
- absent
type: str
'''
ENTITY_STATE_WITH_DEFAULTS = '''
options:
state:
description:
- State of the entity
- C(present_with_defaults) will ensure the entity exists, but won't update existing ones
default: present
choices:
- present
-
|
present_with_defaults
- absent
type: str
'''
HOST_OPTIONS = '''
options:
compute_resource:
description: Compute resource name
required: false
type: str
compute_profile:
description: Compute profile name
required: false
type: str
domain:
description: Domain name
required: false
type: str
subnet:
description: IPv4 Subnet name
required: false
type: str
subnet6:
description: IPv6 Subnet name
required: false
type: str
root_pass:
description:
- Root password.
- Will result in the entity always being updated, as the current password cannot be retrieved.
type: str
required: false
realm:
description: Realm name
required: false
type: str
architecture:
description: Architecture name
required: False
type: str
medium:
aliases: [ media ]
description:
- Medium name
- Mutually exclusive with I(kickstart_repository).
required: False
type: str
pxe_loader:
description: PXE Bootloader
required: false
choices:
- PXELinux BIOS
- PXELinux UEFI
- Grub UEFI
- Grub2 BIOS
- Grub2 ELF
- Grub2 UEFI
- Grub2 UEFI SecureBoot
- Grub2 UEFI HTTP
- Grub2 UEFI HTTPS
- Grub2 UEFI HTTPS SecureBoot
- iPXE Embedded
- iPXE UEFI HTTP
- iPXE Chain BIOS
- iPXE Chain UEFI
- None
type: str
ptable:
description: Partition table name
required: False
type: str
environment:
description: Puppet environment name
required: false
type: str
puppetclasses:
description: List of puppet classes to include in this host group. Must exist for hostgroup's puppet environment.
required: false
type: list
elements: str
config_groups:
description: Config groups list
required: false
type: list
elements: str
puppet_proxy:
description: Puppet server proxy name
required: false
type: str
puppet_ca_proxy:
description: Puppet CA proxy name
required: false
type: str
openscap_proxy:
description:
- OpenSCAP proxy name.
- Only available when the OpenSCAP plugin is installed.
required: false
type: str
content_source:
description:
- Content source.
- Only available for Katello installations.
required: false
type: str
lifecycle_environment:
description:
- Lifecycle environment.
- Only available for Katello installations.
required: false
type: str
kickstart_repository:
description:
- Kickstart repository name.
- You need to provide this to use the "Synced Content" feature.
- Mutually exclusive with I(medium).
- Only available for Katello installations.
required: false
type: str
content_view:
description:
- Content view.
- Only available for Katello installations.
required: false
type: str
activation_keys:
description:
- Activation Keys used for deployment.
- Comma separated list.
- Only available for Katello installations.
required: false
type: str
'''
ORGANIZATION = '''
options:
organization:
description:
- Organization that the entity is in
required: true
type: str
'''
SCAP_DATASTREAM = '''
options:
scap_file:
description:
- File containing XML DataStream content.
- Required when creating a new DataStream.
required: false
type: path
original_filename:
description:
- Original file name of the XML file.
- If unset, the filename of I(scap_file) will be used.
required: false
type: str
'''
OPERATINGSYSTEMS = '''
options:
operatingsystems:
description:
- List of operating systems the entity should be assigned to.
- Operating systems are looked up by their title which is composed as "<name> <major>.<minor>".
- You can omit the version part as long as you only have one operating system by that name.
required: false
type: list
elements: str
'''
OPERATINGSYSTEM = '''
options:
operatingsystem:
description:
- Operating systems are looked up by their title which is composed as "<name> <major>.<minor>".
- You can omit the version part as long as you only have one operating system by that name.
type: str
required: False
'''
INFOMODULE = '''
options:
name:
description:
- Name of the resource to fet
|
abezuglov/ANN
|
Storm-Surge/code/ilt_multi_gpu_feed.py
|
Python
|
gpl-3.0
| 12,586 | 0.014063 |
from __future__ import print_function
import numpy as np
import os
import sys
import time
import tensorflow as tf
import load_datasets as ld
import datetime as dt
import ilt_two_layers as ilt
from sklearn.metrics import mean_squared_error
import tensorflow.python.client
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('train', False, ' If True, run training & save model, otherwise -- load a previously saved model and evaluate it')
# Multi-GPU settings
flags.DEFINE_integer('num_gpus',2,'Number of GPUs in the system')
flags.DEFINE_string('tower_name','ivy','Tower names')
# Split the training data into batches. Each hurricane is 193 records. Batch sizes are usually 2^k
# When batch size equals to 0, or exceeds available data, use the whole dataset
# Large batch sizes produce more accurate update gradients, but the training is slower
flags.DEFINE_integer('batch_size', 57*193, 'Batch size. Divides evenly into the dataset size of 193')
# Save models in this directory
flags.DEFINE_string('checkpoints_dir', './models/save_two_layers_32_64_sept', 'Directory to store checkpoints')
# Statistics
flags.DEFINE_string('summaries_dir','./logs','Summaries directory')
# Evaluation
# Output dataset
flags.DEFINE_string('output','./test_tracks_out/isabel_test_track_out.dat','When model evaluation, output the data here')
# Input dataset
flags.DEFINE_string('input','./test_tracks/isabel_test_track.dat','Dataset for input')
def fill_feed_dict(data_set, inputs_pl, outputs_pl, train):
"""
Returns feed dictionary for TF.
data_set -- dataset
inputs_pl -- TF placeholder for inputs
outputs_pl -- TF placeholder for outputs
train -- if TRUE, then return DS in batches for training. Otherwise, return complete DS for validation/testing
"""
if train:
batch_size = FLAGS.batch_size
else:
batch_size = 0
# Read next batch of data from the dataset
inputs, outputs = data_set.next_batch(batch_size = batch_size)
# Create dictionary for return
feed_dict = {
inputs_pl: inputs,
outputs_pl: outputs
}
return feed_dict
def tower_loss(x, y_, scope):
"""
Calculate the total loss on a single tower
x, y_ -- inputs and expected outputs
scope -- unique prefix identifying the tower
Note: The graph is created on /cpu:0. The code below reuses the graph
"""
# Run inference and calculate the losses. The losses are stored in the collection
# so skip the returns
outputs = ilt.inference(x)
_ = ilt.loss(outputs, y_)
# Read the losses from the collection and sum them up
losses = tf.get_collection('losses', scope)
total_loss = tf.add_n(losses, name='total_loss')
loss_avg = tf.train.ExponentialMovingAverage(FLAGS.moving_avg_decay, name='avg')
loss_avg_op = loss_avg.apply(losses+[total_loss])
with tf.control_dependencies([loss_avg_op]):
total_loss = tf.identity(total_loss)
return total_loss
def average_gradients(tower_grads):
"""
Calculate the average gradient for each shared variable across all towers
tower_grads -- list of lists of tuples (gradient, variable)
"""
average_grads = []
# zip(*tower_grads) puts grads for each variable together
# grad_and_vars is a tuple of tuples ((grad_gpu1, var1),(grad_gpu2, var1))
for grad_and_vars in zip(*tower_grads):
grads = []
        # g: an individual tower's gradient for this variable
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g,0)
grads.append(expanded_g)
        # grad: the gradient averaged across the GPUs
grad = tf.concat(0,grads)
grad = tf.reduce_mean(grad,0)
# get the variable as the second element from the first tuple
v = grad_and_vars[0][1]
# combine the gradient and append it to the average_grads
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
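# Illustrative sketch (assumes numpy; the names below are placeholders, not part of
# the training graph): tower_grads pairs each gradient with its variable per tower,
# and zip(*tower_grads) regroups the tuples per variable so the mean can be taken
# across towers, mirroring what average_gradients does with TF ops above.
def _average_gradients_shape_demo():
    import numpy as np
    tower_grads = [[(np.array([1.0, 2.0]), "w"), (np.array([0.5]), "b")],   # tower 0
                   [(np.array([3.0, 4.0]), "w"), (np.array([1.5]), "b")]]   # tower 1
    for grad_and_vars in zip(*tower_grads):
        grads = np.stack([g for g, _ in grad_and_vars])   # shape: (num_towers, ...)
        var = grad_and_vars[0][1]
        print(var, grads.mean(axis=0))   # "w" -> [2. 3.], "b" -> [1.]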
def train():
"""
Build the graph and run training on multiple GPU's
"""
# Assign datasets
train_dataset, valid_dataset, test_dataset = ld.read
|
_data_sets()
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Prepare placeholders for inputs and expected outputs
x = tf.
|
placeholder(tf.float32, [None, FLAGS.input_vars], name='x-input') # Note: these are normalized inputs
y_ = tf.placeholder(tf.float32, [None, FLAGS.output_vars], name = 'y-input')
# Create variables for input and output data moments and initialize them with train datasets' moments
input_means = tf.get_variable('input_means', trainable = False,
initializer = tf.convert_to_tensor(train_dataset.input_moments[0]))
input_stds = tf.get_variable('input_stds', trainable = False,
initializer = tf.convert_to_tensor(train_dataset.input_moments[1]))
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate, global_step, FLAGS.max_steps,
FLAGS.learning_rate_decay, staircase=False)
# create a standard gradient descent optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
# tower_grads -- list of gradients (list of list of tuples like (grad1, var1))
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d'%i): # make sure TF runs the code on the GPU:%d tower
with tf.name_scope('%s_%d' % (FLAGS.tower_name, i)) as scope:
# Construct the entire ANN, but share the vars across the towers
loss = tower_loss(x, y_, scope)
# Make sure that the vars are reused for the next tower
tf.get_variable_scope().reuse_variables()
#summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# calculate the gradients and add them to the list
grads = optimizer.compute_gradients(loss)
tower_grads.append(grads)
# Add this here in case we need to get outputs after training is complete
outputs = ilt.inference(x)
#summaries.append(tf.scalar_summary('MSE',loss))
# calculate average gradients & apply gradients to the model
grads, v = zip(*average_gradients(tower_grads))
grads, _ = tf.clip_by_global_norm(grads, 1.25)
apply_gradient_op = optimizer.apply_gradients(zip(grads,v), global_step = global_step)
#for grad, var in grads:
#if grad is not None:
#summaries.append(tf.histogram_summary(var.op.name+'/gradients', grad))
#summaries.append(tf.scalar_summary(var.op.name+'/sparsity',tf.nn.zero_fraction(var)))
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_avg_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
train_op = tf.group(apply_gradient_op, variables_averages_op)
train_op = apply_gradient_op
#merged = tf.merge_all_summaries()
init = tf.initialize_all_variables()
sess = tf.Session(config = tf.ConfigProto(
allow_soft_placement = True, # allows to utilize GPU's & CPU's
log_device_placement = False)) # shows GPU/CPU allocation
# Prepare folders for saving models and its stats
#date_time_stamp = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir+'/train/'+date_time_stamp) #,sess.graph)
#test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir+'/validation/'+date_time_stamp)
saver = tf.train.Saver(tf.all_variables())
# Below is the code for running graph
sess.run(init)
tf.train.start_queue_runners(sess=sess)
valid_loss = 1.0
train_loss = 1.0
train_losses = 0
num_steps = 0
# Main training loop
for
|
SlicingDice/slicingdice-python
|
pyslicer/client.py
|
Python
|
mit
| 12,918 | 0.000077 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 The Simbiose Ventures Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python client to Slicing Dice API"""
import ujson
from . import exceptions
from .api import SlicingDiceAPI
from .url_resources import URLResources
from .utils import validators
class SlicingDice(SlicingDiceAPI):
"""A python interface to Slicing Dice API
Example usage:
To create an object of the SlicingDice:
from pyslicer.api import SlicingDice
sd = SlicingDice('my-token')
To create a column:
column_json = {
'name': 'Pyslicer String Column',
'description': 'Pyslicer example description',
'type': 'string',
'cardinality': 'low'}
print sd.create_column(column_json)
To make a query:
query_json = {
'type': 'count',
'select': [
{
"pyslicer-string-column":
{
"equal": "test_value_1"
}
},
"or",
{
"pyslicer-string-column":
{
"equal": "test_value_2"
}
},
]
}
print sd.query(query_json)
To insert data:
inserting_json = {
'foo@bar.com': {
'pyslicer-string-column': 'test_value_1',
'pyslicer-integer-column': 42,
},
'baz@bar.com': {
'pyslicer-string-column': 'test_value_2',
'pyslicer-integer-column': 42,
},
}
print sd.insert(inserting_json)
"""
def __init__(
self, write_key=None, read_key=None, master_key=None,
custom_key=None, use_ssl=True, timeout=60):
"""Instantiate a new SlicingDice object.
Keyword arguments:
key(string or SlicerKey obj) -- Key to access API
        use_ssl(bool) -- Whether to verify SSL certificates for
            HTTPS requests. Defaults to True. (Optional)
        timeout(int) -- Request timeout in seconds,
            defaults to 60.
"""
super(SlicingDice, self).__init__(
master_key, write_key, read_key, custom_key, use_ssl, timeout)
def _count_query_wrapper(self, url, query):
"""Validate count query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A count query
"""
sd_count_query = validators.QueryCountValidator(query)
if sd_count_query.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def _data_extraction_wrapper(self, url, query):
"""Validate data extraction query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A data extraction query
"""
sd_extraction_result = validators.QueryDataExtractionValidator(query)
if sd_extraction_result.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def _saved_query_wrapper(self, url, query, update=False):
"""Validate saved query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A saved query
update(bool) -- Indicates with operation is update a
saved query or not.(default false)
"""
req_type = "post"
if update:
req_type = "put"
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type=req_type,
key_level=2)
def get_database(self):
"""Get a database associated with this client (related to keys passed
on construction)"""
url = SlicingDice.BASE_URL + URLResources.DATABASE
return self._make_request(
url=url,
req_type="get",
key_level=2
)
def create_column(self, data):
"""Create column in Slicing Dice
Keyword arguments:
        data -- A dictionary or list in the Slicing Dice column
            format.
"""
sd_data = validators.ColumnValidator(data)
if sd_data.validator():
url = SlicingDice.BASE_URL + URLResources.COLUMN
return self._make_request(
url=url,
req_type="post",
json_data=ujson.dumps(data),
key_level=1)
def get_columns(self):
"""Get a list of columns"""
url = SlicingDice.BASE_URL + URLResources.COLUMN
return self._make_request(
url=url,
req_type="get",
key_level=2)
def insert(self, data):
"""Insert data into Slicing Dice API
Keyword arguments:
        data -- A dictionary in the Slicing Dice data format.
"""
sd_data = validators.InsertValidator(data)
if sd_data.validator():
url = SlicingDice.BASE_URL + URLResources.INSERT
return self._make_request(
|
url=url,
json_data=ujson.dumps(data),
req_type="post",
key_level=1)
def count_entity(self, query):
"""Make a count entity query
Keyword arguments:
query -- A dictionary in the Slicing Dice query
"""
url = SlicingDice.BASE_URL + URLReso
|
urces.QUERY_COUNT_ENTITY
return self._count_query_wrapper(url, query)
def count_entity_total(self, dimensions=None):
"""Make a count entity total query
Keyword arguments:
dimensions -- A dictionary containing the dimensions in which
the total query will be performed
"""
query = {}
if dimensions is not None:
query['dimensions'] = dimensions
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_ENTITY_TOTAL
return self._make_request(
url=url,
req_type="post",
json_data=ujson.dumps(query),
key_level=0)
def count_event(self, query):
"""Make a count event query
Keyword arguments:
        query -- A dictionary in the Slicing Dice query format
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_EVENT
return self._count_query_wrapper(url, query)
def aggregation(self, query):
"""Make a aggregation query
Keyword arguments:
query -- An aggregation query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_AGGREGATION
if "query" not in query:
raise exceptions.InvalidQueryException(
"The aggregation query must have up the key 'query'.")
columns = query["query"]
if len(columns) > 5:
raise exceptions.MaxLimitException(
"The aggregation query must have up to 5 columns per request.")
return self._make_request(
url=url,
j
|
oliver/meld
|
vc/cdv.py
|
Python
|
gpl-2.0
| 1,787 | 0.012311 |
### Copyright (C) 2009 Vincent Legoll <vincent.legoll@gmail.com>
### Redistribution and use in source and binary forms, with or without
### modification, are permitted provided that the following conditions
### are met:
###
### 1. Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following disclaimer.
### 2. Redistributions in binary form must reproduce the above copyright
### notice, this list of conditions and the following disclaimer in the
### documentation and/or other materials provided with the distribution.
### THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
### IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIE
|
S
### OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
### IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
### INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, B
|
UT
### NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
### DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
### THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
### (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
### THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import _vc
import svn
class Vc(svn.Vc):
CMD = "cdv"
NAME = "Codeville"
VC_DIR = ".cdv"
PATCH_INDEX_RE = "^[+]{3} (.+)$"
state_map = {"M": _vc.STATE_MODIFIED,}
def get_working_directory(self, workdir):
return self.root
def _get_matches(self, directory):
entries = _vc.popen([self.CMD, "status"], cwd=directory).read()
for line in entries.split("\n")[1:-1]:
yield line[3:], line[0], ""
|
zakandrewking/cobrapy
|
cobra/util/solver.py
|
Python
|
lgpl-2.1
| 15,435 | 0 |
# -*- coding: utf-8 -*-
"""Additional helper functions for the optlang solvers.
All functions integrate well with the context manager, meaning that
all operations defined here are automatically reverted when used in a
`with model:` block.
The functions defined here together with the existing model functions should
allow you to implement custom flux analysis methods with ease.
"""
from __future__ import absolute_import
import re
from functools import partial
from collections import namedtuple
from types import ModuleType
from warnings import warn
import optlang
from optlang.symbolics import Basic, Zero
from cobra.exceptions import OptimizationError, OPTLANG_TO_EXCEPTIONS_DICT
from cobra.util.context import get_context
class SolverNotFound(Exception):
"""A simple Exception when a solver can not be found."""
pass
# Define all the solvers that are found in optlang.
solvers = {match.split("_")[0]: getattr(optlang, match)
for match in dir(optlang) if "_interface" in match}
# Defines all the QP solvers implemented in optlang.
qp_solvers = ["cplex"] # QP in gurobi not implemented yet
def linear_reaction_coefficients(model, reactions=None):
"""Coefficient for the reactions in a linear objective.
Parameters
----------
model : cobra model
the model object that defined the objective
reactions : list
an optional list for the reactions to get the coefficients for. All
reactions if left missing.
Returns
-------
dict
A dictionary where the key is the reaction object and the value is
the corresponding coefficient. Empty dictionary if there are no
linear terms in the objective.
"""
linear_coefficients = {}
reactions = model.reactions if not reactions else reactions
try:
objective_expression = model.solver.objective.expression
coefficients = objective_expression.as_coefficients_dict()
except AttributeError:
return linear_coefficients
for rxn in reactions:
forward_coefficient = coefficients.get(rxn.forward_variable, 0)
reverse_coefficient = coefficients.get(rxn.reverse_variable, 0)
if forward_coefficient != 0:
if forward_coefficient == -reverse_coefficient:
linear_coefficients[rxn] = float(forward_coefficient)
return linear_coefficients
def _valid_atoms(model, expression):
"""Check whether a sympy expression references the correct variables.
Parameters
----------
model : cobra.Model
The model in which to check for variables.
expression : sympy.Basic
A sympy expression.
Returns
-------
boolean
True if all referenced variables are contained in model, False
otherwise.
"""
atoms = expression.atoms(optlang.interface.Variable)
return all(a.problem is model.solver for a in atoms)
def set_objective(model, value, additive=False):
"""Set the model objective.
Parameters
----------
model : cobra model
The model to set the objective for
value : model.problem.Objective,
e.g. optlang.glpk_interface.Objective, sympy.Basic or dict
If the model objective is linear, the value can be a new Objective
object or a dictionary with linear coefficients where each key is a
reaction and the element the new coefficient (float).
        If the objective is not linear and `additive` is true, only values
        of class Objective are accepted.
additive : bool
If true, add the terms to the current objective, otherwise start with
an empty objective.
"""
interface = model.problem
reverse_value = model.solver.objective.expression
reverse_value = interface.Objective(
reverse_value, direction=model.solver.objective.direction,
sloppy=True)
if isinstance(value, dict):
if not model.objective.is_Linear:
raise ValueError('can only update non-linear objectives '
'additively using object of class '
'model.problem.Objective, not %s' %
type(value))
if not additive:
model.solver.objective = interface.Objective(
Zero, direction=model.solver.objective.direction)
for reaction, coef in value.items():
model.solver.objective.set_linear_coefficients(
{reaction.forward_variable: coef,
reaction.reverse_variable: -coef})
elif isinstance(value, (Basic, optlang.interface.Objective)):
if isinstance(value, Basic):
value = interface.Objective(
value, direction=model.solver.objective.direction,
sloppy=False)
# Check whether expression only uses variables from current model
# clone the objective if not, faster than cloning without checking
if not _valid_atoms(model, value.expression):
value = interface.Objective.clone(value, model=model.solver)
if not additive:
model.solver.objective = value
else:
model.solver.objective += value.expression
else:
raise TypeError(
'%r is not a valid objective for %r.' % (value, model.solver))
context = get_context(model)
if context:
def reset():
model.solver.objective = reverse_value
model.solver.objective.direction =
|
reverse_value.direction
context(reset)
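def _set_objective_demo(model, rxn):
    """Hypothetical usage sketch: ``model`` is assumed to be a cobra.Model and
    ``rxn`` one of its reactions. Inside a ``with model:`` block every change
    made by set_objective is reverted automatically on exit."""
    with model:
        set_objective(model, {rxn: 1.0})                  # replace the linear objective
        set_objective(model, {rxn: 0.5}, additive=True)   # add to the current objective
    # leaving the block restores the previous objective via the context machinery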
def interface_to_str(interface):
"""Give a string representation for an optlang interface.
Parameters
----------
interface : string, ModuleType
Full name of the interface in optlang or cobra representation.
For instance 'optlang.glp
|
k_interface' or 'optlang-glpk'.
Returns
-------
string
The name of the interface as a string
"""
if isinstance(interface, ModuleType):
interface = interface.__name__
return re.sub(r"optlang.|.interface", "", interface)
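# For instance (illustrative only):
#   interface_to_str("optlang.glpk_interface")  # -> "glpk"
#   interface_to_str("optlang-glpk")            # -> "glpk"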
def get_solver_name(mip=False, qp=False):
"""Select a solver for a given optimization problem.
Parameters
----------
mip : bool
Does the solver require mixed integer linear programming capabilities?
qp : bool
Does the solver require quadratic programming capabilities?
Returns
-------
string
The name of feasible solver.
Raises
------
SolverNotFound
If no suitable solver could be found.
"""
if len(solvers) == 0:
raise SolverNotFound("no solvers installed")
# Those lists need to be updated as optlang implements more solvers
mip_order = ["gurobi", "cplex", "glpk"]
lp_order = ["glpk", "cplex", "gurobi"]
qp_order = ["cplex"]
if mip is False and qp is False:
for solver_name in lp_order:
if solver_name in solvers:
return solver_name
# none of them are in the list order - so return the first one
return list(solvers)[0]
elif qp: # mip does not yet matter for this determination
for solver_name in qp_order:
if solver_name in solvers:
return solver_name
raise SolverNotFound("no qp-capable solver found")
else:
for solver_name in mip_order:
if solver_name in solvers:
return solver_name
raise SolverNotFound("no mip-capable solver found")
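# Illustrative calls (which name comes back depends on the installed solvers):
#   get_solver_name()         # e.g. "glpk" for a plain LP
#   get_solver_name(qp=True)  # "cplex", or SolverNotFound if it is not installed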
def choose_solver(model, solver=None, qp=False):
"""Choose a solver given a solver name and model.
This will choose a solver compatible with the model and required
capabilities. Also respects model.solver where it can.
Parameters
----------
model : a cobra model
The model for which to choose the solver.
solver : str, optional
The name of the solver to be used. Optlang solvers should be prefixed
by "optlang-", for instance "optlang-glpk".
qp : boolean, optional
Whether the solver needs Quadratic Programming capabilities.
Returns
-------
legacy : boolean
Whether the returned solver is a legacy (old cobra solvers) version or
an optlang s
|
OpenLinkedSocialData/ocd
|
OCD.py
|
Python
|
unlicense
| 24,833 | 0.021655 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
import cPickle as pickle, time, string
from SPARQLWrapper import SPARQLWrapper, JSON
import rdflib as r, pygraphviz as gv
import pylab as pl
# main variables:
# classes (kk), props,
# vizinhanca_ (class neighborhoods)
T=time.time()
U=r.URIRef
def fazQuery(query):
NOW=time.time()
#sparql = SPARQLWrapper("http://200.144.255.210:8082/cidadedemocratica/query")
sparql = SPARQLWrapper("http://200.144.255.210:8082/cd/query")
sparql.setQuery(PREFIX+query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print time.time()-NOW
return results["results"]["bindings"]
g=r.Graph()
def G(S,P,O):
global g
g.add((S,P,O))
owl = r.namespace.OWL
rdf = r.namespace.RDF
rdfs = r.namespace.RDFS
ocd = r.Namespace("http://purl.org/socialparticipation/ocd/")
xsd = r.namespace.XSD
notFunctionalProperties=["tagged","contact","supporter"]
notFunctionalProperties_=[ocd+i for i in notFunctionalProperties]
####
# Roadmap of methods for building the ontology based on the data
# data-driven ontology
# 0) Triplify as in triplificaCD.py
# use minimal names for properties and classes such as :body or :name; classes such as
# commentBody or userName should be avoided
# in the triplification. They can be observed and added
# when deriving the ontology.
# DONE
# 0.5) Put the triplified data into a SPARQL endpoint to run the queries needed
# to derive the ontology.
# DONE
# 1) Retrieve all classes
# ?o where { ?s rdf:type ?o }
# with a few exceptions
PREFIX="""PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dcty: <http://purl.org/dc/dcmitype/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX sioc: <http://rdfs.org/sioc/ns#>
PREFIX schema: <http://schema.org/>
PREFIX aa: <http://purl.org/socialparticipation/aa/>
PREFIX ocd: <http://purl.org/socialparticipation/ocd/>"""
# CHECK TTM
q="SELECT DISTINCT ?o WHERE {?s rdf:type ?o}"
NOW=time.time()
sparql = SPARQLWrapper("http://200.144.255.210:8082/cd/query")
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("%.2f segundos para puxar todas as classes"%
(time.time()-NOW,))
classes=[i["o"]["value"] for i in results["results"]["bindings"] if "w3.org" not in i["o"]["value"]]
# 2) Retrieve all properties
# ?p where { ?s ?p ?o. }
# with a few exceptions
q="SELECT DISTINCT ?p WHERE {?s ?p ?o}"
NOW=time.time()
sparql = SPARQLWrapper("http://200.144.255.210:8082/cd/query")
sparql.setQuery(PREFIX+q)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("%.2f segundos para puxar todas as propriedades"%
(time.time()-NOW,))
props=[i["p"]["value"] for i in results["results"]["bindings"] if "w3.org" not in i["p"]["value"]]
props_=[i.split("/")[-1] for i in props]
# 3) Build a structure for each class and one figure:
# class in the middle, literal data on the left, classes on the right
# for each class, for each individual of the class,
# look at the relations established with the individual as
# subject and as object. Record the property and the data type
# at the far end
# store the relationship structure of the class.
vizinhanca={}
vizinhanca_={}
for classe in classes:
#res=fazQuery("SELECT DISTINCT ?p (datatype(?o) as ?do) WHERE { ?i a <%s> . ?i ?p ?o }"%(classe,))
NOW=time.time()
print("\n%s antecedente, consequente: "%(classe.split("/")[-1],))
ant=fazQuery("SELECT DISTINCT ?p ?cs (datatype(?s) as ?ds) WHERE { ?i a <%s> . ?s ?p ?i . OPTIONAL { ?s a ?cs . } }"%(classe,))
ant_=[]
for aa in ant:
if "cs" in aa.keys():
tobj=aa["cs"]["value"]
ant_.append((tobj,aa["p"]["value"]))
elif (("ds" in aa.keys()) and ("w3.org" not in aa["p"]["value"])):
tobj=aa["ds"]["value"]
ant_.append((tobj,aa["p"]["value"]))
cons=fazQuery("SELECT DISTINCT ?p ?co (datatype(?o) as ?do) WHERE { ?i a <%s> . ?i ?p ?o . OPTIONAL { ?o a ?co . } }"%(classe,))
cons_=[]
for cc in cons:
if "co" in cc.keys():
tobj=cc["co"]["value"]
cons_.append((cc["p"]["value"],tobj))
elif (("do" in cc.keys()) and ("w3.org" not in cc["p"]["value"])):
tobj=cc["do"]["value"]
cons_.append((cc["p"]["value"],tobj))
elif "/mbox" in cc["p"]["value"]:
tobj="XMLSchema#anyURI"
cons_.append((cc["p"]["value"],tobj))
vizinhanca[classe]=(ant,cons)
vizinhanca_[classe]=(ant_,cons_)
f=open("dumpVV.pickle","wb")
vv=(vizinhanca,vizinhanca_)
pickle.dump(vv,f)
f.close()
fo=open("dumpVV.pickle","rb")
vv_=pickle.load(fo)
fo.close()
kk=vv_[1].keys()
for tkey in kk:
cl=tkey
cl_=cl.split("/")[-1]
print cl_
ex=vv_[1][cl]
A=gv.AGraph(directed=True)
A.graph_attr["label"]=("classe: %s, no namespace interno: http://purl.org/socialparticipation/ocd/"%(cl_,))
    for i in xrange(len(ex[0])): # antecedents
label=ex[0][i][0].split("/")[-1]
elabel=ex[0][i][1].split("/")[-1]
print label, elabel
A.add_node(label,style="filled")
A.add_edge(label,cl_)
e=A.get_edge(label,cl_)
e.attr["label"]=elabel
n=A.get_node(label)
n.attr['color']="#A2F3D1"
print("\n\n")
    for i in xrange(len(ex[1])): # consequents
label=ex[1][i][1].split("/")[-1]
elabel=ex[1][i][0].split("/")[-1]
print elabel, label
if "XMLS" in label:
label_=i
else:
label_=label
A.add_node(label_,style="filled")
A.add_edge(cl_,label_)
e=A.get_edge(cl_,label_)
e.attr["label"]=elabel
n=A.get_node(label_)
n.attr['label']=label
if "XMLS" in label:
n.attr['color']="#FFE4AA"
else:
n.attr['color']="#A2F3D1"
n=A.get_node(cl_)
n.attr['style']="filled"
n.attr['color']="#6EAA91"
nome=("imgs/classes/%s.png"%(cl_,))
A.draw(nome,prog="dot") # draw to png using circo
print("Wrote %s"%(nome,))
# 4) Build the overall structure and the overall figure
A=gv.AGraph(directed=True)
A.graph_attr["label"]="Diagrama geral da OCD no namespace interno: http://purl.org/socialparticipation/ocd/"
ii=1
for tkey in kk:
cl_=tkey.split("/")[-1]
if cl_ not in A.nodes():
A.add_node(cl_,style="filled")
n=A.get_node(cl_)
n.attr['color']="#A2F3D1"
ex=vv_[1][tkey]
for i in xrange(len(ex[0])):
label=ex[0][i][0].split("/")[-1]
elabel=ex[0][i][1].split("/")[-1]
print elabel
if label not in A.nodes():
A.add_node(label,style="filled")
n=A.get_node(label)
n.attr['color']="#A2F3D1"
A.add_edge(label,cl_)
e=A.get_edge(label,cl_)
e.attr["label"]=elabel
print("\n\n")
for i in xrange(len(ex[1])):
label=ex[1][i][1].split("/")[-1]
elabel=ex[1][i][0].split("/")[-1]
print elabel, label
if "XMLS" in label:
label_=ii; ii+=1
color="#FFE4AA"
else:
label_=label
color="#A2F3D1"
if label_ not in A.nodes():
A.add_node(label_,style="filled")
n=A.get_node(label_)
n.attr['label']=label.split("#")[-1]
n.attr['color']=color
|
A.add_edge(cl_,label_)
e=A.get_edge(cl_,label_)
e.attr["label"]=elabel
e.attr["color"]=color
e.attr["penwidth"]=2
A.draw("
|
imgs/OCD.png",prog="twopi",args="-Granksep=4")
A.draw("imgs/OCD2.png",prog="dot",args="-Granksep=.4 -Gsize='1000,1000'")
print("Wrote geral")
# 4.5) qualify literals
## ok.
# 5) Looking at the triples, observe hierarchies and namespace-specific concepts,
# such as commentBody and userName. See README.md.
G(ocd.Problem, rdfs.subClassOf,ocd.Post)
G(ocd.Proposal, rdfs.subClassOf,ocd.Post)
G(ocd.supportCount, rdfs.subPropertyOf ,ocd.cou
|
hachard/Cra-Magnet
|
flask/lib/python3.5/site-packages/flask_babel/speaklater.py
|
Python
|
gpl-3.0
| 1,713 | 0.000584 |
# -*- coding: utf-8 -*-
from flask_babel._compat import text_type
class LazyString(object):
def __init__(self, func, *args, **kwargs):
sel
|
f._func =
|
func
self._args = args
self._kwargs = kwargs
def __getattr__(self, attr):
string = text_type(self)
if hasattr(string, attr):
return getattr(string, attr)
raise AttributeError(attr)
def __str__(self):
return text_type(self._func(*self._args, **self._kwargs))
def __len__(self):
return len(text_type(self))
def __getitem__(self, key):
return text_type(self)[key]
def __iter__(self):
return iter(text_type(self))
def __contains__(self, item):
return item in text_type(self)
def __add__(self, other):
return text_type(self) + other
def __radd__(self, other):
return other + text_type(self)
def __mul__(self, other):
return text_type(self) * other
def __rmul__(self, other):
return other * text_type(self)
def __lt__(self, other):
return text_type(self) < other
def __le__(self, other):
return text_type(self) <= other
def __eq__(self, other):
return text_type(self) == other
def __ne__(self, other):
return text_type(self) != other
def __gt__(self, other):
return text_type(self) > other
def __ge__(self, other):
return text_type(self) >= other
def __html__(self):
return text_type(self)
def __hash__(self):
return hash(text_type(self))
def __mod__(self, other):
return text_type(self) % other
def __rmod__(self, other):
        return other % text_type(self)
|
yarikoptic/Fail2Ban-Old-SVNGIT
|
server/datetemplate.py
|
Python
|
gpl-2.0
| 5,047 | 0.033902 |
# -*- coding: utf-8 -*-
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Author: Cyril Jaquier
#
# $Revision$
__author__ = "Cyril Jaquier"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import re, time
from mytime import MyTime
import iso8601
class DateTemplate:
def __init__(self):
self.__name = ""
self.__regex = ""
self.__cRegex = None
self.__hits = 0
def setName(self, name):
self.__name = name
def getName(self):
return self.__name
def setRegex(self, regex):
self.__regex = regex.strip()
self.__cRegex = re.compile(regex)
def getRegex(self):
return self.__regex
def getHits(self):
return self.__hits
def matchDate(self, line):
dateMatch = self.__cRegex.search(line)
if not dateMatch == None:
self.__hits += 1
return dateMatch
def getDate(self, line):
raise Exception("matchDate() is abstract")
class DateEpoch(DateTemplate):
def __init__(self):
DateTemplate.__init__(self)
# We already know the format for TAI64N
self.setRegex("^\d{10}(\.\d{6})?")
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
# extract part of format which represents seconds since epoch
date = list(time.localtime(float(dateMatch.group())))
return date
##
# Use strptime() to parse a date. Our current locale is the 'C'
# one because we do not set the locale explicitly. This is POSIX
# standard.
class DateStrptime(DateTemplate):
TABLE = dict()
TABLE["Jan"] = []
TABLE["Feb"] = [u"Fév"]
TABLE["Mar"] = [u"Mär"]
TABLE["Apr"] = ["Avr"]
TABLE["May"] = ["Mai"]
TABLE["Jun"] = []
TABLE["Jul"] = []
TABLE["Aug"] = ["Aou"]
T
|
ABLE["Sep"] = []
TABLE["Oct"] = ["Okt"]
TABLE["Nov"] = []
TABLE["Dec"] = [u"Déc", "Dez"]
def __init__(self):
DateTemplate.__init__(self)
self.__pattern = ""
def setPattern(self, pattern):
self.__pattern = pattern.strip()
def getPattern(self):
return self.__pattern
#@staticmethod
def convertLocale(date):
for t i
|
n DateStrptime.TABLE:
for m in DateStrptime.TABLE[t]:
if date.find(m) >= 0:
return date.replace(m, t)
return date
convertLocale = staticmethod(convertLocale)
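	# For instance (illustrative only):
	#   DateStrptime.convertLocale(u"28 Fév 2015")  ->  u"28 Feb 2015"
	#   DateStrptime.convertLocale("28 Okt 2015")   ->  "28 Oct 2015"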
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
try:
# Try first with 'C' locale
date = list(time.strptime(dateMatch.group(), self.getPattern()))
except ValueError:
# Try to convert date string to 'C' locale
conv = self.convertLocale(dateMatch.group())
try:
date = list(time.strptime(conv, self.getPattern()))
except ValueError, e:
# Try to add the current year to the pattern. Should fix
# the "Feb 29" issue.
conv += " %s" % MyTime.gmtime()[0]
pattern = "%s %%Y" % self.getPattern()
date = list(time.strptime(conv, pattern))
if date[0] < 2000:
# There is probably no year field in the logs
date[0] = MyTime.gmtime()[0]
# Bug fix for #1241756
# If the date is greater than the current time, we suppose
# that the log is not from this year but from the year before
if time.mktime(date) > MyTime.time():
date[0] -= 1
elif date[1] == 1 and date[2] == 1:
# If it is Jan 1st, it is either really Jan 1st or there
# is neither month nor day in the log.
date[1] = MyTime.gmtime()[1]
date[2] = MyTime.gmtime()[2]
return date
class DateTai64n(DateTemplate):
def __init__(self):
DateTemplate.__init__(self)
# We already know the format for TAI64N
self.setRegex("@[0-9a-f]{24}")
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
# extract part of format which represents seconds since epoch
value = dateMatch.group()
seconds_since_epoch = value[2:17]
# convert seconds from HEX into local time stamp
date = list(time.localtime(int(seconds_since_epoch, 16)))
return date
class DateISO8601(DateTemplate):
def __init__(self):
DateTemplate.__init__(self)
date_re = "[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}" \
".[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?" \
"(Z|(([-+])([0-9]{2}):([0-9]{2})))?"
self.setRegex(date_re)
def getDate(self, line):
date = None
dateMatch = self.matchDate(line)
if dateMatch:
# Parses the date.
value = dateMatch.group()
date = list(iso8601.parse_date(value).timetuple())
return date
|
lucienfostier/gaffer
|
python/GafferUITest/MessageWidgetTest.py
|
Python
|
bsd-3-clause
| 4,866 | 0.044595 |
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest

import IECore
import Gaffer
import GafferUI
import GafferUITest
from Gaffer.Private.IECorePreview import Message
from Gaffer.Private.IECorePreview import Messages
class MessageWidgetTest( GafferUITest.TestCase ) :
def assertCounts( self, widget, debug, info, warning, error ) :
self.assertEqual( widget.messageCount( IECore.Msg.Level.Debug ), debug )
self.assertEqual( widget.messageCount( IECore
|
.Msg.L
|
evel.Info ), info )
self.assertEqual( widget.messageCount( IECore.Msg.Level.Warning ), warning )
self.assertEqual( widget.messageCount( IECore.Msg.Level.Error ), error )
self.assertEqual( widget.messageCount(), debug + info + warning + error )
def testMessages( self ) :
w = GafferUI.MessageWidget()
self.assertCounts( w, 0, 0, 0, 0 )
m = Messages()
for i in range( 24 ) :
m.add( Message( IECore.MessageHandler.Level( i % 4 ), "testMessages", "message %d" % i ) )
w.setMessages( m )
self.assertEqual( w.getMessages(), m )
self.assertCounts( w, 6, 6, 6, 6 )
w.clear()
self.assertNotEqual( w.getMessages(), m )
self.assertCounts( w, 0, 0, 0, 0 )
def testMessageLevel( self ) :
levels = (
IECore.MessageHandler.Level.Debug, IECore.MessageHandler.Level.Info,
IECore.MessageHandler.Level.Warning, IECore.MessageHandler.Level.Info
)
w = GafferUI.MessageWidget()
self.assertEqual( w.getMessageLevel(), IECore.MessageHandler.Level.Info )
for l in levels :
w.setMessageLevel( l )
self.assertEqual( w.getMessageLevel(), l )
for l in levels :
w = GafferUI.MessageWidget( messageLevel = l )
self.assertEqual( w.getMessageLevel(), l )
def testCounts( self ) :
def msg( level ) :
IECore.msg( level, "test", "test" )
self.waitForIdle( 10 )
w = GafferUI.MessageWidget()
self.assertCounts( w, 0, 0, 0, 0 )
with w.messageHandler() :
msg( IECore.Msg.Level.Error )
self.assertCounts( w, 0, 0, 0, 1 )
msg( IECore.Msg.Level.Warning )
self.assertCounts( w, 0, 0, 1, 1 )
msg( IECore.Msg.Level.Info )
self.assertCounts( w, 0, 1, 1, 1 )
msg( IECore.Msg.Level.Debug )
self.assertCounts( w, 1, 1, 1, 1 )
msg( IECore.Msg.Level.Error )
msg( IECore.Msg.Level.Error )
self.assertCounts( w, 1, 1, 1, 3 )
w.clear()
self.assertCounts( w, 0, 0, 0, 0 )
def testForwarding( self ) :
w = GafferUI.MessageWidget()
h = IECore.CapturingMessageHandler()
w.forwardingMessageHandler().addHandler( h )
self.assertEqual( w.messageCount( IECore.Msg.Level.Error ), 0 )
self.assertEqual( len( h.messages ), 0 )
with w.messageHandler() :
IECore.msg( IECore.Msg.Level.Error, "test", "test" )
self.waitForIdle( 10 )
self.assertEqual( w.messageCount( IECore.Msg.Level.Error ), 1 )
self.assertEqual( len( h.messages ), 1 )
w.forwardingMessageHandler().removeHandler( h )
with w.messageHandler() :
IECore.msg( IECore.Msg.Level.Error, "test", "test" )
self.waitForIdle( 10 )
self.assertEqual( w.messageCount( IECore.Msg.Level.Error ), 2 )
self.assertEqual( len( h.messages ), 1 )
if __name__ == "__main__":
unittest.main()
|
vitan/blaze
|
blaze/expr/reductions.py
|
Python
|
bsd-3-clause
| 6,478 | 0.001698 |
from __future__ import absolute_import, division, print_function
import toolz
from toolz import first
import datashape
from datashape import Record, dshape, DataShape
from datashape import coretypes as ct
from datashape.predicates import isscalar, iscollection
from .core import common_subexpression
from .expressions import Expr, Symb
|
ol
class Reduction(Expr):
""" A c
|
olumn-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_child', 'axis', 'keepdims'
_dtype = None
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in self.axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in self.axis)
return DataShape(*(shape + (self._dtype,)))
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
try:
return self._child._name + '_' + type(self).__name__
except (AttributeError, ValueError, TypeError):
return type(self).__name__
class any(Reduction):
_dtype = ct.bool_
class all(Reduction):
_dtype = ct.bool_
class sum(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class max(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class min(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class mean(Reduction):
_dtype = ct.real
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_child', 'unbiased', 'axis', 'keepdims'
_dtype = ct.real
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
Reduction.__init__(self, child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_child', 'unbiased', 'axis', 'keepdims'
_dtype = ct.real
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
Reduction.__init__(self, child, *args, **kwargs)
class count(Reduction):
""" The number of non-null elements """
_dtype = ct.int_
class nunique(Reduction):
_dtype = ct.int_
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze.compute.python import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_child', 'names', 'values', 'keepdims'
def __init__(self, _child, names, values, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
@property
def dshape(self):
measure = Record(list(zip(self.names,
[v._dtype for v in self.values])))
if self.keepdims:
return DataShape(*((1,) * self._child.ndim + (measure,)))
else:
return DataShape(measure)
def __str__(self):
return 'summary(' + ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values)) + \
', keepdims=%s' % self.keepdims + ')'
def summary(keepdims=False, **kwargs):
items = sorted(kwargs.items(), key=first)
names = tuple(map(first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
raise ValueError()
return Summary(child, names, values, keepdims=keepdims)
summary.__doc__ = Summary.__doc__
from datashape.predicates import (iscollection, isscalar, isrecord, isboolean,
isnumeric)
from .expressions import schema_method_list, dshape_method_list
schema_method_list.extend([
(isboolean, set([any, all, sum])),
(isnumeric, set([mean, sum, mean, min, max, std, var])),
])
dshape_method_list.extend([
(iscollection, set([count, nunique, min, max])),
])
|
akoebbe/sweetiepi
|
sweetiepi/clocks/choices.py
|
Python
|
mit
| 1,719 | 0.000582 |
from django.utils.translation import ugettext_lazy as _
DIAL_CHOICES = (
('none', _('None')),
('din 41091.1', _('Dial with minute and hour markers (DIN 41091, Sect. 1)')),
('din 41091.3', _('Dial with hour markers (DIN 41091, Sect. 3)')),
('din 41091.4', _('Dial with hour numerals (DIN 41091, Part 4)')),
('swiss', _('Dial with minute and hour markers (Bauhaus)')),
('austria', _('Dial with minute and hour markers (Austria)')),
('points', _('Dial with hour dots')),
)
HOUR_HAND_CHOICES = (
('none', _('None')),
('din 41092.3', _('Pointed, bar-shaped hand (DIN 41092, Sect. 3)')),
('german', _('Blunt, bar-shaped hand (German Rail)')),
('siemens', _('Heart-shaped hand (Siemens)')),
('swiss', _('Blunt, javelin-shaped hand (Austria)')),
)
MINUTE_HAND_CHOICES = (
('none', _('Without minute hand')),
('din 41092.3', _('Pointed, bar-shaped hand (DIN 41092, Sect. 3)')),
('german', _('Blunt, bar-shaped hand (German Rail)')),
('siemens', _('Serpentine hand (Siemens)'))
|
,
('swiss', _('Blunt, javelin-shaped hand (Austria)')),
)
SECOND_HAND_CHOICES = (
('none', _('Without second h
|
and')),
('din 41071.1', _('Javelin-shaped hand (DIN 41071, Sect. 1)')),
('din 41071.2', _('Perforated pointer hand (DIN 41071, Sect. 2)')),
('german', _('Modern perforated pointer hand (German Rail)')),
('swiss', _('Disc-end hand (Switzerland)')),
)
MINUTE_HAND_MOVEMENT_CHOICES = (
('stepping', _('Stepping minute hand')),
('sweeping', _('Sweeping minute hand')),
)
SECOND_HAND_MOVEMENT_CHOICES = (
('stepping', _('Stepping second hand')),
('sweeping', _('Sweeping second hand')),
('swinging', _('Oscillating second hand')),
)
|
md1024/rams
|
uber/decorators.py
|
Python
|
agpl-3.0
| 18,151 | 0.003251 |
from uber.common import *
def swallow_exceptions(func):
"""
Don't allow ANY
|
Exceptions to be raised from this.
Use this ONLY where it's absolutely needed, such as dealing with locking functionality.
WARNING: DO NOT USE THIS UNLESS YOU KNOW WHAT YOU'RE DOING :)
"""
@wraps(func)
def swallow_exception(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
log.error("Exception raised, but
|
we're going to ignore it and continue.", exc_info=True)
return swallow_exception
def log_pageview(func):
@wraps(func)
def with_check(*args, **kwargs):
with sa.Session() as session:
try:
attendee = session.admin_account(cherrypy.session['account_id'])
except:
pass # we don't care about unrestricted pages for this version
else:
sa.PageViewTracking.track_pageview()
return func(*args, **kwargs)
return with_check
def redirect_if_at_con_to_kiosk(func):
@wraps(func)
def with_check(*args, **kwargs):
if c.AT_THE_CON and c.KIOSK_REDIRECT_URL:
raise HTTPRedirect(c.KIOSK_REDIRECT_URL)
return func(*args, **kwargs)
return with_check
def check_if_can_reg(func):
@wraps(func)
def with_check(*args, **kwargs):
if c.BADGES_SOLD >= c.MAX_BADGE_SALES:
return render('static_views/prereg_soldout.html')
elif c.BEFORE_PREREG_OPEN:
return render('static_views/prereg_not_yet_open.html')
elif c.AFTER_PREREG_TAKEDOWN and not c.AT_THE_CON:
return render('static_views/prereg_closed.html')
return func(*args, **kwargs)
return with_check
def get_innermost(func):
return get_innermost(func.__wrapped__) if hasattr(func, '__wrapped__') else func
def site_mappable(func):
func.site_mappable = True
return func
def suffix_property(func):
func._is_suffix_property = True
return func
def _suffix_property_check(inst, name):
if not name.startswith('_'):
suffix = '_' + name.rsplit('_', 1)[-1]
prop_func = getattr(inst, suffix, None)
if getattr(prop_func, '_is_suffix_property', False):
field_name = name[:-len(suffix)]
field_val = getattr(inst, field_name)
return prop_func(field_name, field_val)
suffix_property.check = _suffix_property_check
def csrf_protected(func):
@wraps(func)
def protected(*args, csrf_token, **kwargs):
check_csrf(csrf_token)
return func(*args, **kwargs)
return protected
def ajax(func):
"""decorator for Ajax POST requests which require a CSRF token and return JSON"""
@wraps(func)
def returns_json(*args, **kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
assert cherrypy.request.method == 'POST', 'POST required, got {}'.format(cherrypy.request.method)
check_csrf(kwargs.pop('csrf_token', None))
return json.dumps(func(*args, **kwargs), cls=serializer).encode('utf-8')
return returns_json
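# Hypothetical usage sketch (the handler and field names below are made up): a
# method decorated with @ajax must be POSTed with a csrf_token field, and whatever
# it returns is serialized to JSON for the response body.
#
#   class ExampleSection:
#       @ajax
#       def set_flag(self, id, value=''):
#           # ... update the record identified by `id` here ...
#           return {'success': True, 'id': id, 'value': value}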
def ajax_gettable(func):
"""
Decorator for page handlers which return JSON. Unlike the above @ajax decorator,
this allows either GET or POST and does not check for a CSRF token, so this can
be used for pages which supply data to external APIs as well as pages used for
periodically polling the server for new data by our own Javascript code.
"""
@wraps(func)
def returns_json(*args, **kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps(func(*args, **kwargs), cls=serializer).encode('utf-8')
return returns_json
def multifile_zipfile(func):
func.site_mappable = True
@wraps(func)
def zipfile_out(self, session):
zipfile_writer = BytesIO()
with zipfile.ZipFile(zipfile_writer, mode='w') as zip_file:
func(self, zip_file, session)
# must do this after creating the zip file as other decorators may have changed this
# for example, if a .zip file is created from several .csv files, they may each set content-type.
cherrypy.response.headers['Content-Type'] = 'application/zip'
cherrypy.response.headers['Content-Disposition'] = 'attachment; filename=' + func.__name__ + '.zip'
return zipfile_writer.getvalue()
return zipfile_out
def _set_csv_base_filename(base_filename):
"""
Set the correct headers when outputting CSV files to specify the filename the browser should use
"""
cherrypy.response.headers['Content-Disposition'] = 'attachment; filename=' + base_filename + '.csv'
def csv_file(func):
parameters = inspect.getargspec(func)
if len(parameters[0]) == 3:
func.site_mappable = True
@wraps(func)
def csvout(self, session, set_headers=True, **kwargs):
writer = StringIO()
func(self, csv.writer(writer), session, **kwargs)
output = writer.getvalue().encode('utf-8')
# set headers last in case there were errors, so end user still see error page
if set_headers:
cherrypy.response.headers['Content-Type'] = 'application/csv'
_set_csv_base_filename(func.__name__)
return output
return csvout
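# Hypothetical usage sketch (the handler and rows below are made up): the decorated
# method receives a csv.writer as its second argument, and the decorator sets the
# Content-Type and filename headers from the method name.
#
#   class ExampleExport:
#       @csv_file
#       def attendee_names(self, out, session):
#           out.writerow(['badge', 'name'])   # header row
#           # write one row per record, e.g. out.writerow([a.badge_num, a.full_name])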
def set_csv_filename(func):
"""
Use this to override CSV filenames, useful when working with aliases and redirects to make it print the correct name
"""
@wraps(func)
def change_filename(self, override_filename=None, *args, **kwargs):
out = func(self, *args, **kwargs)
_set_csv_base_filename(override_filename or func.__name__)
return out
return change_filename
def check_shutdown(func):
@wraps(func)
def with_check(self, *args, **kwargs):
if c.UBER_SHUT_DOWN or c.AT_THE_CON:
raise HTTPRedirect('index?message={}', 'The page you requested is only available pre-event.')
else:
return func(self, *args, **kwargs)
return with_check
def credit_card(func):
@wraps(func)
def charge(self, session, payment_id, stripeToken, stripeEmail='ignored', **ignored):
if ignored:
log.error('received unexpected stripe parameters: {}', ignored)
try:
return func(self, session=session, payment_id=payment_id, stripeToken=stripeToken)
except HTTPRedirect:
raise
except:
error_text = \
'Got an error while calling charge' \
'(self, payment_id={!r}, stripeToken={!r}, ignored={}):\n{}\n' \
'\n IMPORTANT: This could have resulted in an attendee paying and not being' \
'marked as paid in the database. Definitely double check.'\
.format(payment_id, stripeToken, ignored, traceback.format_exc())
report_critical_exception(msg=error_text, subject='ERROR: MAGFest Stripe error (Automated Message)')
return traceback.format_exc()
return charge
def cached(func):
func.cached = True
return func
def cached_page(func):
from sideboard.lib import config as sideboard_config
innermost = get_innermost(func)
func.lock = RLock()
@wraps(func)
def with_caching(*args, **kwargs):
if hasattr(innermost, 'cached'):
fpath = os.path.join(sideboard_config['root'], 'data', func.__module__ + '.' + func.__name__)
with func.lock:
if not os.path.exists(fpath) or datetime.now().timestamp() - os.stat(fpath).st_mtime > 60 * 15:
contents = func(*args, **kwargs)
with open(fpath, 'wb') as f:
# Try to write assuming content is a byte first, then try it as a string
try:
f.write(contents)
except:
f.write(bytes(contents, 'UTF-8'))
with open(fpath, 'rb') as f:
return f.read()
else:
return func(*args, **kwargs)
return with_caching
def timed(func):
@wraps(func)
def with_timing(*args, **kwargs):
before
|
stoewer/nix-demo
|
utils/plotting.py
|
Python
|
bsd-3-clause
| 12,201 | 0.002951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import numpy as np
import scipy.signal as sp
import random
import nix
import matplotlib.pyplot as plt
COLORS_BLUE_AND_RED = (
'dodgerblue', 'red'
)
COLORS_BLUE_GRADIENT = (
"#034980", "#055DA1", "#1B70E0", "#3786ED", "#4A95F7",
"#0C3663", "#1B4775", "#205082", "#33608F", "#51779E",
"#23B0DB", "#29CDFF", "#57D8FF", "#8FE5FF"
)
class Plotter(object):
"""
Plotter class for nix data arrays.
"""
def __init__(self, width=800, height=600, dpi=90, lines=1, cols=1, facecolor="white",
defaultcolors=COLORS_BLUE_GRADIENT):
"""
:param width: Width of the image in pixels
:param height: Height of the image in pixels
:param dpi: DPI of the image (default 90)
:param lines: Number of vertical subplots
:param cols: Number of horizontal subplots
:param facecolor: The background color of the plot
:param defaultcolors: Defaultcolors that are assigned to lines in each subplot.
"""
self.__width = width
self.__height = height
self.__dpi = dpi
self.__lines = lines
self.__cols = cols
self.__facecolor = facecolor
self.__defaultcolors = defaultcolors
self.__subplot_data = tuple()
for i in range(self.subplot_count):
self.__subplot_data += ([], )
self.__last_figure = None
# properties
@property
def subplot_count(self):
|
return self.__cols * self.__lines
@property
def subplot_data(self):
re
|
turn self.__subplot_data
@property
def defaultcolors(self):
return self.__defaultcolors
@property
def last_figure(self):
assert self.__last_figure is not None, "No figure available (method plot has to be called at least once)"
return self.__last_figure
# methods
def save(self, name):
"""
Saves the last figure to the specified location.
:param name: The name of the figure file
"""
self.last_figure.savefig(name)
def add(self, array, subplot=0, color=None, xlim=None, downsample=None, labels=None):
"""
Add a new data array to the plot
:param array: The data array to plot
:param subplot: The index of the subplot where the array should be added (starting with 0)
:param color: The color of the array to plot (if None the next default colors will be assigned)
:param xlim: Start and end of the x-axis limits.
:param downsample: True if the array should be sampled down
:param labels: Data array with labels that should be added to each data point of the array to plot
"""
color = self.__mk_color(color, subplot)
pdata = PlottingData(array, color, subplot, xlim, downsample, labels)
self.subplot_data[subplot].append(pdata)
def plot(self, width=None, height=None, dpi=None, lines=None, cols=None, facecolor=None):
"""
Plots all data arrays added to the plotter.
:param width: Width of the image in pixels
:param height: Height of the image in pixels
:param dpi: DPI of the image (default 90)
:param lines: Number of vertical subplots
:param cols: Number of horizontal subplots
:param facecolor: The background color of the plot
"""
# defaults
width = width or self.__width
height = height or self.__height
dpi = dpi or self.__dpi
lines = lines or self.__lines
cols = cols or self.__cols
facecolor = facecolor or self.__facecolor
# plot
figure, axis_all = plot_make_figure(width, height, dpi, cols, lines, facecolor)
for subplot, pdata_list in enumerate(self.subplot_data):
axis = axis_all[subplot]
pdata_list.sort()
event_like = Plotter.__count_event_like(pdata_list)
signal_like = Plotter.__count_signal_like(pdata_list)
for i, pdata in enumerate(pdata_list):
d1type = pdata.array.dimensions[0].dimension_type
shape = pdata.array.shape
nd = len(shape)
if nd == 1:
if d1type == nix.DimensionType.Set:
second_y = signal_like > 0
hint = (i + 1.0) / (event_like + 1.0) if event_like > 0 else None
plot_array_1d_set(pdata.array, axis, color=pdata.color, xlim=pdata.xlim, labels=pdata.labels,
second_y=second_y, hint=hint)
else:
plot_array_1d(pdata.array, axis, color=pdata.color, xlim=pdata.xlim,
downsample=pdata.downsample)
elif nd == 2:
if d1type == nix.DimensionType.Set:
plot_array_2d_set(pdata.array, axis, color=pdata.color, xlim=pdata.xlim,
downsample=pdata.downsample)
else:
plot_array_2d(pdata.array, axis, color=pdata.color, xlim=pdata.xlim,
downsample=pdata.downsample)
else:
raise Exception('Unsupported data')
axis.legend()
self.__last_figure = figure
# private methods
def __mk_color(self, color, subplot):
"""
If color is None, select one from the defaults or create a random color.
"""
if color is None:
color_count = len(self.defaultcolors)
count = len(self.subplot_data[subplot])
color = self.defaultcolors[count if count < color_count else color_count - 1]
if color == "random":
color = "#%02x%02x%02x" % (random.randint(50, 255), random.randint(50, 255), random.randint(50, 255))
return color
@staticmethod
def __count_signal_like(pdata_list):
sig_types = (nix.DimensionType.Range, nix.DimensionType.Sample)
count = 0
for pdata in pdata_list:
dims = pdata.array.dimensions
nd = len(dims)
if nd == 1 and dims[0].dimension_type in sig_types:
count += 1
elif nd == 2 and dims[0].dimension_type == nix.DimensionType.Set and dims[1].dimension_type in sig_types:
count += 1
return count
@staticmethod
def __count_image_like(pdata_list):
sig_types = (nix.DimensionType.Range, nix.DimensionType.Sample)
count = 0
for pdata in pdata_list:
dims = pdata.array.dimensions
nd = len(dims)
if nd == 2 and dims[0].dimension_type in sig_types and dims[1].dimension_type in sig_types:
count += 1
return count
@staticmethod
def __count_event_like(pdata_list):
count = 0
for pdata in pdata_list:
dims = pdata.array.dimensions
nd = len(dims)
if dims[0].dimension_type == nix.DimensionType.Set:
count += 1
return count
class PlottingData(object):
def __init__(self, array, color, subplot=0, xlim=None, downsample=False, labels=None):
self.array = array
self.dimensions = array.dimensions
self.shape = array.shape
self.rank = len(array.shape)
self.color = color
self.subplot = subplot
self.xlim = xlim
self.downsample = downsample
self.labels = labels
    def __cmp__(self, other):
        weights = lambda dims: [(1 if d.dimension_type == nix.DimensionType.Sample else 0) for d in dims]
        a, b = weights(self.array.dimensions), weights(other.array.dimensions)
        # avoid the Python 2-only builtin cmp() so that sorting also works on Python 3
        return (a > b) - (a < b)
    def __lt__(self, other):
        return self.__cmp__(other) < 0
def plot_make_figure(width, height, dpi, cols, lines, facecolor):
axis_all = []
figure = plt.figure(facecolor=facecolor, figsize=(width / dpi, height / dpi), dpi
|
lead-ratings/django-bulk-update
|
tests/fixtures.py
|
Python
|
mit
| 4,775 | 0.000209 |
from datetime import date, time, timedelta
from decimal import Decimal
import itertools
from django.utils import timezone
from six.moves import xrange
from .models import Person
def get_fixtures(n=None):
"""
Returns `n` dictionaries of `Person` objects.
If `n` is not specified it defaults to 6.
"""
_now = timezone.now().replace(microsecond=0) # mysql doesn't do microseconds. # NOQA
_date = date(2015, 3, 28)
_time = time(13, 0)
fixtures = [
{
'big_age': 59999999999999999, 'comma_separated_age': '1,2,3',
'age': -99, 'positive_age': 9999, 'positive_small_age': 299,
'small_age': -299, 'certified': False, 'null_certified': None,
'name': 'Mike', 'email': 'miketakeahike@mailinator.com',
'file_path': '/Users/user/fixtures.json', 'slug': 'mike',
'text': 'here is a dummy text',
'url': 'https://docs.djangoproject.com',
'height': Decimal('1.81'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 0.3,
'remote_addr': '192.0.2.30', 'my_file': 'dummy.txt',
'image': 'kitten.jpg', 'data': {'name': 'Mike', 'age': -99},
},
{
'big_age': 245999992349999, 'comma_separated_age': '6,2,9',
'age': 25, 'positive_age': 49999, 'positive_small_age': 315,
'small_age': 5409, 'certified': False, 'null_certified': True,
'name': 'Pete', 'email': 'petekweetookniet@mailinator.com',
'file_path': 'users.json', 'slug': 'pete', 'text': 'dummy',
'url': 'https://google.com', 'height': Decimal('1.93'),
'date_time': _now, 'date': _date, 'time': _time,
'float_height': 0.5, 'remote_addr': '127.0.0.1',
'my_file': 'fixtures.json',
'data': [{'name': 'Pete'}, {'name': 'Mike'}],
},
{
'big_age': 9929992349999, 'comma_separated_age': '6,2,9,10,5',
'age': 29, 'positive_age': 412399, 'positive_small_age': 23315,
'small_age': -5409, 'certified': False, 'null_certified': True,
'name': 'Ash', 'email': 'rashash@mailinator.com',
'file_path': '/Downloads/kitten.jpg', 'slug': 'ash',
'text': 'bla bla bla', 'url': 'news.ycombinator.com',
'height': Decimal('1.78'), 'date_time': _now,
'date': _date, 'time': _time,
'float_height': 0.8, 'my_file': 'dummy.png',
'data': {'text': 'bla bla bla', 'names': ['Mike', 'Pete']},
},
{
'big_age': 9992349234, 'comma_separated_age': '12,29,10,5',
'age': -29, 'positive_age': 4199, 'positive_small_age': 115,
'small_age': 909, 'certified': True, 'null_certified': False,
'name': 'Mary', 'email': 'marykrismas@mailinator.com',
'file_path': 'dummy.png', 'slug': 'mary',
'text': 'bla bla bla bla bla', 'url': 'news.ycombinator.com',
'height': Decimal('1.65'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 0,
'remote_addr': '2a02:42fe::4',
'data': {'names': {'name': 'Mary'}},
},
{
'big_age': 999234, 'comma_separated_age': '12,1,30,50',
'age': 1, 'positive_age': 99199, 'positive_small_age': 5,
'small_age': -909, 'certified': False, 'null_certified': False,
'name': 'Sandra', 'email': 'sandrasalamandr@mailinator.com',
'file_path': '/home/dummy.png', 'slug': 'sandra',
            'text': 'this is a dummy text', 'url': 'google.com',
|
'height': Decimal('1.59'), 'date_time': _now,
            'date': _date, 'time': _time, 'float_height': 2 ** 2,
|
'image': 'dummy.jpeg', 'data': {},
},
{
'big_age': 9999999999, 'comma_separated_age': '1,100,3,5',
'age': 35, 'positive_age': 1111, 'positive_small_age': 500,
'small_age': 110, 'certified': True, 'null_certified': None,
'name': 'Crystal', 'email': 'crystalpalace@mailinator.com',
'file_path': '/home/dummy.txt', 'slug': 'crystal',
'text': 'dummy text', 'url': 'docs.djangoproject.com',
'height': Decimal('1.71'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 2 ** 10,
'image': 'dummy.png', 'data': [],
},
]
n = n or len(fixtures)
fixtures = itertools.cycle(fixtures)
for _ in xrange(n):
yield next(fixtures)
def create_fixtures(n=None):
"""
Wrapper for Person.bulk_create which creates `n` fixtures
"""
Person.objects.bulk_create(Person(**person)
for person in get_fixtures(n))
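# Hypothetical usage sketch (not part of the original fixtures module):
#   create_fixtures(10)              # bulk-inserts 10 Person rows, cycling the 6 templates
#   rows = list(get_fixtures(3))     # three dicts, each usable as Person(**row)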
|
appop/bitcoin
|
qa/rpc-tests/bip9-softforks.py
|
Python
|
mit
| 10,528 | 0.004084 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
from test_framework.blockstore import BlockStore
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
            self.height += 1
|
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
    def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
|
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 432)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
# 143 more version 536870913 blocks (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 432)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
s
|
genius1611/Keystone
|
keystone/controllers/tenant.py
|
Python
|
apache-2.0
| 1,975 | 0.002532 |
from keystone import utils
from keystone.common import wsgi
import keystone.config as config
from keystone.logic.types.tenant import Tenant
from . import get_marker_limit_and_url
class TenantController(wsgi.Controller):
"""Controller for Tenant related operations"""
def __init__(self, options, is_service_operation=None):
self.options = options
self.is_service_operation = is_service_operation
@utils.wrap_error
def create_tenant(self, req):
tenant = utils.get_normalized_request_content(Tenant, req)
return utils.send_result(201, req,
config.SERVICE.create_tenant(utils.get_auth_token(req), tenant))
@utils.wrap_error
def get_tenants(self, req):
tenant_name = req.GET["name"] if "name" in req.GET else None
if tenant_name:
            tenant = config.SERVICE.get_tenant_by_name(
|
utils.get_auth_token(req),
tenant_name)
return utils.send_result(200, req, tenant)
else:
marker, limit, url = get_marker_limit_and_url(req)
            tenants = config.SERVICE.get_tenants(utils.get_auth_token(req),
|
marker, limit, url, self.is_service_operation)
return utils.send_result(200, req, tenants)
@utils.wrap_error
def get_tenant(self, req, tenant_id):
tenant = config.SERVICE.get_tenant(utils.get_auth_token(req),
tenant_id)
return utils.send_result(200, req, tenant)
@utils.wrap_error
def update_tenant(self, req, tenant_id):
tenant = utils.get_normalized_request_content(Tenant, req)
rval = config.SERVICE.update_tenant(utils.get_auth_token(req),
tenant_id, tenant)
return utils.send_result(200, req, rval)
@utils.wrap_error
def delete_tenant(self, req, tenant_id):
rval = config.SERVICE.delete_tenant(utils.get_auth_token(req),
tenant_id)
return utils.send_result(204, req, rval)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/logilab/__init__.py
|
Python
|
agpl-3.0
| 155 | 0 |
"""generated
|
file, don't modify or your data will be lost"""
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
|
pass
|
tpbarron/pytorch-ppo
|
main.py
|
Python
|
mit
| 9,755 | 0.003383 |
import argparse
import sys
import math
from collections import namedtuple
from itertools import count
import gym
import numpy as np
import scipy.optimize
from gym import wrappers
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
from torch.autograd import Variable
from models import Policy, Value, ActorCritic
from replay_memory import Memory
from running_state import ZFilter
# from utils import *
torch.set_default_tensor_type('torch.DoubleTensor')
PI = torch.DoubleTensor([3.1415926])
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.995, metavar='G',
help='discount factor (default: 0.995)')
parser.add_argument('--env-name', default="Reacher-v1", metavar='G',
help='name of the environment to run')
parser.add_argument('--tau', type=float, default=0.97, metavar='G',
help='gae (default: 0.97)')
# parser.add_argument('--l2_reg', type=float, default=1e-3, metavar='G',
# help='l2 regularization regression (default: 1e-3)')
# parser.add_argument('--max_kl', type=float, default=1e-2, metavar='G',
# help='max kl value (default: 1e-2)')
# parser.add_argument('--damping', type=float, default=1e-1, metavar='G',
# help='damping (default: 1e-1)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
                    help='random seed (default: 543)')
parser.add_argument('--batch-size', type=int, default=5000, metavar='N',
help='batch size (default: 5000)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
                    help='interval between training status logs (default: 1)')
parser.add_argument('--entropy-coeff', type=float, default=0.0, metavar='N',
help='coefficient for entropy cost')
parser.add_argument('--clip-epsilon', type=float, default=0.2, metavar='N',
help='Clipping for PPO grad')
parser.add_argument('--use-joint-pol-val', action='store_true',
help='whether to use combined policy and value nets')
args = parser.parse_args()
env = gym.make(args.env_name)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
|
env.seed(args.seed)
torch.manual_seed(args.seed)
if args.use_joint_pol_val:
ac_net = ActorCritic(num_inputs, num_actions)
opt_ac = optim.Adam(ac_net.parameters(), lr=0.001)
else:
policy_net = Policy(num_inputs, num_actions)
value_net = Value(num_inputs)
    opt_policy = optim.Adam(policy_net.parameters(), lr=0.001)
|
opt_value = optim.Adam(value_net.parameters(), lr=0.001)
def select_action(state):
state = torch.from_numpy(state).unsqueeze(0)
action_mean, _, action_std = policy_net(Variable(state))
action = torch.normal(action_mean, action_std)
return action
def select_action_actor_critic(state):
state = torch.from_numpy(state).unsqueeze(0)
action_mean, _, action_std, v = ac_net(Variable(state))
action = torch.normal(action_mean, action_std)
return action
def normal_log_density(x, mean, log_std, std):
var = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * torch.log(2 * Variable(PI)) - log_std
return log_density.sum(1)
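# For reference, normal_log_density above evaluates the diagonal Gaussian
# log-density summed over action dimensions:
#   log N(x; mu, sigma) = -(x - mu)^2 / (2 sigma^2) - 0.5 * log(2 * pi) - log(sigma)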
def update_params_actor_critic(batch):
rewards = torch.Tensor(batch.reward)
masks = torch.Tensor(batch.mask)
actions = torch.Tensor(np.concatenate(batch.action, 0))
states = torch.Tensor(batch.state)
action_means, action_log_stds, action_stds, values = ac_net(Variable(states))
returns = torch.Tensor(actions.size(0),1)
deltas = torch.Tensor(actions.size(0),1)
advantages = torch.Tensor(actions.size(0),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(rewards.size(0))):
returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
targets = Variable(returns)
# kloldnew = policy_net.kl_old_new() # oldpi.pd.kl(pi.pd)
# ent = policy_net.entropy() #pi.pd.entropy()
# meankl = torch.reduce_mean(kloldnew)
# meanent = torch.reduce_mean(ent)
# pol_entpen = (-args.entropy_coeff) * meanent
action_var = Variable(actions)
# compute probs from actions above
log_prob_cur = normal_log_density(action_var, action_means, action_log_stds, action_stds)
action_means_old, action_log_stds_old, action_stds_old, values_old = ac_net(Variable(states), old=True)
log_prob_old = normal_log_density(action_var, action_means_old, action_log_stds_old, action_stds_old)
# backup params after computing probs but before updating new params
ac_net.backup()
advantages = (advantages - advantages.mean()) / advantages.std()
advantages_var = Variable(advantages)
opt_ac.zero_grad()
ratio = torch.exp(log_prob_cur - log_prob_old) # pnew / pold
surr1 = ratio * advantages_var[:,0]
surr2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * advantages_var[:,0]
policy_surr = -torch.min(surr1, surr2).mean()
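    # The two surrogates above form the PPO clipped objective
    #   L_CLIP = E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
    # with ratio r_t = pi_new(a|s) / pi_old(a|s); the sign is flipped because
    # the optimizer minimizes rather than maximizes.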
vf_loss1 = (values - targets).pow(2.)
vpredclipped = values_old + torch.clamp(values - values_old, -args.clip_epsilon, args.clip_epsilon)
vf_loss2 = (vpredclipped - targets).pow(2.)
vf_loss = 0.5 * torch.max(vf_loss1, vf_loss2).mean()
total_loss = policy_surr + vf_loss
total_loss.backward()
torch.nn.utils.clip_grad_norm(ac_net.parameters(), 40)
opt_ac.step()
def update_params(batch):
rewards = torch.Tensor(batch.reward)
masks = torch.Tensor(batch.mask)
actions = torch.Tensor(np.concatenate(batch.action, 0))
states = torch.Tensor(batch.state)
values = value_net(Variable(states))
returns = torch.Tensor(actions.size(0),1)
deltas = torch.Tensor(actions.size(0),1)
advantages = torch.Tensor(actions.size(0),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(rewards.size(0))):
returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
targets = Variable(returns)
opt_value.zero_grad()
value_loss = (values - targets).pow(2.).mean()
value_loss.backward()
opt_value.step()
# kloldnew = policy_net.kl_old_new() # oldpi.pd.kl(pi.pd)
# ent = policy_net.entropy() #pi.pd.entropy()
# meankl = torch.reduce_mean(kloldnew)
# meanent = torch.reduce_mean(ent)
# pol_entpen = (-args.entropy_coeff) * meanent
action_var = Variable(actions)
action_means, action_log_stds, action_stds = policy_net(Variable(states))
log_prob_cur = normal_log_density(action_var, action_means, action_log_stds, action_stds)
action_means_old, action_log_stds_old, action_stds_old = policy_net(Variable(states), old=True)
log_prob_old = normal_log_density(action_var, action_means_old, action_log_stds_old, action_stds_old)
# backup params after computing probs but before updating new params
policy_net.backup()
advantages = (advantages - advantages.mean()) / advantages.std()
advantages_var = Variable(advantages)
opt_policy.zero_grad()
ratio = torch.exp(log_prob_cur - log_prob_old) # pnew / pold
surr1 = ratio * advantages_var[:,0]
surr2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * advantages_var[:,0]
policy_surr = -torch.min(surr1, surr2).mean()
policy_surr.backward()
|
kaedroho/wagtail
|
scripts/nightly/upload.py
|
Python
|
bsd-3-clause
| 615 | 0.001626 |
import json
import pathlib
import sys
import boto3
dist_folder = pathlib.Path.cwd() / 'dist'
try:
f = next(dist_folder.glob('*.whl'))
except StopIteration:
print("No .whl files found in ./dist!")
sys.exit()
print("Uploading", f.name)
s3 = boto3.client('s3')
s3.upload_file(str(f), 'releases.wagtail.io', 'nightly/dist/' + f.name, ExtraArgs={'ACL': 'public-read'})
|
print("Updating latest.json")
boto3.resource('s3').Object('releases.wagtail.io', 'nightly/latest.json').put(
|
ACL='public-read',
Body=json.dumps({
"url": 'https://releases.wagtail.io/nightly/dist/' + f.name,
})
)
|
olafhauk/mne-python
|
mne/datasets/hf_sef/hf_sef.py
|
Python
|
bsd-3-clause
| 3,751 | 0 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Authors: Jussi Nurminen <jnu@iki.fi>
# License: BSD Style.
import tarfile
import os.path as op
import os
from ...utils import _fetch_file, verbose, _check_option
from ..utils import _get_path, logger, _do_path_update
@verbose
def data_path(dataset='evoked', path=None, force_update=False,
update_path=True, verbose=None):
u"""Get path to local copy of the high frequency SEF dataset.
Gets a local copy of the high frequency SEF MEG dataset [1]_.
Parameters
----------
dataset : 'evoked' | 'raw'
Whether to get the main dataset (evoked, structural and the rest) or
        the separate dataset containing raw MEG data only.
|
path : None | str
Where to look for the HF-SEF data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_HF_SEF_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the HF-SEF dataset
is not found under the given path, the data
        will be automatically downloaded to the specified folder.
|
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_HF_SEF_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
path : str
Local path to the directory where the HF-SEF data is stored.
References
----------
.. [1] Nurminen, J., Paananen, H., Mäkelä, J. (2017): High frequency
somatosensory MEG dataset. https://doi.org/10.5281/zenodo.889234
"""
key = 'MNE_DATASETS_HF_SEF_PATH'
name = 'HF_SEF'
path = _get_path(path, key, name)
destdir = op.join(path, 'HF_SEF')
urls = {'evoked':
'https://zenodo.org/record/3523071/files/hf_sef_evoked.tar.gz',
'raw':
'https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz'}
hashes = {'evoked': '13d34cb5db584e00868677d8fb0aab2b',
'raw': '33934351e558542bafa9b262ac071168'}
_check_option('dataset', dataset, sorted(urls.keys()))
url = urls[dataset]
hash_ = hashes[dataset]
fn = url.split('/')[-1] # pick the filename from the url
archive = op.join(destdir, fn)
# check for existence of evoked and raw sets
has = dict()
subjdir = op.join(destdir, 'subjects')
megdir_a = op.join(destdir, 'MEG', 'subject_a')
has['evoked'] = op.isdir(destdir) and op.isdir(subjdir)
has['raw'] = op.isdir(megdir_a) and any(['raw' in fn_ for fn_ in
os.listdir(megdir_a)])
if not has[dataset] or force_update:
if not op.isdir(destdir):
os.mkdir(destdir)
_fetch_file(url, archive, hash_=hash_)
with tarfile.open(archive) as tar:
logger.info('Decompressing %s' % archive)
for member in tar.getmembers():
# strip the leading dirname 'hf_sef/' from the archive paths
# this should be fixed when making next version of archives
member.name = member.name[7:]
try:
tar.extract(member, destdir)
except IOError:
# check whether file exists but could not be overwritten
fn_full = op.join(destdir, member.name)
if op.isfile(fn_full):
os.remove(fn_full)
tar.extract(member, destdir)
else: # some more sinister cause for IOError
raise
os.remove(archive)
_do_path_update(path, update_path, key, name)
return destdir
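# Hypothetical usage sketch (not part of the original module):
#   evoked_dir = data_path(dataset='evoked')                 # downloads on first call
#   raw_dir = data_path(dataset='raw', force_update=False)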
|
ruohoruotsi/Wavelet-Tree-Synth
|
edward-examples/beta_bernoulli_map.py
|
Python
|
gpl-2.0
| 1,013 | 0.002962 |
#!/usr/bin/env python
"""
A simple coin flipping example. The model is written in TensorFlow.
Inspired by Stan's toy example.
Probability model
Prior: Beta
Likelihood: Bernoulli
Inference: Maximum a posteriori
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.stats import bernoulli, beta
class BetaBernoulli:
"""p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)"""
def __init__(self):
self.n_vars = 1
def log_prob(self, xs, zs):
log_prior = beta.logpdf(zs, a=1.0, b=1.0)
log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], z))
for z in tf.unpack(zs)])
return log_lik + log_prior
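# In equation form, log_prob above evaluates, for each candidate z,
#   log p(x, z) = sum_i log Bernoulli(x_i | z) + log Beta(z | 1, 1)
# and MAP inference then maximizes this joint density over z.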
ed.set_seed(42)
model = BetaBernoulli()
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}
|
params = tf.sigmoid(tf.Variable(tf.random_normal([1])))
inference = ed.MAP(model, data, params=params)
inference.run(n_iter=100, n_print=10)
| |
cryptoprojects/ultimateonlinecash
|
share/rpcuser/rpcuser.py
|
Python
|
mit
| 1,125 | 0.005333 |
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
|
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
|
result = m.hexdigest()
print("String to be appended to ultimateonlinecash.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
|
richasinha/redis-OS-project
|
src/setRedis.py
|
Python
|
bsd-3-clause
| 595 | 0.006723 |
import time
from subprocess import *
PATH = "/home/richie_rich/OSProj/redis-OS-project/src/redis-cli"
p1 = Popen([PATH], shell=True, stdin=PIPE)
p1.communicate(input="FLUSHALL")
strength = 1000000
rangeVal = strength + 1
string = "set key"
string1 = ""
|
count = 0
for i in xrange(1,rangeVal):
count = count + 1
string1 = string1 + string + str(i) + " val" + str(i) + "\n"
if (i % 1000) == 0 :
        p1 = Popen([PATH], shell=True, stdin=PIPE)
|
p1.communicate(input=string1)
string = "set key"
string1 = ""
print string1
print "Inserted %d items" %(count)
|
andreasfaerber/p2pool-feathercoin
|
p2pool/web.py
|
Python
|
gpl-3.0
| 25,473 | 0.00687 |
from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer, reactor
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
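# _atomic_write is the usual write-to-a-temp-file-then-rename pattern, so readers
# never observe a half-written file. A hypothetical call (names made up):
#   _atomic_write('/path/to/datadir/stats', json.dumps(stats))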
def get_web_root(wb, datadir_path, daemon_getinfo_var, stop_event=variable.Event()):
node = wb.node
start_time = time.time()
web_root = resource.Resource()
def get_users():
height, last = node.tracker.get_height_and_last(node.best_share_var.value)
weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
res = {}
for script in sorted(weights, key=lambda s: weights[s]):
res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
return res
def get_current_scaled_txouts(scale, trunc=0):
txouts = node.get_current_txouts()
total = sum(txouts.itervalues())
results = dict((script, value*scale//total) for script, value in txouts.iteritems())
if trunc > 0:
total_random = 0
random_set = set()
for s in sorted(results, key=results.__getitem__):
if results[s] >= trunc:
break
total_random += results[s]
random_set.add(s)
if total_random:
winner = math.weighted_choice((script, results[script]) for script in random_set)
for script in random_set:
del results[script]
results[winner] = total_random
if sum(results.itervalues()) < int(scale):
results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
return results
def get_patron_sendmany(total=None, trunc='0.01'):
if total is None:
return 'need total argument. go to patron_sendmany/<TOTAL>'
total = int(float(total)*1e8)
trunc = int(float(trunc)*1e8)
return json.dumps(dict(
(bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
for script, value in get_current_scaled_txouts(total, trunc).iteritems()
if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
))
def get_global_stats():
# averaged over last hour
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
return dict(
pool_nonstale_hash_rate=nonstale_hash_rate,
pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
pool_stale_prop=stale_prop,
min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
)
def get_local_stats():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
my_share_count = my_unstale_count + my_orphan_count + my_doa_count
my_stale_count = my_orphan_count + my_doa_count
my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
if share.hash in wb.my_share_hashes)
actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
share_att_s = my_work / actual_time
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
return dict(
my_hash_rates_in_last_hour=dict(
note="DEPRECATED",
nonstale=share_att_s,
rewarded=share_att_s/(1 - global_stale_prop),
            actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
|
),
my_share_counts_in_last_hour=dict(
shares=my_share_count,
unstale_shares=my_unstale_count,
stale_shares=my_stale_count,
orphan_stale_shares=my_orphan_count,
doa_stale_shares=my_doa_count,
),
        my_stale_proportions_in_last_hour=dict(
|
stale=my_stale_prop,
orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
),
miner_hash_rates=miner_hash_rates,
miner_dead_hash_rates=miner_dead_hash_rates,
efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
shares=dict(
total=shares,
orphan=stale_orphan_shares,
dead=stale_doa_shares,
),
uptime=time.time() - start_time,
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.daemon_work.value['bits'].target),
block_value=node.daemon_work.value['subsidy']*1e-8,
warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, daemon_getinfo_var.value, node.daemon_work.value),
donation_proportion=wb.donation_percentage/100,
version=p2pool.__version__,
|
richshaffer/tornado-suds
|
tornado_suds/bindings/rpc.py
|
Python
|
lgpl-3.0
| 3,181 | 0.002829 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for the (WS) SOAP I{rpc/literal} and I{rpc/encoded} bindings.
"""
from logging import getLogger
from tornado_suds import *
from tornado_suds.mx.encoded import Encoded as MxEncoded
from tornado_suds.umx.encoded import Encoded as UmxEncoded
from tornado_suds.bindings.binding import Binding, envns
from tornado_suds.sax.element import Element
log = getLogger(__name__)
encns = ('SOAP-ENC', 'http://schemas.xmlsoap.org/soap/encoding/')
class RPC(Binding):
"""
RPC/Literal binding style.
"""
def param_defs(self, method):
return self.bodypart_types(method)
def envelope(self, header, body):
env = Binding.envelope(self, header, body)
env.addPrefix(encns[0], encns[1])
env.set('%s:encodingStyle' % envns[0],
'http://schemas.xmlsoap.org/soap/encoding/')
return env
def bodycontent(self, method, args, kwargs):
n = 0
root = self.method(method)
for pd in self.param_defs(method):
if n < len(args):
value = args[n]
else:
value = kwargs.get(pd[0])
p = self.mkparam(method, pd, value)
if p is not None:
root.append(p)
n += 1
return root
    def replycontent(self, method, body):
|
        return body[0].children
|
def method(self, method):
"""
Get the document root. For I{rpc/(literal|encoded)}, this is the
        name of the method qualified by the schema tns.
@param method: A service method.
@type method: I{service.Method}
@return: A root element.
@rtype: L{Element}
"""
ns = method.soap.input.body.namespace
if ns[0] is None:
ns = ('ns0', ns[1])
method = Element(method.name, ns=ns)
return method
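    # Illustrative example (namespace URL is made up): for a service method
    # named "add" whose input body namespace is ('ns0', 'http://example.com/tns'),
    # method() above returns an element that serializes roughly as
    #   <ns0:add xmlns:ns0="http://example.com/tns"/>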
class Encoded(RPC):
"""
RPC/Encoded (section 5) binding style.
"""
def marshaller(self):
return MxEncoded(self.schema())
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxEncoded(self.schema())
else:
return RPC.unmarshaller(self, typed)
|
jaseg/python-lmap
|
testfnord.py
|
Python
|
bsd-2-clause
| 490 | 0.028571 |
#!/usr/bin/env python3
from lmap import ldap
from getpass import getpass
import threading
|
pw = getpass()
def bind_fnord(num):
def do_teh_action():
ld = ldap.ldap('ldap://emmi.physik-pool.tu-berlin.de/')
ld.simple_bind('uid=jaseg,ou=people,ou=pcpool,ou=physik,o=tu-berlin,c=de', pw)
print(num, len(ld.search('ou=people,ou=pcpool,ou=physik,o=tu-berlin,c=de', filter='uid=jaseg')))
return do_teh_action
for i in range(100):
    t = threading.Thread(target = bind_fnord(i))
|
t.start()
|
passByReference/webcrawler
|
proj2/proj2/pipelines.py
|
Python
|
apache-2.0
| 285 | 0 |
# -*- coding: utf-8 -*-
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class Proj2Pipeline(object):
def process_item(self, item, spider):
        return item
|
|
wd15/corr
|
corr-api/config.py
|
Python
|
mit
| 436 | 0 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
WTF_CSRF_ENABLED = True
|
SECRET_KEY = '33stanlake#'
DEBUG = True
APP_TITLE = 'Cloud of Reproducible Records API'
VERSION = '0.1-dev'
|
MONGODB_SETTINGS = {
'db': 'corr-production',
'host': '0.0.0.0',
'port': 27017
}
# STORMPATH_API_KEY_FILE = '~/.stormpath/apiKey.properties'
# STORMPATH_APPLICATION = 'sumatra-cloud'
# STORMPATH_REDIRECT_URL = '/dashboard'
|
googleapis/python-dialogflow-cx
|
google/cloud/dialogflowcx_v3beta1/services/pages/transports/base.py
|
Python
|
apache-2.0
| 7,215 | 0.001663 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import page as gcdc_page
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PagesTransport(abc.ABC):
"""Abstract transport class for Pages."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
|
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
|
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_pages: gapic_v1.method.wrap_method(
self.list_pages, default_timeout=None, client_info=client_info,
),
self.get_page: gapic_v1.method.wrap_method(
self.get_page, default_timeout=None, client_info=client_info,
),
self.create_page: gapic_v1.method.wrap_method(
self.create_page, default_timeout=None, client_info=client_info,
),
self.update_page: gapic_v1.method.wrap_method(
self.update_page, default_timeout=None, client_info=client_info,
),
self.delete_page: gapic_v1.method.wrap_method(
self.delete_page, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_pages(
self,
) -> Callable[
[page.ListPagesRequest],
Union[page.ListPagesResponse, Awaitable[page.ListPagesResponse]],
]:
raise NotImplementedError()
@property
def get_page(
self,
) -> Callable[[page.GetPageRequest], Union[page.Page, Awaitable[page.Page]]]:
raise NotImplementedError()
@property
def create_page(
self,
) -> Callable[
[gcdc_page.CreatePageRequest], Union[gcdc_page.Page, Awaitable[gcdc_page.Page]]
]:
raise NotImplementedError()
@property
def update_page(
self,
) -> Callable[
[gcdc_page.UpdatePageRequest], Union[gcdc_page.Page, Awaitable[gcdc_page.Page]]
]:
raise NotImplementedError()
@property
def delete_page(
self,
) -> Callable[
[page.DeletePageRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
]:
raise NotImplementedError()
__all__ = ("PagesTransport",)
|
googlefonts/gftools
|
Lib/gftools/fix.py
|
Python
|
apache-2.0
| 28,567 | 0.001505 |
"""
Functions to fix fonts so they conform to the Google Fonts
specification:
https://github.com/googlefonts/gf-docs/tree/main/Spec
"""
from fontTools.misc.fixedTools import otRound
from fontTools.ttLib import TTFont, newTable, getTableModule
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
from fontTools.ttLib.tables._f_v_a_r import NamedInstance
from gftools.util.google_fonts import _KNOWN_WEIGHTS
from gftools.utils import (
download_family_from_Google_Fonts,
Google_Fonts_has_family,
font_stylename,
font_familyname,
family_bounding_box,
get_unencoded_glyphs,
normalize_unicode_marks,
partition_cmap,
typo_metrics_enabled,
validate_family,
unique_name,
)
from gftools.stat import gen_stat_tables
from os.path import basename, splitext
from copy import deepcopy
import logging
log = logging.getLogger(__name__)
__all__ = [
"remove_tables",
"add_dummy_dsig",
"fix_unhinted_font",
"fix_hinted_font",
"fix_fs_type",
"fix_weight_class",
"fix_fs_selection",
"fix_mac_style",
"fix_fvar_instances",
"update_nametable",
"fix_nametable",
"inherit_vertical_metrics",
"fix_vertical_metrics",
"fix_ascii_fontmetadata",
"drop_nonpid0_cmap",
"drop_mac_cmap",
"fix_pua",
"fix_isFixedPitch",
"drop_mac_names",
"drop_superfluous_mac_names",
"fix_font",
"fix_family",
"rename_font",
"fix_filename"
]
# The _KNOWN_WEIGHT_VALUES constant is used internally by the GF Engineering
# team so we cannot update ourselves. TODO (Marc F) unify this one day
WEIGHT_NAMES = _KNOWN_WEIGHTS
del WEIGHT_NAMES[""]
WEIGHT_NAMES["Hairline"] = 1
WEIGHT_NAMES["ExtraBlack"] = 1000
WEIGHT_VALUES = {v: k for k, v in WEIGHT_NAMES.items()}
UNWANTED_TABLES = frozenset(
[
"FFTM",
"TTFA",
"TSI0",
"TSI1",
"TSI2",
"TSI3",
"TSI5",
"prop",
"MVAR",
"Debg",
]
)
def remove_tables(ttFont, tables=None):
"""Remove unwanted tables from a font. The unwanted tables must belong
to the UNWANTED_TABLES set.
Args:
ttFont: a TTFont instance
    tables: an iterable containing the tables to remove
"""
tables_to_remove = UNWANTED_TABLES if not tables else frozenset(tables)
font_tables = frozenset(ttFont.keys())
tables_not_in_font = tables_to_remove - font_tables
if tables_not_in_font:
log.warning(
f"Cannot remove tables '{list(tables_not_in_font)}' since they are "
f"not in the font."
)
required_tables = tables_to_remove - UNWANTED_TABLES
if required_tables:
log.warning(
f"Cannot remove tables '{list(required_tables)}' since they are required"
)
tables_to_remove = UNWANTED_TABLES & font_tables & tables_to_remove
if not tables_to_remove:
return
log.info(f"Removing tables '{list(tables_to_remove)}' from font")
for tbl in tables_to_remove:
del ttFont[tbl]
def add_dummy_dsig(ttFont):
"""Add a dummy dsig table to a font. Older versions of MS Word
require this table.
Args:
ttFont: a TTFont instance
"""
newDSIG = newTable("DSIG")
newDSIG.ulVersion = 1
newDSIG.usFlag = 0
newDSIG.usNumSigs = 0
newDSIG.signatureRecords = []
ttFont.tables["DSIG"] = newDSIG
def fix_unhinted_font(ttFont):
"""Improve the appearance of an unhinted font on Win platforms by:
    - Add a new GASP table with a single range which is set to smooth.
- Add a new prep table which is optimized for unhinted fonts.
Args:
ttFont: a TTFont instance
"""
gasp = newTable("gasp")
# Set GASP so all sizes are smooth
gasp.gaspRange = {0xFFFF: 15}
program = ttProgram.Program()
assembly = ["PUSHW[]", "511", "SCANCTRL[]", "PUSHB[]", "4", "SCANTYPE[]"]
program.fromAssembly(assembly)
prep = newTable("prep")
prep.program = program
ttFont["gasp"] = gasp
ttFont["prep"] = prep
def fix_hinted_font(ttFont):
"""Improve the appearance of a hinted font on Win platforms by enabling
the head table's flag 3.
Args:
ttFont: a TTFont instance
"""
if not 'fpgm' in ttFont:
return False, ["Skipping. Font is not hinted."]
old = ttFont["head"].flags
ttFont["head"].flags |= 1 << 3
return ttFont["head"].flags != old
def fix_fs_type(ttFont):
"""Set the OS/2 table's fsType flag to 0 (Installable embedding).
Args:
ttFont: a TTFont instance
"""
old = ttFont["OS/2"].fsType
ttFont["OS/2"].fsType = 0
return old != 0
def fix_weight_class(ttFont):
"""Set the OS/2 table's usWeightClass so it conforms to GF's supported
styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
Args:
ttFont: a TTFont instance
"""
old_weight_class = ttFont["OS/2"].usWeightClass
if 'fvar' in ttFont:
fvar = ttFont['fvar']
default_axis_values = {a.axisTag: a.defaultValue for a in fvar.axes}
v = default_axis_values.get('wght', None)
if v is not None:
ttFont["OS/2"].usWeightClass = int(v)
return ttFont["OS/2"].usWeightClass != old_weight_class
stylename = font_stylename(ttFont)
tokens = stylename.split()
# Order WEIGHT_NAMES so longest names are first
for style in sorted(WEIGHT_NAMES, key=lambda k: len(k), reverse=True):
if style in tokens:
ttFont["OS/2"].usWeightClass = WEIGHT_NAMES[style]
return ttFont["OS/2"].usWeightClass != old_weight_class
if "Italic" in tokens:
ttFont["OS/2"].usWeightClass = 400
return ttFont["OS/2"].usWeightClass != old_weight_class
raise ValueError(
f"Cannot determine usWeightClass because font style, '{stylename}' "
f"doesn't have a weight token which is in our known "
f"weights, '{WEIGHT_NAMES.keys()}'"
)
def fix_fs_selection(ttFont):
"""Fix the OS/2 table's fsSelection so it conforms to GF's supported
styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
Args:
ttFont: a TTFont instance
"""
stylename = font_stylename(ttFont)
tokens = set(stylename.split())
old_selection = fs_selection = ttFont["OS/2"].fsSelection
# turn off all bits except for bit 7 (USE_TYPO_METRICS)
fs_selection &= 1 << 7
if "Italic" in tokens:
fs_selection |= 1 << 0
if "Bold" in tokens:
fs_selection |= 1 << 5
# enable Regular bit for all other styles
if not tokens & set(["Bold", "Italic"]):
fs_selection |= 1 << 6
ttFont["OS/2"].fsSelection = fs_selection
return old_selection != fs_selection
def fix_mac_style(ttFont):
"""Fix the head table's macStyle so it conforms to GF's supported
styles table:
https://github.com/googlefonts/gf-docs/tree/main/Spec#supported-styles
Args:
ttFont: a TTFont instance
"""
stylename = font_stylename(ttFont)
tokens = set(stylename.split())
mac_style = 0
if "Italic" in tokens:
mac_style |= 1 << 1
if "Bold" in tokens:
mac_style |= 1 << 0
ttFont["head"].macStyle = mac_style
def fix_fvar_instances(ttFont):
"""Replace a variable font's fvar instances with a set of new instances
that conform to the Google Fonts instance spec:
https://github.com/googlefonts/gf-docs/tree/main/Spec#fvar-instances
Args:
ttFont: a TTFont instance
"""
if "fvar" not in ttFont:
raise ValueError("ttFont is not a variable font")
fvar = ttFont["fvar"]
default_axis_vals = {a.axisTag: a.defaultValue for a in fvar.axes}
stylename = font_stylename(ttFont)
is_italic = "Italic" in stylename
is_roman_and_italic = any(a for a in ("slnt", "ital") if a in default_axis_vals)
    wght_axis = next((a for a in fvar.axes if a.axisTag == "wght"), None)
|
wght_min = int(wght_axis.minValue)
wght_max = int(wght_axis.maxValue)
nametable = ttFont["name"]
|
def gen_instances(is_ital
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsratecontrol.py
|
Python
|
apache-2.0
| 5,380 | 0.036059 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsratecontrol(base_resource) :
""" Configuration for rate control resource. """
def __init__(self) :
self._tcpthreshold = 0
self._udpthreshold = 0
self._icmpthreshold = 0
self._tcprstthreshold = 0
@property
def tcpthreshold(self) :
ur"""Number of SYNs permitted per 10 milliseconds.
"""
try :
return self._tcpthreshold
except Exception as e:
raise e
@tcpthreshold.setter
def tcpthreshold(self, tcpthreshold) :
ur"""Number of SYNs permitted per 10 milliseconds.
"""
try :
self._tcpthreshold = tcpthreshold
except Exception as e:
raise e
@property
def udpthreshold(self) :
ur"""Number of UDP packets permitted per 10 milliseconds.
"""
try :
return self._udpthreshold
except Exception as e:
raise e
@udpthreshold.setter
def udpthreshold(self, udpthreshold) :
ur"""Number of UDP packets permitted per 10 milliseconds.
"""
try :
self._udpthreshold = udpthreshold
except Exception as e:
raise e
@property
def icmpthreshold(self) :
ur"""Number of ICMP packets permitted per 10 milliseconds.<br/>Default value: 100.
"""
try :
return self._icmpthreshold
except Exception as e:
raise e
@icmpthreshold.setter
def icmpthreshold(self, icmpthreshold) :
ur"""Number of ICMP packets permitted per 10 milliseconds.<br/>Default value: 100
"""
try :
self._icmpthreshold = icmpthreshold
except Exception as e:
raise e
@property
def tcprstthreshold(self) :
ur"""The number of TCP RST packets permitted per 10 milli second. zero means rate control is disabled and 0xffffffff means every thing is rate controlled.<br/>Default value: 100.
"""
try :
return self._tcprstthreshold
except Exception as e:
raise e
@tcprstthreshold.setter
def tcprstthreshold(self, tcprstthreshold) :
ur"""The number of TCP RST packets permitted per 10 milli second. zero means rate control is disabled and 0xffffffff means every thing is rate controlled.<br/>Default value: 100
"""
try :
self._tcprstthreshold = tcprstthreshold
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsratecontrol_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsratecontrol
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update nsratecontrol.
"""
try :
if type(resource) is not list :
updateresource = nsratecontrol()
updateresource.tcpthreshold = resource.tcpthreshold
updateresource.udpthreshold = resource.udpthreshold
updateresource.icmpthreshold = resource.icmpthreshold
updateresource.tcprstthreshold = resource.tcprstthreshold
return updateresource.update_resource(client)
except Exception as e :
raise e
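	# Hypothetical usage sketch (not part of the generated NITRO bindings):
	#   rc = nsratecontrol()
	#   rc.tcpthreshold = 200              # allow 200 SYNs per 10 ms
	#   nsratecontrol.update(client, rc)   # `client` is an authenticated nitro_service session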
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of nsratecontrol resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = nsratecontrol()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the nsratecontrol resources that are configured on netscaler.
"""
try :
if not name :
obj = nsratecontrol()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class nsratecontrol_response(base_response) :
def __init__(self, length=1) :
self.nsratecontrol = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsratecontrol = [nsratecontrol() for _ in range(length)]
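# ----------------------------------------------------------------------
# Illustrative sketch added for clarity; not part of the original SDK module.
# It shows how the nsratecontrol resource above might be updated through a
# NITRO client. The nitro_service import path, management address, and
# credentials are assumptions, not taken from this file.
def _example_update_ratecontrol() :
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("192.0.2.10", "http")   # hypothetical NetScaler address
    client.login("nsroot", "nsroot")               # hypothetical credentials
    resource = nsratecontrol()
    resource.tcpthreshold = 200       # SYNs permitted per 10 ms
    resource.udpthreshold = 300       # UDP packets permitted per 10 ms
    resource.icmpthreshold = 100      # ICMP packets permitted per 10 ms
    resource.tcprstthreshold = 100    # TCP RST packets permitted per 10 ms
    nsratecontrol.update(client, resource)
    client.logout()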
|
goddardl/gaffer
|
python/GafferTest/TypedObjectPlugTest.py
|
Python
|
bsd-3-clause
| 8,984 | 0.059996 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class TypedObjectPlugTest( GafferTest.TestCase ) :
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.failUnless( s2["n"]["t"].isInstanceOf( Gaffer.ObjectPlug.staticTypeId() ) )
def testSerialisationWithConnection( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n2"] = Gaffer.Node()
s["n2"]["t2"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, direction=Gaffer.Plug.Direction.Out )
s["n"]["t"].setInput( s["n2"]["t2"] )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.failUnless( s2["n"]["t"].getInput().isSame( s2["n2"]["t2"] ) )
def testDefaultValue( self ) :
p = Gaffer.ObjectPlug( "p", defaultValue = IECore.IntVectorData( [ 1, 2, 3 ] ) )
self.assertEqual( p.defaultValue(), IECore.IntVectorData( [ 1, 2, 3 ] ) )
def testRunTimeTyped( self ) :
self.assertEqual( IECore.RunTimeTyped.baseTypeId( Gaffer.ObjectPlug.staticTypeId() ), Gaffer.ValuePlug.staticTypeId() )
def testAcceptsNoneInput( self ) :
p = Gaffer.ObjectPlug( "hello", Gaffer.Plug.Direction.In, IECore.IntData( 10 ) )
self.failUnless( p.acceptsInput( None ) )
def testBoolVectorDataPlug( self ) :
p = Gaffer.BoolVectorDataPlug( "p", defaultValue = IECore.BoolVectorData( [ True, False ] ) )
self.assertEqual( p.defaultValue(), IECore.BoolVectorData( [ True, False ] ) )
self.assertEqual( p.getValue(), IECore.BoolVectorData( [ True, False ] ) )
p.setValue( IECore.BoolVectorData( [ False ] ) )
self.assertEqual( p.getValue(), IECore.BoolVectorData( [ False ] ) )
self.assertRaises( Exception, p.setValue, IECore.IntData( 10 ) )
def testNullDefaultValue( self ) :
self.assertRaises( ValueError, Gaffer.ObjectPlug, "hello", defaultValue = None )
def testNullValue( self ) :
p = Gaffer.ObjectPlug( "hello", Gaffer.Plug.Direction.In, IECore.IntData( 10 ) )
self.assertRaises( ValueError, p.setValue, None )
def testSerialisationWithValueAndDefaultValue( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultValue = IECore.IntData( 10 ) )
s["n"]["t"].setValue( IECore.CompoundObject( { "a" : IECore.IntData( 20 ) } ) )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.failUnless( s2["n"]["t"].isInstanceOf( Gaffer.ObjectPlug.staticTypeId() ) )
self.failUnless( s2["n"]["t"].defaultValue() == IECore.IntData( 10 ) )
self.failUnless( s2["n"]["t"].getValue() == IECore.CompoundObject( { "a" : IECore.IntData( 20 ) } ) )
@GafferTest.expectedFailure
def testSerialisationOfMeshPrimitives( self ) :
# right now we can only serialise types which define __repr__, but that
# isn't defined for all cortex types. this test should pass when we get round
# to defining it for MeshPrimitives - we should do the other primitives at the
# same time, obviously.
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["t"] = Gaffer.ObjectPlug( "hello", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultValue = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 10 ) ) ) )
s["n"]["t"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ) ) )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.assertEqual( s["n"]["t"].defaultValue(), s2["n"]["t"].defaultValue() )
self.assertEqual( s["n"]["t"].getValue(), s2["n"]["t"].getValue() )
def testConstructCantSpecifyBothInputAndValue( self ) :
out = Gaffer.ObjectPlug( "out", direction=Gaffer.Plug.Direction.Out, defaultValue=IECore.StringData( "hi" ) )
self.assertRaises( Exception, Gaffer.ObjectPlug, "in", input=out, value=IECore.IntData( 10 ) )
class TypedObjectPlugNode( Gaffer.Node ) :
def __init__( self, name="TypedObjectPlugNode" ) :
Gaffer.Node.__init__( self, name )
self.addChild(
Gaffer.ObjectPlug( "p", defaultValue = IECore.IntData( 1 ) ),
)
IECore.registerRunTimeTyped( TypedObjectPlugNode )
def testSerialisationOfStaticPlugs( self ) :
s = Gaffer.ScriptNode()
s["n"] = self.TypedObjectPlugNode()
s["n"]["p"].setValue( IECore.IntData( 10 ) )
se = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( se )
self.assertEqual( s2["n"]["p"].getValue(), IECore.IntData( 10 ) )
def testSetToDefault( self ) :
plane = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 10 ) ) )
plug = Gaffer.ObjectPlug( defaultValue = plane )
self.assertEqual( plug.getValue(), plane )
plug.setValue( IECore.SpherePrimitive() )
self.assertEqual( plug.getValue(), IECore.SpherePrimitive() )
plug.setToDefault()
self.assertEqual( plug.getValue(), plane )
def testValueType( self ) :
self.failUnless( Gaffer.ObjectPlug.ValueType is IECore.Object )
self.failUnless( Gaffer.BoolVectorDataPlug.ValueType is IECore.BoolVectorData )
self.failUnless( Gaffer.IntVectorDataPlug.ValueType is IECore.IntVectorData )
self.failUnless( Gaffer.FloatVectorDataPlug.ValueType is IECore.FloatVectorData )
self.failUnless( Gaffer.StringVectorDataPlug.ValueType is IECore.StringVectorData )
self.failUnless( Gaffer.V3fVectorDataPlug.ValueType is IECore.V3fVectorData )
self.failUnless( Gaffer.ObjectVectorPlug.ValueType is IECore.ObjectVector )
def testReadOnlySetValueRaises( self ) :
p = Gaffer.ObjectPlug( defaultValue = IECore.NullObject(), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.ReadOnly )
self.assertRaises( RuntimeError, p.setValue, IECore.IntData( 10 ) )
def testSetValueCopying( self ) :
p = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 1 ) )
i = IECore.IntData( 10 )
p.setValue( i )
self.failIf( p.getValue( _copy=False ).isSame( i ) )
|
AkihiroSuda/earthquake
|
docker/eq-init.py
|
Python
|
apache-2.0
| 1,499 | 0.008005 |
#!/usr/bin/python
"""
Init script for Earthquake Docker Image (osrg/earthquake)
Supported Env vars:
- EQ_DOCKER_PRIVILEGED
"""
import os
import prctl
import subprocess
import sys
def log(s):
print 'INIT: %s' % s
def is_privileged_mode():
has_env = os.getenv('EQ_DOCKER_PRIVILEGED')
has_cap = prctl.cap_permitted.sys_admin
if has_env and not has_cap:
raise RuntimeError('EQ_DOCKER_PRIVILEGED is set, but SYS_ADMIN cap is missing')
return has_env
def run_daemons(l):
for elem in l:
log('Starting daemon: %s' % elem)
rc = subprocess.call(elem)
if rc != 0:
log('Exiting with status %d..(%s)' % (rc, elem))
sys.exit(rc)
def run_command_and_exit(l):
log('Starting command: %s' % l)
rc = subprocess.call(l)
log('Exiting with status %d..(%s)' % (rc, l))
sys.exit(rc)
def get_remaining_args():
return sys.argv[1:]
if __name__ == '__main__':
daemons = [
['service', 'mongodb', 'start']
]
run_daemons(daemons)
com = ['/bin/bash', '--login', '-i']
if is_privileged_mode():
log('Running with privileged mode. Enabling DinD, OVS, and Ryu')
com = ['wrapdocker', '/init.dind-ovs-ryu.sh']
else:
log('Running without privileged mode. Please set EQ_DOCKER_PRIVILEGED if you want to use Ethernet Inspector')
log('Earthquake is installed on /earthquake. Please refer to /earthquake/README.md')
run_command_and_exit(com + get_remaining_args())
|
sfalkner/pySMAC
|
pysmac/utils/state_merge.py
|
Python
|
agpl-3.0
| 11,573 | 0.009937 |
import os
import glob
import operator
import errno
import filecmp
import shutil
import numpy
from .smac_output_readers import *
def find_largest_file (glob_pattern):
""" Function to find the largest file matching a glob pattern.
Old SMAC version keep several versions of files as back-ups. This
helper can be used to find the largest file (which should contain the
final output). One could also go for the most recent file, but that
might fail when the data is copied.
:param glob_pattern: a UNIX style pattern to apply
:type glob_pattern: string
:returns: string -- largest file matching the pattern
"""
fns = glob.glob(glob_pattern)
if len(fns) == 0:
raise RuntimeError("No file matching pattern \'{}\' found!".format(glob_pattern))
f_name = ""
f_size = -1
for fn in fns:
s = os.lstat(fn).st_size
if (s > f_size):
f_size = s
f_name = fn
return(f_name)
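#==============================================================================
# Illustrative sketch added for clarity; not part of the original module.
# find_largest_file picks the biggest file matching a glob, which is how the
# most complete SMAC back-up (e.g. runs_and_results-it*.csv) gets selected.
# The directory below is an assumption.
#
#   fn = find_largest_file('./state-run1/runs_and_results-it*.csv')
#   print(fn)   # path of the largest matching file; RuntimeError if none match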
def read_sate_run_folder(directory, rar_fn = "runs_and_results-it*.csv",inst_fn = "instances.txt" , feat_fn = "instance-features.txt" , ps_fn = "paramstrings-it*.txt"):
""" Helper function that can reads all information from a state_run folder.
To get all information of a SMAC run, several different files have
to be read. This function provides a short notation for gathering
all data at once.
:param directory: the location of the state_run_folder
:type directory: str
:param rar_fn: pattern to find the runs_and_results file
:type rar_fn: str
:param inst_fn: name of the instance file
:type inst_fn: str
:param feat_fn: name of the instance feature file. If this file is not found, pysmac assumes no instance features.
:type feat_fn: str
:param ps_fn: name of the paramstrings file
:type ps_fn: str
:returns: tuple -- (configurations returned by read_paramstring_file,\n
instance names returned by read_instance_file,\n
instance features returned by read_instance_features_file,\n
actual run data returned by read_runs_and_results_file)
"""
print(("reading {}".format(directory)))
configs = read_paramstrings_file(find_largest_file(os.path.join(directory,ps_fn)))
instance_names = read_instances_file(find_largest_file(os.path.join(directory,inst_fn)))
runs_and_results = read_runs_and_results_file(find_largest_file(os.path.join(directory, rar_fn)))
full_feat_fn = glob.glob(os.path.join(directory,feat_fn))
if len(full_feat_fn) == 1:
instance_features = read_instance_features_file(full_feat_fn[0])
else:
instance_features = None
return (configs, instance_names, instance_features, runs_and_results)
def state_merge(state_run_directory_list, destination,
check_scenario_files = True, drop_duplicates = False,
instance_subset = None):
""" Function to merge multiple state_run directories into a single
run to be used in, e.g., the fANOVA.
To take advantage of the data gathered in multiple independent runs,
the state_run folders have to be merged into a single directory that
resemble the same structure. This allows easy application of the
pyfANOVA on all run_and_results files.
:param state_run_directory_list: list of state_run folders to be merged
:type state_run_directory_list: list of str
:param destination: a directory to store the merged data. The folder is created if needed, and already existing data in that location is silently overwritten.
:type destination: str
:param check_scenario_files: whether to ensure that all scenario files in all state_run folders are identical. This helps to avoid merging runs with different settings. Note: Command-line options given to SMAC are not compared here!
:type check_scenario_files: bool
:param drop_duplicates: Defines how to handle runs with identical configurations. For deterministic algorithms the function's response should be the same, so dropping duplicates is safe. Keep in mind that every duplicate effectively puts more weight on a configuration when estimating parameter importance.
:type drop_duplicates: bool
:param instance_subset: Defines a list of instances that are used for the merge. All other instances are ignored. (Default: None, all instances are used)
:type instance_subset: list
"""
configurations = {}
instances = {}
runs_and_results = {}
ff_header= set()
i_confs = 1;
i_insts = 1;
# make sure all pcs files are the same
pcs_files = [os.path.join(d,'param.pcs') for d in state_run_directory_list]
if not all([filecmp.cmp(fn, pcs_files[0]) for fn in pcs_files[1:]]):
raise RuntimeError("The pcs files of the different runs are not identical!")
#check the scenario files if desired
scenario_files = [os.path.join(d,'scenario.txt') for d in state_run_directory_list]
if check_scenario_files and not all([filecmp.cmp(fn, scenario_files[0]) for fn in scenario_files[1:]]):
raise RuntimeError("The scenario files of the different runs are not identical!")
for directory in state_run_directory_list:
try:
confs, inst_names, tmp , rars = read_sate_run_folder(directory)
(header_feats, inst_feats) = tmp if tmp is not None else (None,None)
except:
print(("Something went wrong while reading {}. Skipping it.".format(directory)))
continue
# confs is a list of dicts, but dicts are not hashable, so they are
# converted into a tuple of (key, value) pairs and then sorted
confs = [tuple(sorted(d.items())) for d in confs]
# merge the configurations
for conf in confs:
if not conf in configurations:
configurations[conf] = {'index': i_confs}
i_confs += 1
# merge the instances
ignored_instance_ids = []
for i in range(len(inst_names)):
if instance_subset is not None and inst_names[i][0] not in instance_subset:
ignored_instance_ids.append(i)
continue
if not inst_names[i][0] in instances:
instances[inst_names[i][0]] = {'index': i_insts}
instances[inst_names[i][0]]['features'] = inst_feats[inst_names[i][0]] if inst_feats is not None else None
instances[inst_names[i][0]]['additional info'] = ' '.join(inst_names[i][1:]) if len(inst_names[i]) > 1 else None
i_insts += 1
else:
if (inst_feats is None):
if not (instances[inst_names[i][0]]['features'] is None):
raise ValueError("The data contains the same instance name ({}) twice, but once with and without features!".format(inst_names[i]))
elif not numpy.all(instances[inst_names[i][0]]['features'] == inst_feats[inst_names[i][0]]):
raise ValueError("The data contains the same instance name ({}) twice, but with different features!".format(inst_names[i]))
pass
# store the feature file header:
if header_feats is not None:
ff_header.add(",".join(header_feats))
if len(ff_header) != 1:
raise RuntimeError("Feature Files not consistent across runs!\n{}".format(header_feats))
if len(rars.shape) == 1:
rars = numpy.array([rars])
for run in rars:
# get the local configuration and instance id
lcid, liid = int(run[0])-1, int(run[1])-1
if liid in ignored_instance_ids:
continue
# translate them into the global ones
gcid = configurations[confs[lcid]]['index']
giid = instances[inst_names[liid][0]]['index']
# check for duplicates and skip if necessary
if (gcid, giid) in runs_and_results:
if drop_duplicates:
#print('dropped duplicate: configuration {} on instace {}'.format(
|
ampron/pyMTRX
|
pyMTRX/scripts/notebook_sheet.py
|
Python
|
gpl-3.0
| 12,420 | 0.008132 |
#!/usr/bin/python
# -*- encoding: UTF-8 -*-
'''MATRIX Log File Maker
Version: 2
This script will create a csv file that will be a table of settings for all
STM data recorded from the Omicron MATRIX software.
List of classes: -none-
List of functions:
main
'''
# built-in modules
import sys
import traceback
import os
import os.path
import re
import random
import time
import multiprocessing as mp
from pprint import pprint
import pdb
# 3rd-party modules
#sys.path.append('C:/Users/csykes/alex/Dropbox/ampPy/spm_dev/')
import pyMTRX
from pyMTRX.experiment import Experiment
#==============================================================================
def main( cwd='./', sdir=None, r=True, processes=mp.cpu_count(),
single_sheet=False, debug=False
):
if debug: print '*** DEBUG MODE ON ***'
t = time.time()
if cwd[-1] != '/':
cwd += '/'
files = os.listdir(cwd)
print 'looking for experiment files in "{}"'.format(cwd)
# find one experiment file and then move on
experiment_files = find_files(cwd, fext='mtrx', r=r)
print 'Found the following .mtrx files'
for fp in experiment_files:
print ' ' + os.path.basename(fp)
N_opened = []
try:
processes = int(processes)
except ValueError:
processes = 1
#END try
if processes < 1 or debug: processes = 1
if processes == 1:
for fp in experiment_files:
if not isinstance(sdir, basestring): sdir = os.path.dirname(fp)
N_opened.append(
create_experiment_log(fp, sdir=sdir, debug=debug)
)
# END for
else:
# Create worker pool and start all jobs
worker_pool = mp.Pool(processes=processes, maxtasksperchild=12)
print 'running in multiprocess mode: {} processes'.format(processes)
for fp in experiment_files:
if not isinstance(sdir, basestring): sdir = os.path.dirname(fp)
N_opened.append(
worker_pool.apply_async( wrapped_create_exlog,
args=(fp,sdir,debug),
)
)
# END for
worker_pool.close()
# Wait here for all work to complete
worker_pool.join()
# END if
N = 0
if processes == 1:
for n in N_opened: N += n
else:
for n in N_opened:
try:
N += n.get()
except Exception as err:
print err
# END try
# END for
# END if
t = time.time() - t
hours = int(t/3600)
minutes = int((t-3600*hours)/60)
seconds = int(t - 3600*hours - 60*minutes)
print 'Total run time: {:02d}:{:02d}:{:02d}'.format(
hours, minutes, seconds
)
print 'Average processing speed: {:.0f} files/min'.format(N/(t/60))
# END main
#==============================================================================
def wrapped_create_exlog(*args, **kwargs):
try:
return create_experiment_log(*args, **kwargs)
except Exception as err:
print '{}: {}'.format(args[0], repr(err))
return 0
# END try
# END wrapped_create_exlog
def create_experiment_log(exp_fp, sdir='./', debug=False):
cwd, exp_fn = os.path.split(exp_fp)
cwd += '/'
print 'loading ' + exp_fn
ex = Experiment(cwd + exp_fn, debug=debug)
# collect image files
# (*image file must be in experiment file AND a file in the directory)
all_files = list( set(ex.get_data_filenames()) & set(os.listdir(cwd)) )
img_files = [fn for fn in all_files if Experiment.is_image(fn)]
sts_files = [fn for fn in all_files if Experiment.is_point_spectrum(fn)]
dname_lkup = { 0: '00 trace up', 1: '01 retrace up',
2: '10 trace down', 3: '11 retrace down'
}
IMG_entries = []
STS_entries = []
for fn in sorted(img_files, key=lambda f: os.path.getctime(cwd+f)):
if debug: print 'loading "{}"'.format(fn)
# scns = [trace_up, retrace_up, trace_down, retrace_down]
scns = flatten_tree( ex.import_scan(cwd + fn) )
for i in range(len(scns)):
scns[i].props['direction'] = dname_lkup[i]
IMG_entries.append( make_scan_entry(scns[i]) )
#for crv in scns[i].spectra:
# STS_entries.append( make_spectrum_entry(crv, debug=debug) )
# END for
# END for
for fn in sts_files:
curves = ex.import_spectra(os.path.join(cwd, fn))
for crv in curves:
STS_entries.append( make_spectrum_entry(crv, debug=debug) )
# END for
IMG_entries.sort(key=lambda tup: tup[0])
STS_entries.sort(key=lambda tup: tup[0])
N_opened = len(IMG_entries) + len(STS_entries) + 1
save_name = re.sub(r'_0001\.mtrx$', '_settings.csv', exp_fn)
f = open(os.path.join(sdir, save_name), 'w')
columns = [ 'date/time (d)',
'sample', 'data set',
'index', 'rep', 'dir', 'channel',
'x (nm)', 'y (nm)',
'scan bias (V)', 'current setpoint (pA)',
'loop gain (%)', 'T_raster (ms)',
'points', 'lines',
'line width (nm)', 'image height (nm)', '', 'angle (deg)',
'No. STS',
'exp comment', 'img comment',
'file'
]
f.write(','.join(columns))
f.write('\n')
for t, ln in IMG_entries:
f.write(ln)
f.close()
save_name = re.sub(r'_0001\.mtrx$', '_settings_STS.csv', exp_fn)
f = open(os.path.join(sdir, save_name), 'w')
columns = [ 'date/time (d)',
'sample', 'data set',
'scan index', 'rep', 'dir', 'channel',
'spec index', 'rep', 'dir', 'channel',
'start voltage (V)', 'end voltage (V)',
'scan bias (V)', 'current setpoint (pA)',
'loop gain (%)', 'T_raster (ms)',
'points',
'exp comment', 'spectrum comments',
'file'
]
f.write(','.join(columns))
f.write('\n')
for t, ln in STS_entries:
f.write(ln)
f.close()
if len(os.path.join(sdir, save_name)) > 79:
print cwd + '\n ' + save_name
else:
print cwd + ' ' + save_name
# END if
return N_opened
# END create_experiment_log
#==============================================================================
def make_scan_entry(scn):
ls = []
# time
ls.append( str(scn.props['time']/86400.0 + 25569 - 4.0/24) )
# experiment sample
ls.append( csv_safe(scn.ex.sample) )
ls.append( csv_safe(scn.ex.data_set) )
# img index (scan, repetition, direction) and channel
ls.append(
'{index:03d},{rep:04d},{direction},{channel}'.format(**scn.props)
)
# scan location
ls.append('{}'.format(scn.props['XYScanner_X_Offset'].value * 1e9))
ls.append('{}'.format(scn.props['XYScanner_Y_Offset'].value * 1e9))
# scan voltage
ls.append('{}'.format(scn.props['GapVoltageControl_Voltage'].value))
# scan current
ls.append('{:0.1f}'.format(scn.props['Regulator_Setpoint_1'].value * 1e12))
# scan loop gain
ls.append('{:0.2f}'.format(scn.props['Regulator_Loop_Gain_1_I'].value))
# scan raster time
ls.append('{:0.3f}'.format(scn.props['XYScanner_Raster_Time'].value * 1e3))
# scan size in points and lines
ls.append(str(scn.props['XYScanner_Points'].value))
ls.append(str(scn.props['XYScanner_Lines'].value))
# scan size in physical units (nm)
ls.append('{:0.2f}'.format(scn.props['XYScanner_Width'].value * 1e9))
ls.append('{:0.2f}'.format(scn.props['XYScanner_Height'].value * 1e9))
# alert flag for parameter errors
if pyMTRX.size_change(scn):
ls.append('*')
else:
ls.append('')
# END if
# scan angle
ls.append('{:0.1f}'.format(scn.props['XYScanner_Angle'].value))
# number of linked point spectra
ls.append(str(len(scn.spectra)))
# experiment data set, comment, scan comment, and file name
ls.append( csv_safe(scn.ex.
|
rueckstiess/mtools
|
mtools/mlaunch/mlaunch.py
|
Python
|
apache-2.0
| 96,965 | 0.000423 |
#!/usr/bin/env python3
import argparse
import functools
import json
import os
import re
import signal
import socket
import ssl
import subprocess
import sys
import threading
import time
import warnings
from collections import defaultdict
from operator import itemgetter
import psutil
from mtools.util import OrderedDict
from mtools.util.cmdlinetool import BaseCmdLineTool
from mtools.util.print_table import print_table
from mtools.version import __version__
try:
import Queue
except ImportError:
import queue as Queue
try:
from pymongo import MongoClient as Connection
from pymongo import version_tuple as pymongo_version
from bson import SON
from io import BytesIO
from distutils.version import LooseVersion
from pymongo.errors import ConnectionFailure, AutoReconnect
from pymongo.errors import OperationFailure, ConfigurationError
except ImportError as e:
raise ImportError("Can't import pymongo. See "
"https://api.mongodb.com/python/current/ for "
"instructions on how to install pymongo: " + str(e))
class MongoConnection(Connection):
"""
MongoConnection class.
Wrapper around Connection (itself conditionally a MongoClient or
pymongo.Connection) to specify timeout and directConnection.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('directConnection', True)
kwargs.setdefault('serverSelectionTimeoutMS', 1)
# Set client application name for MongoDB 3.4+ servers
kwargs['appName'] = f'''mlaunch v{__version__}'''
Connection.__init__(self, *args, **kwargs)
def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None,
ssl_pymongo_options=None, tls_pymongo_options=None):
"""
Ping server and wait for response.
Ping a mongod or mongos every `interval` seconds until it responds, or
`timeout` seconds have passed. If `to_start` is set to False, will wait for
the node to shut down instead. This function can be called as a separate
thread.
If queue is provided, it will place the results in the message queue and
return, otherwise it will just return the result directly.
"""
host = 'localhost:%i' % port
start_time = time.time()
while True:
if (time.time() - start_time) > timeout:
if queue:
queue.put_nowait((port, False))
return False
try:
# make connection and ping host
con = MongoConnection(host,
**(ssl_pymongo_options or {}),
**(tls_pymongo_options or {}))
con.admin.command('ping')
if to_start:
if queue:
queue.put_nowait((port, True))
return True
else:
time.sleep(interval)
except Exception:
if to_start:
time.sleep(interval)
else:
if queue:
queue.put_nowait((port, True))
return True
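# Illustrative sketch added for clarity; not part of the original file.
# wait_for_host polls a local mongod/mongos until it answers a ping (or, with
# to_start=False, until it stops answering). Port and timings are assumptions.
#
#   if wait_for_host(27017, interval=1, timeout=30, to_start=True):
#       print("mongod on port 27017 is up")
#
# When used as a thread, pass a Queue.Queue() as `queue` and read (port, ok)
# tuples from it instead of relying on the return value.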
def shutdown_host(port, username=None, password=None, authdb=None):
"""
Send the shutdown command to a mongod or mongos on given port.
This function can be called as a separate thread.
"""
host = 'localhost:%i' % port
try:
if username and password and authdb:
if authdb != "admin":
raise RuntimeError("given username/password is not for "
"admin database")
mc = MongoConnection(host, username=username, password=password)
else:
mc = MongoConnection(host)
try:
mc.admin.command('shutdown', force=True)
except AutoReconnect:
pass
except OperationFailure:
print("Error: cannot authenticate to shut down %s." % host)
return
except ConnectionFailure:
pass
else:
mc.close()
@functools.lru_cache()
def check_mongo_server_output(binary, argument):
"""Call mongo[d|s] with arguments such as --help or --version.
This is used only to check the server's output. We expect the server to
exit immediately.
"""
try:
proc = subprocess.Popen(['%s' % binary, argument],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, shell=False)
except OSError as exc:
print('Failed to launch %s' % binary)
raise exc
out, err = proc.communicate()
if proc.returncode:
raise OSError(out or err)
return out
class MLaunchTool(BaseCmdLineTool):
UNDOCUMENTED_MONGOD_ARGS = ['--nopreallocj', '--wiredTigerEngineConfigString']
UNSUPPORTED_MONGOS_ARGS = ['--wiredTigerCacheSizeGB', '--storageEngine']
UNSUPPORTED_CONFIG_ARGS = ['--oplogSize', '--storageEngine', '--smallfiles', '--nojournal']
def __init__(self, test=False):
BaseCmdLineTool.__init__(self)
# arguments
self.args = None
# startup parameters for each port
self.startup_info = {}
# data structures for the discovery feature
self.cluster_tree = {}
self.cluster_tags = defaultdict(list)
self.cluster_running = {}
# memoize ignored arguments passed to different binaries
self.ignored_arguments = {}
# config docs for replica sets (key is replica set name)
self.config_docs = {}
# shard connection strings
self.shard_connection_str = []
# ssl configuration to start mongod or mongos, or create a MongoClient
self.ssl_server_args = ''
self.ssl_pymongo_options = {}
# tls configuration to start mongod or mongos, or create a MongoClient
self.tls_server_args = ''
self.tls_pymongo_options = {}
# indicate if running in testing mode
self.test = test
# version of MongoDB server
self.current_version = self.getMongoDVersion()
def run(self, arguments=None):
"""
Main run method.
Called for all sub-commands and parameters. It sets up argument
parsing, then calls the sub-command method with the same name.
"""
# set up argument parsing in run, so that subsequent calls
# to run can call different sub-commands
self.argparser = argparse.ArgumentParser()
self.argparser.add_argument('--version', action='version',
version=f'''mtools version {__version__} || Python {sys.version}''')
self.argparser.add_argument('--no-progressbar', action='store_true',
default=False,
help='disables progress bar')
self.argparser.description = ('script to launch MongoDB stand-alone '
'servers, replica sets and shards.')
# make sure init is default command even when specifying
# arguments directly
if arguments and arguments.startswith('-'):
arguments = 'init ' + arguments
# default sub-command is `init` if none provided
elif (len(sys.argv) > 1 and sys.argv[1].startswith('-') and
sys.argv[1] not in ['-h', '--help', '--version']):
sys.argv = sys.argv[0:1] + ['init'] + sys.argv[1:]
# create command sub-parsers
subparsers = self.argparser.add_subparsers(dest='command')
self.argparser._action_groups[0].title = 'commands'
self.argparser._action_groups[0].description = \
('init is the default command and can be omitted. To get help on '
'individual commands, run mlaunch <command> --help. Command line '
'arguments which are not handled by mlaunch will be passed '
'through to mongod/mongos if those options are listed in the '
'--help output for the current binary. For example: '
'--storageEngine, --logappend, or --config.')
# init command
helptext = ('initialize a new MongoDB environment and start '
|
gigglearrows/anniesbot
|
pajbot/models/setting.py
|
Python
|
mit
| 2,197 | 0 |
import logging
from collections import UserDict
from pajbot.models.db import DBManager, Base
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.mysql import TEXT
log = logging.getLogger('pajbot')
class Setting(Base):
__tablename__ = 'tb_settings'
id = Column(Integer, primary_key=True)
setting = Column(String(128))
value = Column(TEXT)
type = Column(String(32))
def __init__(self, setting, value, type):
self.id = None
self.setting = setting
self.value = value
self.type = type
def parse_value(self):
try:
if self.type == 'int':
return int(self.value)
elif self.type == 'string':
return self.value
elif self.type == 'list':
return self.value.split(',')
elif self.type == 'bool':
return int(self.value) == 1
else:
log.error('Invalid setting type: {0}'.format(self.type))
except Exception:
log.exception('Exception caught when loading setting')
return None
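# Illustrative sketch added for clarity; not part of the original file.
# parse_value converts the stored string according to the row's declared type;
# the example settings below are assumptions.
#
#   Setting('warnings_length', '600', 'int').parse_value()    # -> 600
#   Setting('banned_words', 'a,b,c', 'list').parse_value()    # -> ['a', 'b', 'c']
#   Setting('ban_ascii', '1', 'bool').parse_value()           # -> True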
class SettingManager(UserDict):
def __init__(self, overrides={}):
UserDict.__init__(self)
self.db_session = DBManager.create_session()
self.default_settings = {
'broadcaster': 'test_broadcaster',
'ban_ascii': True,
'lines_offline': True,
'parse_pyramids': False,
'parse_emote_combo': False,
'check_links': True,
'warnings_enabled': True,
'warnings_total_chances': 2,
'warnings_redis_prefix': '',
'warnings_length': 600,
'warnings_base_timeout': 10,
}
self.default_settings.update(overrides)
def commit(self):
self.db_session.commit()
def reload(self):
self.data = self.default_settings
for setting in self.db_session.query(Setting):
parsed_value = setting.parse_value()
if parsed_value is not None:
self.data[setting.setting] = setting.parse_value()
return self
|
NetworkManager/NetworkManager-ci
|
features/steps/commands.py
|
Python
|
gpl-3.0
| 19,422 | 0.003398 |
import json
import os
import pexpect
import re
import time
from behave import step
import nmci
@step(u'Autocomplete "{cmd}" in bash and execute')
def autocomplete_command(context, cmd):
bash = context.pexpect_spawn("bash")
bash.send(cmd)
bash.send('\t')
time.sleep(1)
bash.send('\r\n')
time.sleep(1)
bash.sendeof()
@step(u'Check RSS writable memory in noted value "{i2}" differs from "{i1}" less than "{dif}"')
def check_rss_rw_dif(context, i2, i1, dif):
# def sum_rss_writable_memory(context, pmap_raw):
# total = 0
# for line in pmap_raw.split("\n"):
# vals = line.split()
# if (len(vals) > 2):
# total += int(vals[2])
# return total
#
# sum2 = int(sum_rss_writable_memory(context, context.noted[i2]))
# sum1 = int(sum_rss_writable_memory(context, context.noted[i1]))
sum2 = int(context.noted[i2])
sum1 = int(context.noted[i1])
assert (sum1 + int(dif) > sum2), \
"rw RSS mem: %d + %s !> %d !" % (sum1, dif, sum2)
@step(u'Check noted value "{i2}" difference from "{i1}" is lower than "{dif}"')
def check_dif_in_values(context, i2, i1, dif):
assert (int(context.noted[i1].strip()) + int(dif)) > int(context.noted[i2].strip()), \
"Noted values: %s + %s !> %s !" % (context.noted[i1].strip(), dif, context.noted[i2].strip())
@step(u'Check noted values "{i1}" and "{i2}" are the same')
def check_same_noted_values(context, i1, i2):
assert context.noted[i1].strip() == context.noted[i2].strip(), \
"Noted values: %s != %s !" % (context.noted[i1].strip(), context.noted[i2].strip())
@step(u'Check noted values "{i1}" and "{i2}" are not the same')
def check_same_noted_values_equals(context, i1, i2):
assert context.noted[i1].strip() != context.noted[i2].strip(), \
"Noted values: %s == %s !" % (context.noted[i1].strip(), context.noted[i2].strip())
@step(u'Check noted output contains "{pattern}"')
def check_noted_output_contains(context, pattern):
assert re.search(pattern, context.noted['noted-value']) is not None, "Noted output does not contain the pattern %s" % pattern
@step(u'Check noted output does not contain "{pattern}"')
def check_noted_output_not_contains(context, pattern):
assert re.search(pattern, context.noted['noted-value']) is None, "Noted output contains the pattern %s" % pattern
@step(u'Execute "{command}"')
def execute_command(context, command):
assert context.command_code(command) == 0
@step(u'Execute "{command}" without waiting for process to finish')
def execute_command_nowait(context, command):
context.pexpect_service(command, shell=True)
@step(u'Execute "{command}" without output redirect')
def execute_command_noout(context, command):
context.run(command, stdout=None, stderr=None)
@step(u'Execute "{command}" for "{number}" times')
def execute_multiple_times(context, command, number):
orig_nm_pid = nmci.lib.nm_pid()
i = 0
while i < int(number):
context.command_code(command)
curr_nm_pid = nmci.lib.nm_pid()
assert curr_nm_pid == orig_nm_pid, 'NM crashed as original pid was %s but now is %s' %(orig_nm_pid, curr_nm_pid)
i += 1
@step(u'"{command}" fails')
def wait_for_process(context, command):
assert context.command_code(command) != 0
time.sleep(0.1)
@step(u'Restore hostname from the noted value')
def restore_hostname(context):
context.command_code('hostname %s' % context.noted['noted-value'])
time.sleep(0.5)
@step(u'Hostname is visible in log "{log}"')
@step(u'Hostname is visible in log "{log}" in "{seconds}" seconds')
def hostname_visible(context, log, seconds=1):
seconds = int(seconds)
orig_seconds = seconds
cmd = "grep $(hostname -s) '%s'" %log
while seconds > 0:
if context.command_code(cmd) == 0:
return True
seconds = seconds - 1
time.sleep(1)
raise Exception('Hostname not visible in log in %d seconds' % (orig_seconds))
@step(u'Hostname is not visible in log "{log}"')
@step(u'Hostname is not visible in log "{log}" for full "{seconds}" seconds')
def hostname_not_visible(context, log, seconds=1):
seconds = int(seconds)
orig_seconds = seconds
cmd = "grep $(hostname -s) '%s'" %log
while seconds > 0:
if context.command_code(cmd) != 0:
return True
seconds = seconds - 1
time.sleep(1)
raise Exception('Hostname visible in log after %d seconds' % (orig_seconds - seconds))
@step(u'Nameserver "{server}" is set')
@step(u'Nameserver "{server}" is set in "{seconds}" seconds')
@step(u'Domain "{server}" is set')
@step(u'Domain "{server}" is set in "{seconds}" seconds')
def get_nameserver_or_domain(context, server, seconds=1):
if context.command_code('systemctl is-active systemd-resolved.service -q') == 0:
# We have systemd-resolvd running
cmd = 'resolvectl dns; resolvectl domain'
else:
cmd = 'cat /etc/resolv.conf'
return check_pattern_command(context, cmd, server, seconds)
@step(u'Nameserver "{server}" is not set')
@step(u'Nameserver "{server}" is not set in "{seconds}" seconds')
@step(u'Domain "{server}" is not set')
@step(u'Domain "{server}" is not set in "{seconds}" seconds')
def get_nameserver_or_domain_not(context, server, seconds=1):
if context.command_code('systemctl is-active systemd-resolved.service -q') == 0:
# We have systemd-resolvd running
cmd = 'systemd-resolve --status |grep -A 100 Link'
else:
cmd = 'cat /etc/resolv.conf'
return check_pattern_command(context, cmd, server, seconds, check_type="not")
@step(u'Noted value contains "{pattern}"')
def note_print_property_b(context, pattern):
assert re.search(pattern, context.noted['noted-value']) is not None, \
"Noted value '%s' does not match the pattern '%s'!" % (context.noted['noted-value'], pattern)
@step(u'Note the output of "{command}" as value "{index}"')
def note_the_output_as(context, command, index):
if not hasattr(context, 'noted'):
context.noted = {}
# use nmci as embed might be big in general
context.noted[index] = nmci.command_output_err(command)[0].strip()
@step(u'Note the output of "{command}"')
def note_the_output_of(context, command):
if not hasattr(context, 'noted'):
context.noted = {}
# use nmci as embed might be big in general
context.noted['noted-value'] = nmci.command_output(command).strip()
def json_compare(pattern, out):
pattern_type = type(pattern)
if pattern_type is dict:
for x in pattern:
if x in out:
r = json_compare(pattern[x], out[x])
if r != 0:
return r
else:
return 1
return 0
elif pattern_type is list:
assert False, "TODO: compare lists soomehow"
else:
if out == pattern:
return 0
else:
return 1
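# Illustrative sketch added for clarity; not part of the original file.
# json_compare treats `pattern` as a recursive subset match against `out`:
# it returns 0 when every key/value in the pattern is found, 1 otherwise.
# The example documents below are assumptions.
#
#   json_compare({"device": {"state": "connected"}},
#                {"device": {"state": "connected", "mtu": 1500}})   # -> 0
#   json_compare({"device": {"state": "connected"}},
#                {"device": {"state": "disconnected"}})             # -> 1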
def check_pattern_command(context, command, pattern, seconds, check_type="default", exact_check=False, timeout=180, maxread=100000, interval=1, json_check=False):
seconds = int(seconds)
orig_seconds = seconds
while seconds > 0:
proc = context.pexpect_spawn(command, shell=True, timeout=timeout, maxread=maxread, codec_errors='ignore')
if exact_check:
ret = proc.expect_exact([pattern, pexpect.EOF])
elif json_check:
proc.expect([pexpect.EOF])
out = proc.before
json_out = json.loads(out)
json_pattern = json.loads(pattern)
ret = json_compare(json_pattern, json_out)
else:
ret = proc.expect([pattern, pexpect.EOF])
if check_type == "default":
if ret == 0:
return True
elif check_type == "not":
if ret != 0:
return True
elif check_type == "full":
assert ret == 0, 'Pattern "%s" disappeared after %d seconds, ouput was:\n%s' % (pattern, orig_seconds-seconds, proc.before)
elif check_type == "not_full":
assert ret != 0, 'Pattern "%s" appeared after %d seconds, output was:\n%s%s' %
|
sloanyang/aquantic
|
Tools/Scripts/webkitpy/port/mac_unittest.py
|
Python
|
gpl-2.0
| 12,566 | 0.003342 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.port.mac import MacPort
from webkitpy.port import port_testcase
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2, MockProcess, ScriptError
from webkitpy.common.system.systemhost_mock import MockSystemHost
class MacTest(port_testcase.PortTestCase):
os_name = 'mac'
os_version = 'lion'
port_name = 'mac-lion'
port_maker = MacPort
def assert_skipped_file_search_paths(self, port_name, expected_paths, use_webkit2=False):
port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
self.assertEqual(port._skipped_file_search_paths(), expected_paths)
def test_default_timeout_ms(self):
super(MacTest, self).test_default_timeout_ms()
self.assertEqual(self.make_port(options=MockOptions(guard_malloc=True)).default_timeout_ms(), 350000)
example_skipped_file = u"""
# <rdar://problem/5647952> fast/events/mouseout-on-window.html needs mac DRT to issue mouse out events
fast/events/mouseout-on-window.html
# <rdar://problem/5643675> window.scrollTo scrolls a window with no scrollbars
fast/events/attempt-scroll-with-no-scrollbars.html
# see bug <rdar://problem/5646437> REGRESSION (r28015): svg/batik/text/smallFonts fails
svg/batik/text/smallFonts.svg
# Java tests don't work on WK2
java/
"""
example_skipped_tests = [
"fast/events/mouseout-on-window.html",
"fast/events/attempt-scroll-with-no-scrollbars.html",
"svg/batik/text/smallFonts.svg",
"java",
]
def test_tests_from_skipped_file_contents(self):
port = self.make_port()
self.assertEqual(port._tests_from_skipped_file_contents(self.example_skipped_file), self.example_skipped_tests)
def assert_name(self, port_name, os_version_string, expected):
host = MockSystemHost(os_name='mac', os_version=os_version_string)
port = self.make_port(host=host, port_name=port_name)
self.assertEqual(expected, port.name())
def test_tests_for_other_platforms(self):
platforms = ['mac', 'chromium-linux', 'mac-snowleopard']
port = self.make_port(port_name='mac-snowleopard')
platform_dir_paths = map(port._webkit_baseline_path, platforms)
# Replace our empty mock file system with one which has our expected platform directories.
port._filesystem = MockFileSystem(dirs=platform_dir_paths)
dirs_to_skip = port._tests_for_other_platforms()
self.assertIn('platform/chromium-linux', dirs_to_skip)
self.assertNotIn('platform/mac', dirs_to_skip)
self.assertNotIn('platform/mac-snowleopard', dirs_to_skip)
def test_version(self):
port = self.make_port()
self.assertTrue(port.version())
def test_versions(self):
# Note: these tests don't need to be exhaustive as long as we get path coverage.
self.assert_name('mac', 'snowleopard', 'mac-snowleopard')
self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard')
self.assert_name('mac-snowleopard', 'lion', 'mac-snowleopard')
self.assert_name('mac', 'lion', 'mac-lion')
self.assert_name('mac-lion', 'lion', 'mac-lion')
self.assert_name('mac', 'mountainlion', 'mac-mountainlion')
self.assert_name('mac-mountainlion', 'lion', 'mac-mountainlion')
self.assert_name('mac', 'mavericks', 'mac-mavericks')
self.assert_name('mac-mavericks', 'mountainlion', 'mac-mavericks')
self.assert_name('mac', 'future', 'mac-future')
self.assert_name('mac-future', 'future', 'mac-future')
self.assertRaises(AssertionError, self.assert_name, 'mac-tiger', 'leopard', 'mac-leopard')
def test_setup_environ_for_server(self):
port = self.make_port(options=MockOptions(leaks=True, guard_malloc=True))
env = port.setup_environ_for_server(port.driver_name())
self.assertEqual(env['MallocStackLogging'], '1')
self.assertEqual(env['DYLD_INSERT_LIBRARIES'], '/usr/lib/libgmalloc.dylib:/mock-build/libWebCoreTestShim.dylib')
def _assert_search_path(self, port_name, baseline_path, search_paths, use_webkit2=False):
port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
absolute_search_paths = map(port._webkit_baseline_path, search_paths)
self.assertEqual(port.baseline_path(), port._webkit_baseline_path(baseline_path))
self.assertEqual(port.baseline_search_path(), absolute_search_paths)
def test_baseline_search_path(self):
# Note that we don't need total coverage here, just path coverage, since this is all data driven.
self._assert_search_path('mac-snowleopard', 'mac-snowleopard', ['mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac'])
self._assert_search_path('mac-lion', 'mac-lion', ['mac-lion', 'mac-mountainlion', 'mac'])
self._assert_search_path('mac-mountainlion', 'mac-mountainlion', ['mac-mountainlion', 'mac'])
self._assert_search_path('mac-mavericks', 'mac', ['mac'])
self._assert_search_path('mac-future', 'mac', ['mac'])
self._assert_search_path('mac-snowleopard', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac'], use_webkit2=True)
self._assert_search_path('mac-lion', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-lion', 'mac-mountainlion', 'mac'], use_webkit2=True)
self._assert_search_path('mac-mountainlion', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-mountainlion', 'mac'], use_webkit2=True)
self._assert_search_path('mac-mavericks', 'mac-wk2', ['mac-wk2', 'wk2', 'mac'], use_webkit2=True)
self._assert_search_path('mac-future', 'mac-wk2', ['mac-wk2', 'wk2', 'mac'], use_webkit2=True)
def test_show_results_html_file(self):
port = self.make_port()
# Delay setting a should_log executive to avoid logging from MacPort.__init__.
port._executive = MockExecutive(should_log=True)
expected_logs = "MOCK popen: ['Tools/Scripts/run-safari', '--release', '--no-saved-state', '-NSOpen', 'test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_default_child_processes(self):
port = self.make_port(port_name='mac-lion')
# MockPlatformInfo only has 2 mock cores. T
|
thaim/ansible
|
lib/ansible/modules/network/exos/exos_facts.py
|
Python
|
mit
| 5,863 | 0.000682 |
#!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: exos_facts
version_added: "2.7"
author:
- "Lance Richardson (@hlrichardson)"
- "Ujwal Koamrla (@ujwalkomarla)"
short_description: Collect facts from devices running Extreme EXOS
description:
- Collects a base set of device facts from a remote device that
is running EXOS. This module prepends all of the base network
fact keys with C(ansible_net_<fact>). The facts module will
always collect a base set of facts from the device and can
enable or disable collection of additional facts.
notes:
- Tested against EXOS 22.5.1.7
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
type: list
default: ['!config']
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all and the resources like interfaces, vlans etc.
Can specify a list of values to include a larger subset.
Values can also be used with an initial C(M(!)) to specify that
a specific subset should not be collected.
Valid subsets are 'all', 'lldp_global'.
type: list
version_added: "2.9"
"""
EXAMPLES = """
- name: Gather all legacy facts
exos_facts:
gather_subset: all
- name: Gather only the config and default facts
exos_facts:
gather_subset: config
- name: do not gather hardware facts
exos_facts:
gather_subset: "!hardware"
- name: Gather legacy and resource facts
exos_facts:
gather_subset: all
gather_network_resources: all
- name: Gather only the lldp global resource facts and no legacy facts
exos_facts:
gather_subset:
- '!all'
- '!min'
gather_network_resource:
- lldp_global
- name: Gather lldp global resource and minimal legacy facts
exos_facts:
gather_subset: min
gather_network_resource: lldp_global
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
ansible_net_gather_network_resources:
description: The list of fact for network resource subsets collected from the device
returned: when the resource is configured
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All Primary IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.exos.argspec.facts.facts import FactsArgs
from ansible.module_utils.network.exos.facts.facts import Facts
def main():
"""Main entry point for AnsibleModule
"""
argument_spec = FactsArgs.argument_spec
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = ['default value for `gather_subset` '
'will be changed to `min` from `!config` v2.11 onwards']
result = Facts(module).get_facts()
ansible_facts, additional_warnings = result
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
tkf/railgun
|
tests/check_memory_leak.py
|
Python
|
mit
| 1,554 | 0.000644 |
from __future__ import print_function
import sys
import numpy
from arrayaccess import gene_class_ArrayAccess
from test_arrayaccess import LIST_CDT, LIST_NUM
def main(iter_num, list_num, calloc):
clibname = 'arrayaccess.so'
ArrayAccess = gene_class_ArrayAccess(clibname, len(list_num), LIST_CDT)
ArrayAccess._calloc_ = calloc
if iter_num <= 10:
printnow = range(iter_num)
else:
printnow = numpy.linspace(
0, iter_num, num=10, endpoint=False).astype(int)
num_dict = dict(zip(ArrayAccess.num_names, list_num)) # {num_i: 6, ...}
assert ArrayAccess._calloc_ is bool(calloc)
print('[*/%d]:' % iter_num, end=' ')
sys.stdout.flush()
for i in range(iter_num):
ArrayAccess(**num_dict)
if i in printnow:
print(i, end=' ')
sys.stdout.flush()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-n", "--nums", default='100, 10, 10, 10, 10',
help="comma separated numbers")
parser.add_option("-t", "--time", default=1000, type=int)
parser.add_option("-c", "--calloc", default=1, type=int)
(opts, args) = parser.parse_args()
if opts.nums:
list_num = eval('[%s]' % opts.nums)
if len(list_num) != len(LIST_NUM):
raise RuntimeError ('%s numbers are expected. %s given.'
% (len(LIST_NUM), len(list_num)))
else:
list_num = LIST_NUM
main(opts.time, list_num, bool(opts.calloc))
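# Illustrative usage added for clarity; not part of the original script.
# The option values below are assumptions; -n must supply exactly as many
# numbers as LIST_NUM expects.
#
#   python check_memory_leak.py -n "100, 10, 10, 10, 10" -t 1000 -c 1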
|
BrambleLLC/HackAZ-2016
|
server/webapp/models.py
|
Python
|
mit
| 1,957 | 0.006643 |
from __init__ import redis_db
from werkzeug.security import generate_password_hash, check_password_hash
from os import urandom
from base64 import b64encode
class User(object):
def __init__(self):
self.username = "" # required
self.password_hash = "" # required
self.phone_number = "" # required
self.emergency_contact = "" # not required
self.secret_key = b64encode(urandom(64)).decode("utf-8")
self.contacts = set() # can be empty
def set_password(self, password):
self.password_hash = generate_password_hash(password, method="pbkdf2:sha256", salt_length=32)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def write_to_db(self):
user_dict = {"password_hash": self.password_hash, "phone_number": self.phone_number,
"secret_key": self.secret_key, "emergency_contact": self.emergency_contact}
redis_db.hmset(self.username, user_dict)
redis_db.delete(self.username + ":contacts")
if len(self.contacts):
redis_db.sadd(self.username + ":contacts", *self.contacts)
def deauthenticate(self):
self.secret_key = b64encode(urandom(64)).decode("utf-8")
@classmethod
def get_from_db(cls, username):
user_dict = redis_db.hmget(username, ["password_hash", "phone_number", "secret_key", "emergency_contact"])
fetched_user = User()
fetched_user.username = username
fetched_user.password_hash = user_dict[0]
fetched_user.phone_number = user_dict[1]
fetched_user.secret_key = user_dict[2]
fetched_user.emergency_contact = user_dict[3]
if not fetched_user.password_hash or not fetched_user.phone_number or not fetched_user.secret_key:
return None
else:
fetched_user.contacts = redis_db.smembers(fetched_user.username + ":contacts")
return fetched_user
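# Illustrative sketch added for clarity; not part of the original file.
# Typical round trip for the Redis-backed User model above; the username,
# password, and phone number are assumptions.
def _example_user_round_trip():
    user = User()
    user.username = "alice"
    user.phone_number = "+15551234567"
    user.set_password("hunter2")          # stores a pbkdf2:sha256 hash
    user.write_to_db()                    # persists hash, phone, key, contacts
    loaded = User.get_from_db("alice")
    return loaded is not None and loaded.verify_password("hunter2")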
|
MediaKraken/MediaKraken_Deployment
|
source/common/common_network_steam.py
|
Python
|
gpl-3.0
| 1,746 | 0.000573 |
"""
Copyright (C) 2018 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
from steam import SteamID
from bs4 import BeautifulSoup
from . import common_network
# https://developer.valvesoftware.com/wiki/Steam_Web_API
class CommonNetworkSteam:
"""
Class for interfacing with Valve Steam
"""
def __init__(self, access_token):
pass
    @staticmethod
    def com_net_steam_id_from_user(user_name):
        return SteamID.from_url('https://steamcommunity.com/id/%s' % user_name)
    @staticmethod
    def com_net_steam_game_server_data_download():
"""
Server ID SteamCMD > Steam Client > Anonymous Login > Notes
"""
steam_servers = []
data = BeautifulSoup(common_network.mk_network_fetch_from_url(
"https://developer.valvesoftware.com/wiki/Dedicated_Servers_List", None),
features="html.parser").find_all('table')[1]
rows = data.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
steam_servers.append([ele for ele in cols if ele])
print(steam_servers, flush=True)
return steam_servers
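# Usage sketch (illustrative only; requires network access and assumes the Valve
# wiki table layout is unchanged):
#
#   servers = CommonNetworkSteam.com_net_steam_game_server_data_download()
#   steam_id = CommonNetworkSteam.com_net_steam_id_from_user("some_vanity_name")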
|
cgwire/zou
|
zou/app/blueprints/projects/__init__.py
|
Python
|
agpl-3.0
| 3,136 | 0.000319 |
from flask import Blueprint
from zou.app.utils.api import configure_api_from_blueprint
from .resources import (
AllProjectsResource,
OpenProjectsResource,
ProductionTeamResource,
ProductionTeamRemoveResource,
ProductionAssetTypeResource,
ProductionAssetTypeRemoveResource,
ProductionTaskTypeResource,
ProductionTaskTypeRemoveResource,
ProductionTaskTypesResource,
ProductionTaskStatusResource,
ProductionTaskStatusRemoveResource,
ProductionMetadataDescriptorResource,
ProductionMetadataDescriptorsResource,
ProductionMilestonesResource,
ProductionScheduleItemsResource,
ProductionTaskTypeScheduleItemsResource,
ProductionAssetTypesScheduleItemsResource,
ProductionEpisodesScheduleItemsResource,
ProductionSequencesScheduleItemsResource,
ProductionTimeSpentsResource,
)
routes = [
("/data/projects/open", OpenProjectsResource),
("/data/projects/all", AllProjectsResource),
("/data/projects/<project_id>/team", ProductionTeamResource),
(
"/data/projects/<project_id>/task-types",
ProductionTaskTypesResource,
),
(
"/data/projects/<project_id>/team/<person_id>",
ProductionTeamRemoveResource,
),
(
"/data/projects/<project_id>/settings/asset-types",
ProductionAssetTypeResource,
),
(
"/data/projects/<project_id>/settings/asset-types/<asset_type_id>",
ProductionAssetTypeRemoveResource,
),
(
"/data/projects/<project_id>/settings/task-types",
ProductionTaskTypeResource,
),
(
"/data/projects/<project_id>/settings/task-types/<task_type_id>",
ProductionTaskTypeRemoveResource,
),
(
"/data/projects/<project_id>/settings/task-status",
ProductionTaskStatusResource,
),
(
"/data/projects/<project_id>/settings/task-status/<task_status_
|
id>",
ProductionTaskStatusRemoveResource,
),
(
"/data/projects/<project_id>/metadata-descriptors",
ProductionMetadataDescriptorsResource,
),
(
"/data/projects/<project_id>/metadata-descriptors/<descriptor_id>",
ProductionMetadataDescriptorResource,
),
("/data/projects/<project_id>/milestones", ProductionMilestonesResource),
(
"/data/projects/<project_id>/schedule-items",
ProductionScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/task-types",
ProductionTaskTypeScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/<task_type_id>/asset-types",
ProductionAssetTypesScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/<task_type_id>/episodes",
ProductionEpisodesScheduleItemsResource,
),
(
"/data/projects/<project_id>/schedule-items/<task_type_id>/sequences",
ProductionSequencesScheduleItemsResource,
),
("/data/projects/<project_id>/time-spents", ProductionTimeSpentsResource),
]
blueprint = Blueprint("projects", "projects")
api = configure_api_from_blueprint(blueprint, routes)
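# Registration sketch (not how zou itself wires things up; its app factory does
# the equivalent elsewhere): a bare Flask app could mount these project routes
# like so.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(blueprint, url_prefix="/api")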
|
Crop-R/django-mediagenerator
|
mediagenerator/management/commands/generatemedia.py
|
Python
|
bsd-3-clause
| 349 | 0.005731 |
from ...api import generate_media, prepare_media
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Combines and compresses your media files and saves them in _generated_media.'
requires_model_validation = False
def handle(self, *args, **options):
prepare_media()
generate_media()
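# Typical invocation (assumes django-mediagenerator is installed, listed in
# INSTALLED_APPS, and MEDIA_BUNDLES is configured in settings):
#
#   python manage.py generatemedia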
|