| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
---|---|---|---|---|---|---|---|---|
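Each row of this table is a single Python source file cut into three contiguous spans (prefix, middle, suffix) for fill-in-the-middle style training, which is why several samples below break off in the middle of an identifier at a column boundary. The following is a minimal sketch of how a row recombines into the original file; the row and its field values are made up for illustration and are not taken from the data.

```python
# Minimal sketch: reassembling one (hypothetical) row of this prefix/middle/suffix table.
# Metadata columns such as language, license, size and score are omitted for brevity.
row = {
    "repo_name": "example/hello",
    "path": "hello.py",
    "prefix": "def greet(name):\n    return 'Hel",
    "middle": "lo, ' + na",   # the middle span may start and end mid-token
    "suffix": "me\n\nprint(greet('world'))\n",
}

# prefix, middle and suffix are contiguous character spans of the original
# file, so plain concatenation restores the complete source.
source = row["prefix"] + row["middle"] + row["suffix"]
print(source)
```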
| JeffpanUK/NuPyTools | multiprocessTutorial.py | Python | mit | 1,085 | 0.029493 |
# Written by Vamei
import os
import multiprocessing
import time
#==================
# input worker
def inputQ(queue, info):
# info = str(os.getpid()) + '(put):' + str(time.time())
queue.put(info)
# output worker
def outputQ(queue,lock):
info = queue.get()
print(info)
# lock.acquire()
# print (str(os.getpid()) + '(get):' + info)
# lock.release()
#===================
# Main
if __name__ == "__main__":
record1 = [] # store input processes
record2 = [] # store output processes
lock = multiprocessing.Lock() # To prevent messy print
queue = multiprocessing.Queue(3)
a = range(10)
# input processes
for i in a:
|
process = multiprocessing.Process(target=inputQ,args=(queue,i))
process.start()
record1.append(process)
# output processes
for i in range(10):
process = multiprocessing.Process(target=outputQ,args=(queue,lock))
process.start()
record2.append(process)
for p in record1:
p.join()
    queue.close() # No more objects will come, close the queue
for p in record2:
p.join()
|
| spennihana/h2o-3 | h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_ascharacter.py | Python | apache-2.0 | 734 | 0.013624 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_ascharacter():
"""
Python API test: h2o.frame.H2OFrame.ascharacter()
Copied from pyunit_ascharacter.py
"""
h2oframe = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv"))
    newFrame = h2oframe['cylinders'].ascharacter()
assert_is_type(newFrame, H2OFrame)
assert newFrame.isstring()[0], "h2o.H2OFrame.ascharacter() command is not working."
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2o_H2OFrame_ascharacter())
else:
h2o_H2OFrame_ascharacter()
|
| rpedigoni/trackr | trackr/carriers/fake.py | Python | mit | 572 | 0 |
from datetime import datetime
from .base import BaseCarrier
class FakeCarrier(BaseCarrier):
id = 'fake'
    name = 'Fake Carrier'
def _track_single(self, object_id):
package = self.create_package(
object_id=object_id,
service_name='Default',
)
for i in range(1, 5):
package.add_tracking_info(
date=datetime.now(),
location='City {}'.format(i),
|
status='In transit {}'.format(i),
description='Wow',
)
return package
|
| OaklandPeters/pyinterfaces | pyinterfaces/valueabc/__init__.py | Python | mit | 498 | 0 |
"""
Provides ValueMeta metaclass - which allows its descendants to override
__instancecheck__ and __subclasscheck__ to be used as
*classmethods*
"""
from __future__ import absolute_import
__all__ = [
'ValueMeta',
'ValueABC',
'InterfaceType',
'ExistingDirectory',
'ExistingFile'
]
from .existing_directory import ExistingDirectory
from .existing_file import ExistingFile
from .interface_type import InterfaceType
from .valueabc import ValueABC
from .valuemeta import ValueMeta
|
| jungla/ICOM-fluidity-toolbox | 2D/RST/extract_Scalar_temp.py | Python | gpl-2.0 | 3,216 | 0.03949 |
try: paraview.simple
except: from paraview.simple import *
import numpy as np
from mpi4py import MPI
import os
import csv
from scipy import interpolate
import gc
import sys
gc.enable()
comm = MPI.COMM_WORLD
label = 'm_25_3b'
labelo = 'm_25_3b'
basename = 'mli'
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
tt = int(sys.argv[1]) - 1
labelo = sys.argv[2]
label = sys.argv[2]
basename = sys.argv[3]
field = sys.argv[4]
resx = int(sys.argv[5])
resy = int(sys.argv[6])
path = '/scratch/jmensa/'+label+'/'
Xlist = np.linspace(0,10000,resx)
Ylist = np.linspace(0,4000,resy)
#Xlist = np.linspace(0,10000,resx)
#Ylist = np.linspace(0,4000,resy)
Zlist = np.linspace(0,-50,51)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = np.cumsum(dl)
[X,Y] = np.meshgrid(Xlist,Ylist)
size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
nl = len(Zlist)/size
ll = len(Zlist)%size
mli_pvtu = XMLPartitionedUnstructuredGridReader( FileName=[path+'/'+basename+'_'+str(tt)+'.pvtu'] )
mli_pvtu.PointArrayStatus = [field]
sliceFilter = Slice(mli_pvtu)
sliceFilter.SliceType.Normal = [0,0,1]
if rank == 0:
Tr = np.zeros((len(Ylist),len(Xlist),len(Zlist)))
for n in range(nl+ll):
layer = n+rank*nl
print 'layer:', rank, layer
sliceFilter.SliceType.Origin = [0,0,-1*Zlist[layer]]
DataSliceFile = paraview.servermanager.Fetch(sliceFilter)
points = DataSliceFile.GetPoints()
  numPoints = DataSliceFile.GetNumberOfPoints()
#
data=np.zeros((numPoints))
coords=np.zeros((numPoints,3))
#
for x in xrange(numPoints):
data[x] = DataSliceFile.GetPointData().GetArray(field).GetValue(x)
coords[x] = points.GetPoint(x)
Tr[:,:,layer] = interpolate.griddata((coords[:,0],coords[:,1]),data,(X,Y),method='linear')
# print rank, Tr[:,:,:]
if rank > 0:
Tr = np.zeros((len(Ylist),len(Xlist),nl))
for n in xrange(nl):
layer = n+rank*nl
print 'layer:', rank, layer
sliceFilter.SliceType.Origin = [0,0,-1*Zlist[layer]]
DataSliceFile = paraview.servermanager.Fetch(sliceFilter)
points = DataSliceFile.GetPoints()
numPoints = DataSliceFile.GetNumberOfPoints()
#
data=np.zeros((numPoints))
coords=np.zeros((numPoints,3))
#
for x in xrange(numPoints):
data[x] = DataSliceFile.GetPointData().GetArray(field).GetValue(x)
coords[x] = points.GetPoint(x)
Tr[:,:,n] = interpolate.griddata((coords[:,0],coords[:,1]),data,(X,Y),method='linear')
# print rank, Tr[:,:,:]
comm.send(nl*rank+ll, dest=0, tag=10)
comm.send(Tr, dest=0, tag=11)
if rank == 0:
for s in range(size-1):
print 's', s+1
l = comm.recv(source=s+1, tag=10)
print 'l', l
Tr[:,:,l:l+nl] = comm.recv(source=s+1, tag=11)
print Tr
fd = open('./csv/'+field+'_'+labelo+'_'+str(tt)+'.csv','w')
print Tr[:,:,:]
for z in xrange(len(Zlist)):
print z
for j in xrange(len(Ylist)):
for i in xrange(len(Xlist)):
fd.write(str(Tr[j,i,z])+', ')
fd.write('\n')
fd.close()
del mli_pvtu, Tr, coords, data, numPoints, points, DataSliceFile, sliceFilter
gc.collect()
|
| herove/dotfiles | sublime/Packages/Package Control/package_control/providers/release_selector.py | Python | mit | 2,521 | 0.003173 |
import re
import sublime
from ..versions import version_exclude_prerelease
def filter_releases(package, settings, releases):
"""
Returns all releases in the list of releases that are compatible with
the current platform and version of Sublime Text
:param package:
The name of the package
:param settings:
A dict optionally containing the `install_prereleases` key
:param releases:
A list of release dicts
:return:
A list of release dicts
"""
platform_selectors = [sublime.platform() + '-' + sublime.arch(),
sublime.platform(), '*']
install_prereleases = settings.get('install_prereleases')
allow_prereleases = install_prereleases is True
if not allow_prereleases and isinstance(install_prereleases, list) and package in install_prereleases:
allow_prereleases = True
if not allow_prereleases:
releases = version_exclude_prerelease(releases)
output = []
for release in releases:
platforms = release.get('platforms', '*')
if not isinstance(platforms, list):
platforms = [platforms]
matched = False
for selector in platform_selectors:
if selector in platforms:
matched = True
break
if not matched:
continue
# Default to '*' (for legacy reasons), see #604
if not is_compatible_version(release.get('sublime_text', '*')):
continue
        output.append(release)
return output
|
def is_compatible_version(version_range):
min_version = float("-inf")
max_version = float("inf")
if version_range == '*':
return True
gt_match = re.match('>(\d+)$', version_range)
ge_match = re.match('>=(\d+)$', version_range)
lt_match = re.match('<(\d+)$', version_range)
le_match = re.match('<=(\d+)$', version_range)
range_match = re.match('(\d+) - (\d+)$', version_range)
if gt_match:
min_version = int(gt_match.group(1)) + 1
elif ge_match:
min_version = int(ge_match.group(1))
elif lt_match:
max_version = int(lt_match.group(1)) - 1
elif le_match:
max_version = int(le_match.group(1))
elif range_match:
min_version = int(range_match.group(1))
max_version = int(range_match.group(2))
else:
return None
if min_version > int(sublime.version()):
return False
if max_version < int(sublime.version()):
return False
return True
|
| oaubert/TwitterFontana | backend/src/app.py | Python | mit | 7,176 | 0.007664 |
import flask
import json
import bson
import os
from flask import request, redirect
import sys
from fontana import twitter
import pymongo
DEFAULT_PORT = 2014
DB = 'fontana'
connection = pymongo.Connection("localhost", 27017)
db = connection[DB]
latest_headers = {}
MODERATED_SIZE = 40
class MongoEncoder(json.JSONEncoder):
def default(self, obj, **kwargs):
if isinstance(obj, bson.ObjectId):
return str(obj)
else:
            return json.JSONEncoder.default(obj, **kwargs)
app = flask.Flask('fontana')
def twitter_authorisation_begin():
"""
Step 1 and 2 of the Twitter oAuth flow.
"""
callback = absolute_url('twitter_signin')
if 'next' in flask.request.args:
        callback = '%s?next=%s' % (callback, flask.request.args['next'])
try:
token = twitter.request_token(app.config, callback)
flask.session['twitter_oauth_token'] = token['oauth_token']
flask.session['twitter_oauth_token_secret'] = token['oauth_token_secret']
return flask.redirect(twitter.authenticate_url(token, callback))
except twitter.TwitterException, e:
return flask.abort(403, str(e))
def twitter_authorisation_done():
"""
Step 3 of the Twitter oAuth flow.
"""
if 'oauth_token' in flask.request.args:
token = flask.request.args
if flask.session['twitter_oauth_token'] != token['oauth_token']:
return flask.abort(403, 'oauth_token mismatch!')
auth = twitter.access_token(app.config, token)
flask.session['twitter_oauth_token'] = auth['oauth_token']
flask.session['twitter_oauth_token_secret'] = auth['oauth_token_secret']
flask.session['twitter_user_id'] = auth['user_id']
flask.session['twitter_screen_name'] = auth['screen_name']
if 'next' in flask.request.args:
return flask.redirect(flask.request.args['next'])
else:
return 'OK'
elif 'denied' in flask.request.args:
return flask.abort(403, 'oauth denied')
else:
return flask.abort(403, 'unknown sign in failure')
@app.route('/api/twitter/session/new/')
def twitter_signin():
"""
Handles the Twitter oAuth flow.
"""
args = flask.request.args
if not args or (len(args) == 1 and 'next' in args):
return twitter_authorisation_begin()
else:
return twitter_authorisation_done()
@app.route('/api/twitter/session/')
def twitter_session():
"""
Check for an active Twitter session. Returns a JSON response with the
active sceen name or a 403 if there is no active session.
"""
if not flask.session.get('twitter_user_id'):
return flask.abort(403, 'no active session')
return (json.dumps({
'screen_name': flask.session['twitter_screen_name']
}), 200, {'content-type': 'application/json'})
@app.route('/api/twitter/search/')
def twitter_search():
"""
Perform a Twitter search
"""
global latest_headers
if not flask.session.get('twitter_user_id'):
return flask.abort(403, 'no active session')
token = {
'oauth_token': flask.session['twitter_oauth_token'],
'oauth_token_secret': flask.session['twitter_oauth_token_secret']
}
# Find out last id
last = db['tweets'].aggregate( { '$group': { '_id':"", 'last': { '$max': "$id" } } } )
since_id = long(flask.request.args.get('since_id'))
params = dict(flask.request.args)
if last.get("ok") == 1 and last['result']:
last = long(last['result'][0]['last'])
params['since_id'] = max(last, since_id)
# Query twitter and cache result into DB
(text, status_code, headers) = twitter.search(app.config, token, params)
data = json.loads(text)
for s in data['statuses']:
s['exclude'] = s['text'].startswith('RT ')
s['classes'] = []
if s['text'].startswith('RT '):
s['classes'].append('RT')
if '?' in s['text']:
s['classes'].append('question')
# Use tweet id as _id so that save will replace existing tweets if necessary
s['_id'] = s['id']
db['tweets'].save(s)
latest_headers = dict(headers)
return (text, status_code, headers)
@app.route('/moderated')
def twitter_moderated():
"""
Return moderated posts
"""
return (json.dumps({ 'statuses': [ s for s in db['tweets'].find({ 'exclude': False }).sort([('id', -1)]).limit(MODERATED_SIZE) ]},
indent=None if request.is_xhr else 2,
cls=MongoEncoder),
200,
{'content-type': 'application/json'})
@app.route('/all')
def twitter_all():
"""
Return all cached posts
"""
since_id = long(request.values.get('since_id', 0))
return (json.dumps({ 'statuses': [ s for s in db['tweets'].find({ 'id': { '$gt': since_id } }).sort([ ('id', -1) ]) ]},
indent=None if request.is_xhr else 2,
cls=MongoEncoder),
200,
latest_headers)
@app.route('/exclude/<path:ident>')
def exclude(ident):
"""Exclude given post.
"""
db['tweets'].update( { 'id_str': ident },
{ '$set': { 'exclude': True } })
return redirect('/admin.html')
@app.route('/set_moderated/<int:length>')
def set_moderated_length(length):
"""Set moderated queue length
"""
global MODERATED_SIZE
if length > 2 and length < 100:
MODERATED_SIZE = length
return redirect('/admin.html')
@app.route('/include/<path:ident>')
def include(ident):
"""Include given post.
"""
db['tweets'].update( { 'id_str': ident },
{ '$set': { 'exclude': False } })
return redirect('/admin.html')
@app.route('/api/session/clear/', methods=['POST'])
def signout():
"""
Perform a sign out, clears the user's session.
"""
flask.session.clear()
return 'OK'
def absolute_url(name):
"""
Flask's url_for with added SERVER_NAME
"""
host = app.config['SERVER_NAME'] or ('localhost:' + str(DEFAULT_PORT))
url = flask.url_for(name)
return 'http://%s%s' % (host, url)
def devserver(extra_conf=None):
"""
Start a development server
"""
from werkzeug.wsgi import SharedDataMiddleware
# Load the "example" conf
root = app.root_path.split(os.path.dirname(__file__))[0]
conf = os.path.join(root, 'backend', 'var', 'conf', 'fontana-example.conf')
app.config.from_pyfile(conf)
if extra_conf:
app.config.from_pyfile(os.path.join(root, extra_conf))
# Serve the frontend files
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/': app.config['STATIC_DIR']
})
# Setup a index.html redirect for convenience sake.
app.route('/')(lambda: flask.redirect('index.html'))
# Run the development or production server
if app.config.get('PROD'):
app.run(debug=False, host='0.0.0.0', port=DEFAULT_PORT)
else:
app.run()
if __name__ == "__main__":
# This will get invoked when you run `python backend/src/fontana.py`
if len(sys.argv) == 2:
devserver(sys.argv[1])
else:
devserver()
|
| jesonyang001/qarepo | askbot/context.py | Python | gpl-3.0 | 5,245 | 0.004385 |
"""Askbot template context processor that makes some parameters
from the django settings, all parameters from the askbot livesettings
and the application available for the templates
"""
import sys
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import simplejson
import askbot
from askbot import api
from askbot import models
from askbot import const
from askbot.conf import settings as askbot_settings
from askbot.search.state_manager import SearchState
from askbot.utils import url_utils
from askbot.utils.slug import slugify
from askbot.utils.html import site_url
from askbot.utils.translation import get_language
def application_settings(request):
"""The context processor function"""
#if not request.path.startswith('/' + settings.ASKBOT_URL):
# #todo: this is a really ugly hack, will only work
# #when askbot is installed not at the home page.
# #this will not work for the
# #heavy modders of askbot, because their custom pages
# #will not receive the askbot settings in the context
# #to solve this properly we should probably explicitly
# #add settings to the context per page
# return {}
my_settings = askbot_settings.as_dict()
my_settings['LANGUAGE_CODE'] = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE)
my_settings['MULTILINGUAL'] = getattr(settings, 'ASKBOT_MULTILINGUAL', False)
my_settings['LANGUAGES_DICT'] = dict(getattr(settings, 'LANGUAGES', []))
my_settings['ALLOWED_UPLOAD_FILE_TYPES'] = \
settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES
my_settings['ASKBOT_URL'] = settings.ASKBOT_URL
my_settings['STATIC_URL'] = settings.STATIC_URL
my_settings['IP_MODERATION_ENABLED'] = getattr(settings, 'ASKBOT_IP_MODERATION_ENABLED', False)
my_settings['USE_LOCAL_FONTS'] = getattr(
settings,
'ASKBOT_USE_LOCAL_FONTS',
False
)
my_settings['CSRF_COOKIE_NAME'] = settings.CSRF_COOKIE_NAME
my_settings['DEBUG'] = settings.DEBUG
my_settings['USING_RUNSERVER'] = 'runserver' in sys.argv
my_settings['ASKBOT_VERSION'] = askbot.get_version()
my_settings['LOGIN_URL'] = url_utils.get_login_url()
my_settings['LOGOUT_URL'] = url_utils.get_logout_url()
if my_settings['EDITOR_TYPE'] == 'tinymce':
tinymce_plugins = settings.TINYMCE_DEFAULT_CONFIG.get('plugins', '').split(',')
my_settings['TINYMCE_PLUGINS'] = map(lambda v: v.strip(), tinymce_plugins)
else:
my_settings['TINYMCE_PLUGINS'] = [];
my_settings['LOGOUT_REDIRECT_URL'] = url_utils.get_logout_redirect_url()
my_settings['USE_ASKBOT_LOGIN_SYSTEM'] = 'askbot.deps.django_authopenid' \
in settings.INSTALLED_APPS
current_language = get_language()
#for some languages we will start searching for shorter words
if current_language == 'ja':
#we need to open the search box and show info message about
#the japanese lang search
min_search_word_length = 1
else:
min_search_word_length = my_settings['MIN_SEARCH_WORD_LENGTH']
need_scope_links = askbot_settings.ALL_SCOPE_ENABLED or \
askbot_settings.UNANSWERED_SCOPE_ENABLED or \
(request.user.is_authenticated() and askbot_settings.FOLLOWED_SCOPE_ENABLED)
    context = {
'base_url': site_url(''),
'empty_search_state': SearchState.get_empty(),
'min_search_word_length': min_search_word_length,
'current_language_code': current_language,
'settings': my_settings,
'moderation_items': api.get_info_on_moderation_items(request.user),
'need_scope_links': need_scope_links,
'noscript_url': const.DEPENDENCY_URLS['noscript'],
|
}
if askbot_settings.GROUPS_ENABLED:
#calculate context needed to list all the groups
def _get_group_url(group):
"""calculates url to the group based on its id and name"""
group_slug = slugify(group['name'])
return reverse(
'users_by_group',
kwargs={'group_id': group['id'], 'group_slug': group_slug}
)
#load id's and names of all groups
global_group = models.Group.objects.get_global_group()
groups = models.Group.objects.exclude_personal()
groups = groups.exclude(id=global_group.id)
groups_data = list(groups.values('id', 'name'))
#sort groups_data alphanumerically, but case-insensitive
groups_data = sorted(
groups_data,
lambda x, y: cmp(x['name'].lower(), y['name'].lower())
)
#insert data for the global group at the first position
groups_data.insert(0, {'id': global_group.id, 'name': global_group.name})
#build group_list for the context
group_list = list()
for group in groups_data:
link = _get_group_url(group)
group_list.append({'name': group['name'], 'link': link})
context['group_list'] = simplejson.dumps(group_list)
return context
|
| aphaea/exc-MapReduce-UoE | three_wrd_seq_count/mapper.py | Python | mit | 441 | 0.011338 |
#!/usr/bin/python
# Find all the three word sequences
import sys
for line in sys.stdin:
tok = line.strip().split()
if len(tok)>2:
for i in range(1, len(tok)-1):
            word1, word2, word3 = tok[i-1], tok[i], tok[i+1]
word_str = word1 + " " + word2 + " " + word3
print(word_str + "\t1")
|
else:
continue
|
| engdan77/edoAutoHomeMobile | twisted/internet/interfaces.py | Python | mit | 90,290 | 0.001207 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface documentation.
Maintainer: Itamar Shtull-Trauring
"""
from __future__ import division, absolute_import
from zope.interface import Interface, Attribute
from twisted.python import deprecate
from twisted.python.versions import Version
class IAddress(Interface):
"""
An address, e.g. a TCP C{(host, port)}.
Default implementations are in L{twisted.internet.address}.
"""
### Reactor Interfaces
class IConnector(Interface):
"""
Object used to interface between connections and protocols.
Each L{IConnector} manages one connection.
"""
def stopConnecting():
"""
Stop attempting to connect.
"""
def disconnect():
"""
Disconnect regardless of the connection state.
If we are connected, disconnect, if we are trying to connect,
stop trying.
"""
def connect():
"""
Try to connect to remote address.
"""
def getDestination():
"""
Return destination this will try to connect to.
@return: An object which provides L{IAddress}.
"""
class IResolverSimple(Interface):
def getHostByName(name, timeout = (1, 3, 11, 45)):
"""
Resolve the domain name C{name} into an IP address.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{twisted.internet.defer.Deferred}
@return: The callback of the Deferred that is returned will be
passed a string that represents the IP address of the
specified name, or the errback will be called if the
lookup times out. If multiple types of address records
are associated with the name, A6 records will be returned
in preference to AAAA records, which will be returned in
preference to A records. If there are multiple records of
the type to be returned, one will be selected at random.
@raise twisted.internet.defer.TimeoutError: Raised
(asynchronously) if the name cannot be resolved within the
specified timeout period.
"""
class IResolver(IResolverSimple):
def query(query, timeout=None):
"""
Dispatch C{query} to the method which can handle its type.
@type query: L{twisted.names.dns.Query}
@param query: The DNS query being issued, to which a response is to be
generated.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress(name, timeout=None):
"""
Perform an A record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress6(name, timeout=None):
"""
Perform an A6 record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupIPV6Address(name, timeout=None):
"""
Perform an AAAA record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
            tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailExchange(name, timeout=None):
"""
Perform an MX record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNameservers(name, timeout=None):
"""
Perform an NS record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupCanonicalName(name, timeout=None):
"""
Perform a CNAME record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype:
|
| folivetti/PI-UFABC | AULA_01/Python/operadores.py | Python | mit | 2,187 | 0.038866 |
# for the numeric types we have the following operators:
# + - * / % **
print "Numeros inteiros:"
x = 10
y = 3
print x, "+", y, "=", x + y
print x, "+", y, "=", x - y
print x, "+", y, "=", x*y
print x, "+", y, "=", x/y # repare como o resultado eh um inteiro
print x, "+", y, "=", x % y # esse eh o resto da divisao
print x, "+", y, "=", x**y # esse eh o operador potencia, x elevado a potencia de y
print x, "(",bin(x),") & ",y,"(",bin(y),") =", x&y # operador binario E
print x, "(",bin(x),") | ",y,"(",bin(y),") =", x|y # bitwise OR operator
print x, "(",bin(x),") ^ ",y,"(",bin(y),") =", x^y # bitwise XOR operator
print x," igual a ",y,"? ", x==y
print x," diferente de ",y,"? ", x!=y
print x," maior que ",y,"? ", x>y
print x," menor que ",y,"? ", x<y
print x," maior ou igual a ",y,"? ", x>=y
print x," menor ou igual a ",y,"? ", x<=y
print "\nNumeros em ponto flutuante: "
x = 10.0
y = 3.0
print x, "+", y, "=", x + y
print x, "+", y, "=", x - y
print x, "+", y, "=", x*y
print x, "+", y, "=", x/y # agora eh um numero real
print x, "+", y, "=", x % y # esse eh o resto da divisao
print x, "+", y, "=", x**y # esse eh o operador potencia, x elevado a potencia de y
print "\nNumeros complexos:"
x = 1 + 1j
y = 2 + 1j
print x, "+", y, "=", x + y
print x, "+", y, "=", x - y
print x, "+", y, "=", x*y
print x, "+", y, "=", x/y # agora eh um numero real
print x, "+", y, "=", x % y # esse eh o resto da divisao
print x, "+", y, "=", x**y # esse eh o operador potencia, x elevado a potencia de y
print "\nVariaveis Booleanas:"
# now x is a boolean (logical) variable
x = True
y = False
print "Nao ", x, "=", not x
print x," ou ",y,"=",x or y
print x," e ",y,"=",x and y
x = 10
y = 3
print x, " maior que ", y, " OU ", x, " menor que ", y, "? ", x>y or x<y
print x, " maior que ", y, " E ", x, " menor que ", y, "? ", x>y and x<y
print "\nOperacao com Strings:"
x = "Ola "
y = "Mundo"
print x," + ",y," = ",x+y
print x," *2 = ",x*2
print x,"*2 + ",y," = ",x*2 + y
print "Letra na posicao 0 de x = ",x[0]
print "Concatenar as 3 primeiras letras de x com y = ",x[0:3] + y
# Relational operators
print "Tem 'a' em Ola? ", "a" in x
print "Nao tem 'b' em Ola? ", "b" not in x
|
| tomkralidis/pycsw | pycsw/ogc/fes/fes2.py | Python | mit | 19,080 | 0.002358 |
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from pycsw.core import util
from pycsw.core.etree import etree
from pycsw.ogc.gml import gml3
LOGGER = logging.getLogger(__name__)
MODEL = {
'Conformance': {
'values': [
'ImplementsQuery',
'ImplementsAdHocQuery',
'ImplementsFunctions',
'ImplementsResourceld',
'ImplementsMinStandardFilter',
'ImplementsStandardFilter',
'ImplementsMinSpatialFilter',
'ImplementsSpatialFilter',
'ImplementsMinTemporalFilter',
'ImplementsTemporalFilter',
'ImplementsVersionNav',
'ImplementsSorting',
'ImplementsExtendedOperators',
'ImplementsMinimumXPath',
'ImplementsSchemaElementFunc'
]
},
'GeometryOperands': {
'values': gml3.TYPES
},
'SpatialOperators': {
'values': ['BBOX', 'Beyond', 'Contains', 'Crosses', 'Disjoint',
'DWithin', 'Equals', 'Intersects', 'Overlaps', 'Touches', 'Within']
},
'ComparisonOperators': {
'fes20:PropertyIsBetween': {'opname': 'PropertyIsBetween', 'opvalue': 'and'},
'fes20:PropertyIsEqualTo': {'opname': 'PropertyIsEqualTo', 'opvalue': '='},
'fes20:PropertyIsGreaterThan': {'opname': 'PropertyIsGreaterThan', 'opvalue': '>'},
'fes20:PropertyIsGreaterThanOrEqualTo': {
'opname': 'PropertyIsGreaterThanOrEqualTo', 'opvalue': '>='},
'fes20:PropertyIsLessThan': {'opname': 'PropertyIsLessThan', 'opvalue': '<'},
'fes20:PropertyIsLessThanOrEqualTo': {
'opname': 'PropertyIsLessThanOrEqualTo', 'opvalue': '<='},
'fes20:PropertyIsLike': {'opname': 'PropertyIsLike', 'opvalue': 'like'},
'fes20:PropertyIsNotEqualTo': {'opname': 'PropertyIsNotEqualTo', 'opvalue': '!='},
'fes20:PropertyIsNull': {'opname': 'PropertyIsNull', 'opvalue': 'is null'},
},
'Functions': {
'length': {'returns': 'xs:string'},
'lower': {'returns': 'xs:string'},
'ltrim': {'returns': 'xs:string'},
'rtrim': {'returns': 'xs:string'},
'trim': {'returns': 'xs:string'},
'upper': {'returns': 'xs:string'},
},
'Ids': {
'values': ['csw30:id']
}
}
def parse(element, queryables, dbtype, nsmap, orm='sqlalchemy', language='english', fts=False):
"""OGC Filter object support"""
boq = None
is_pg = dbtype.startswith('postgresql')
tmp = element.xpath('fes20:And|fes20:Or|fes20:Not', namespaces=nsmap)
if len(tmp) > 0: # this is binary logic query
element_name = etree.QName(tmp[0]).localname
boq = ' %s ' % element_name.lower()
LOGGER.debug('Binary logic detected; operator=%s', boq)
tmp = tmp[0]
else:
tmp = element
pvalue_serial = [0]
def assign_param():
if orm == 'django':
return '%s'
param = ':pvalue%d' % pvalue_serial[0]
pvalue_serial[0] += 1
return param
def _get_comparison_expression(elem):
"""return the SQL expression based on Filter query"""
fname = None
matchcase = elem.attrib.get('matchCase')
wildcard = elem.attrib.get('wildCard')
singlechar = elem.attrib.get('singleChar')
expression = None
if wildcard is None:
wildcard = '%'
if singlechar is None:
singlechar = '_'
if (elem.xpath('child::*')[0].tag ==
util.nspath_eval('fes20:Function', nsmap)):
|
LOGGER.debug('fes20:Function detected')
if (elem.xpath('child::*')[0].attrib['name'] not in
MODEL['Functions']):
raise RuntimeError('Invalid fes20:Function: %s' %
(elem.xpath('child::*')[0].attrib['name']))
            fname = elem.xpath('child::*')[0].attrib['name']
try:
LOGGER.debug('Testing existence of fes20:ValueReference')
pname = queryables[elem.find(util.nspath_eval('fes20:Function/fes20:ValueReference', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' % (elem.find(util.nspath_eval('fes20:Function/fes20:ValueReference', nsmap)).text, str(err))) from err
else:
try:
LOGGER.debug('Testing existence of fes20:ValueReference')
pname = queryables[elem.find(
util.nspath_eval('fes20:ValueReference', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' %
(elem.find(util.nspath_eval('fes20:ValueReference',
nsmap)).text, str(err))) from err
if (elem.tag != util.nspath_eval('fes20:PropertyIsBetween', nsmap)):
if elem.tag in [util.nspath_eval('fes20:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
boolean_true = '\'true\''
boolean_false = '\'false\''
if dbtype == 'mysql':
boolean_true = 'true'
boolean_false = 'false'
return "%s = %s" % (_get_spatial_operator(queryables['pycsw:BoundingBox'], elem, dbtype, nsmap), boolean_true)
else:
pval = elem.find(util.nspath_eval('fes20:Literal', nsmap)).text
com_op = _get_comparison_operator(elem)
LOGGER.debug('Comparison operator: %s', com_op)
# if this is a case insensitive search
# then set the DB-specific LIKE comparison operator
LOGGER.debug('Setting csw:AnyText property')
anytext = queryables['csw:AnyText']['dbcol']
if ((matchcase is not None and matchcase == 'false') or
pname == anytext):
com_op = 'ilike' if is_pg else 'like'
if (elem.tag == util.nspath_eval('fes20:PropertyIsBetween', nsmap)):
com_op = 'between'
lower_boundary = elem.find(
util.nspath_eval('fes20:LowerBoundary/fes20:Literal',
nsmap)).text
upper_boundary = elem.find(
util.nspath_eval('fes20:UpperBoundary/fes20:Literal',
nsmap)).text
expression = "%s %s %s and %s" % \
(pname, com_op, assign_param(), assign_param())
values.append(lower_boundary)
values.append(upper_boundary)
else:
if pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
# do nothing, let FTS do conversion (#212)
pvalue = pval
else:
|
| mineo/picard | picard/util/icontheme.py | Python | gpl-2.0 | 2,350 | 0.00213 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
import sys
from PyQt5 import QtGui
if sys.platform == 'win32':
_search_paths = []
else:
_search_paths = [
os.path.expanduser('~/.icons'),
os.path.join(os.environ.get('XDG_DATA_DIRS', '/usr/share'), 'icons'),
'/usr/share/pixmaps',
]
_current_theme = None
if 'XDG_CURRENT_DESKTOP' in os.environ:
desktop = os.environ['XDG_CURRENT_DESKTOP'].lower()
if desktop in ('gnome', 'unity'):
_current_theme = (os.popen('gsettings get org.gnome.desktop.interface icon-theme').read().strip()[1:-1]
or None)
elif os.environ.get('KDE_FULL_SESSION'):
_current_theme = (os.popen("kreadconfig --file kdeglobals --group Icons --key Theme --default crystalsvg").read().strip()
or None)
ICON_SIZE_MENU = ('16x16',)
ICON_SIZE_TOOLBAR = ('22x22',)
ICON_SIZE_ALL = ('22x22', '16x16')
def lookup(name, size=ICON_SIZE_ALL):
icon = QtGui.QIcon()
if _current_theme:
for path in _search_paths:
for subdir in ('actions', 'places', 'devices'):
fullpath = os.path.join(path, _current_theme, size[0], subdir, name)
if os.path.exists(fullpath + '.png'):
icon.addFile(fullpath + '.png')
for s in size[1:]:
icon.addFile(os.path.join(path, _current_theme, s, subdir, name) + '.png')
return icon
for s in size:
icon.addFile('/'.join([':', 'images', s, name]) + '.png')
return icon
|
| arrayfire/arrayfire_python | arrayfire/statistics.py | Python | bsd-3-clause | 8,560 | 0.003271 |
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
Statistical algorithms (mean, var, stdev, etc).
"""
from .library import *
from .array import *
def mean(a, weights=None, dim=None):
"""
Calculate mean along a given dimension.
Parameters
----------
a: af.Array
The input array.
weights: optional: af.Array. default: None.
Array to calculate the weighted mean. Must match size of the
input array.
dim: optional: int. default: None.
The dimension for which to obtain the mean from input data.
Returns
-------
output: af.Array
Array containing the mean of the input array along a given
dimension.
"""
if dim is not None:
out = Array()
if weights is None:
safe_call(backend.get().af_mean(c_pointer(out.arr), a.arr, c_int_t(dim)))
else:
safe_call(backend.get().af_mean_weighted(c_pointer(out.arr), a.arr, weights.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
if weights is None:
safe_call(backend.get().af_mean_all(c_pointer(real), c_pointer(imag), a.arr))
else:
safe_call(backend.get().af_mean_all_weighted(c_pointer(real), c_pointer(imag), a.arr, weights.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def var(a, isbiased=False, weights=None, dim=None):
"""
Calculate variance along a given dimension.
Parameters
----------
a: af.Array
The input array.
isbiased: optional: Boolean. default: False.
Boolean denoting population variance (false) or sample
variance (true).
weights: optional: af.Array. default: None.
Array to calculate for the weighted mean. Must match size of
the input array.
dim: optional: int. default: None.
The dimension for which to obtain the variance from input data.
Returns
-------
output: af.Array
Array containing the variance of the input array along a given
dimension.
"""
if dim is not None:
out = Array()
if weights is None:
safe_call(backend.get().af_var(c_pointer(out.arr), a.arr, isbiased, c_int_t(dim)))
else:
safe_call(backend.get().af_var_weighted(c_pointer(out.arr), a.arr, weights.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
if weights is None:
safe_call(backend.get().af_var_all(c_pointer(real), c_pointer(imag), a.arr, isbiased))
else:
safe_call(backend.get().af_var_all_weighted(c_pointer(real), c_pointer(imag), a.arr, weights.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def meanvar(a, weights=None, bias=VARIANCE.DEFAULT, dim=-1):
"""
Calculate mean and variance along a given dimension.
Parameters
----------
a: af.Array
The input array.
weights: optional: af.Array. default: None.
Array to calculate for the weighted mean. Must match size of
the input array.
bias: optional: af.VARIANCE. default: DEFAULT.
population variance(VARIANCE.POPULATION) or
sample variance(VARIANCE.SAMPLE).
dim: optional: int. default: -1.
The dimension for which to obtain the variance from input data.
Returns
-------
mean: af.Array
Array containing the mean of the input array along a given
dimension.
variance: af.Array
Array containing the variance of the input array along a given
dimension.
"""
mean_out = Array()
var_out = Array()
if weights is None:
weights = Array()
safe_call(backend.get().af_meanvar(c_pointer(mean_out.arr), c_pointer(var_out.arr),
a.arr, weights.arr, bias.value, c_int_t(dim)))
return mean_out, var_out
def stdev(a, dim=None):
"""
Calculate standard deviation along a given dimension.
Parameters
----------
a: af.Array
The input array.
dim: optional: int. default: None.
The dimension for which to obtain the standard deviation from
input data.
Returns
-------
output: af.Array
Array containing the standard deviation of the input array
along a given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_stdev(c_pointer(out.arr), a.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_stdev_all(c_pointer(real), c_pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def cov(a, isbiased=False, dim=None):
"""
Calculate covariance along a given dimension.
Parameters
----------
a: af.Array
The input array.
isbiased: optional: Boolean. default: False.
Boolean denoting whether biased estimate should be taken.
dim: optional: int. default: None.
The dimension for which to obtain the covariance from input data.
Returns
-------
output: af.Array
Array containing the covariance of the input array along a
given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_cov(c_pointer(out.arr), a.arr, isbiased, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_cov_all(c_pointer(real), c_pointer(imag), a.arr, isbiased))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def median(a, dim=None):
"""
Calculate median along a given dimension.
Parameters
----------
a: af.Array
The input array.
dim: optional: int. default: None.
The dimension for which to obtain the median from input data.
Returns
-------
output: af.Array
Array containing the median of the input array along a
given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_median(c_pointer(out.arr), a.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_median_all(c_pointer(real), c_pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def corrcoef(x, y):
"""
Calculate the correlation coefficient of the input arrays.
Parameters
----------
x: af.Array
The first input array.
y: af.Array
The second input array.
Returns
-------
|
output: af.Array
Array containing the correlation coefficient of the input arrays.
"""
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_corrcoef(c_pointer(real), c_pointer(imag), x.arr, y.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def topk(data, k, dim=0, order=TOPK.DEFAULT):
"""
Return top k elements along a single dimension.
Parameters
----------
|
data: af.Array
Input array to return k elements from.
k: scalar. default: 0
The number of elements to return from input array.
dim: optional: scalar. default: 0
The dimension along which the top k elements are
extracted. Note: at the moment, topk() only supports the
extraction of values along the first dimension.
order: optional: af.TOPK. default: af.TOPK.DEFAULT
The ordering of k extracted elements. Defaults to top k max valu
|
| hasadna/anyway | alembic/versions/bd67c88713b8_user_permissions_management.py | Python | mit | 3,369 | 0.003562 |
"""user permissions management
Revision ID: bd67c88713b8
Revises: 10023013f155
Create Date: 2021-03-31 21:31:47.278834
"""
# revision identifiers, used by Alembic.
import datetime
from sqlalchemy import orm, text
from sqlalchemy.engine.reflection import Inspector
revision = "bd67c88713b8"
down_revision = "10023013f155"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
ADMIN_EMAIL = "anyway@anyway.co.il"
def get_tables_names() -> [str]:
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
return tables
def upgrade():
# In cases of downgrade and upgrade those tables will no longer exits - and so the transaction will fail
tables_names = get_tables_names()
for table_name in [
"roles_users",
"roles",
"report_preferences",
"general_preferences",
]:
if table_name in tables_names:
op.drop_table(table_name)
if "user_oauth" in tables_names:
if "users" in tables_names:
op.drop_table("users")
op.rename_table("user_oauth", "users")
op.create_table(
"roles",
sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True, nullable=False),
sa.Column("name", sa.String(127), unique=True, index=True, nullable=False),
sa.Column("description", sa.String(255)),
sa.Column("create_date", sa.DateTime(), nullable=False, server_default=text("now()")),
)
op.create_table(
"users_to_roles",
sa.Column(
"user_id", sa.BigInteger(), sa.ForeignKey("users.id"), index=True, nullable=False
),
sa.Column("role_id", sa.Integer(), sa.ForeignKey("roles.id"), index=True, nullable=False),
        sa.Column("create_date", sa.DateTime(), nullable=False, server_default=text("now()")),
sa.PrimaryKeyConstraint("user_id", "role_id"),
)
from anyway.models import Roles, Users, users_to_roles
bind = op.get_bind()
session = orm.Session(bind=bind)
role_admins = Roles(
name="admins",
description="This is the default admin role.",
create_date=datetime.datetime.now(),
)
session.add(role_admins)
res = session.query(Users).with_entities(Users.email).filter(Users.email == ADMIN_EMAIL).first()
if res is None:
user = Users(
user_register_date=datetime.datetime.now(),
user_last_login_date=datetime.datetime.now(),
email=ADMIN_EMAIL,
oauth_provider_user_name=ADMIN_EMAIL,
is_active=True,
oauth_provider="google",
is_user_completed_registration=True,
oauth_provider_user_id="unknown-manual-insert",
)
session.add(user)
user_id = (
session.query(Users).with_entities(Users.id).filter(Users.email == ADMIN_EMAIL).first()
)
role_id = session.query(Roles).with_entities(Roles.id).filter(Roles.name == "admins").first()
insert_users_to_roles = users_to_roles.insert().values(
user_id=user_id.id,
role_id=role_id.id,
create_date=datetime.datetime.now(),
)
session.execute(insert_users_to_roles)
session.commit()
def downgrade():
op.drop_table("users_to_roles")
op.drop_table("roles")
# Some of the changes are irreversible
|
| boh1996/LectioAPI | scrapers/materials.py | Python | mit | 2,102 | 0.03568 |
#!/usr/bin/python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import time
from time import mktime
import functions
def materials ( config ):
url = "https://www.lectio.dk/lectio/%s/MaterialOverview.aspx?holdelement_id=%s" % ( str(config["school_id"]), str(config["team_element_id"]) )
cookies = {}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("table", attrs={"id" : "m_Content_MaterialsStudents"}) is None:
return {
"status" : False,
"error" : "Data not fo
|
und"
}
rows = soup.find("table", attrs={"id" : "m_Content_MaterialsStudents"}).findAll("tr")
materialsList = []
if len(rows) > 1:
rows.pop(0)
        titleProg = re.compile(ur"(?P<authors>.*): (?P<title>.*), (?P<publisher>.*)")
for row in rows:
elements = row.findAll("td")
title = unicode(elements[0].text.replace("\n", ""))
titleGroups = titleProg.match(title)
materialsList.append({
"title_text" : title,
"title" : titleGroups.group("title") if not titleGroups is None else title,
"publisher" : titleGroups.group("publisher") if not titleGroups is None else "",
"authors" : titleGroups.group("authors").split(", ") if not titleGroups is None else "",
"type" : "book" if unicode(elements[1].text.replace("\n", "")) == u"Bog" else unicode(elements[1].text.replace("\n", "")),
"book_storage" : True if elements[2].text.replace("\n", "") == "Ja" else False,
"comment" : unicode(elements[3].text.strip("\n").replace("\n", "")),
"ebook" : elements[4].text.strip("\n").replace("\n", "")
})
return {
"status" : "ok",
"materials" : materialsList
}
|
| DBrianKimmel/PyHouse | Project/src/Modules/Computer/Web/web_internet.py | Python | mit | 1,421 | 0.000704 |
"""
@name: Modules/Web/web_internet.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 3, 2013
@summary: Handle the "Internet" information for a house.
"""
__updated__ = '2020-01-02'
# Import system type stuff
from datetime import datetime
from nevow import athena
from nevow import loaders
import os
# Import PyMh files and modules.
from Modules.Core.data_objects import InternetConnectionInformation
from Modules.Computer.Web.web_utils import GetJSONComputerInfo
from Modules.Core import logging_pyh as Logger
from Modules.Core.Utilities import json_tools
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
g_debug = 0
LOG = Logger.getLogger('PyHouse.webInternet ')
class InternetElement(athena.LiveElement):
""" a 'live' internet element.
"""
docFactory = loaders.xmlfile(os.path.join(templatepath, 'internetElement.html'))
jsClass = u'internet.InternetWidget'
def __init__(self, p_workspace_obj, _p_params):
self.m_workspace_obj = p_workspace_obj
self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj
@athena.expose
def getInternetData(self):
l_computer = GetJSONComputerInfo(self.m_pyhouse_obj)
return l_computer
# ## END DBK
|
| blakev/tappy | transifex.py | Python | bsd-2-clause | 1,750 | 0.000571 |
# Copyright (c) 2015, Matt Layman
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
import os
import sys
import requests
API_URL = 'https://www.transifex.com/api/2'
LANGUAGES = [
'es',
'fr',
'it',
|
'nl',
]
def fetch_po_for(language, username, password):
print 'Downloading po file for {0} ...'.format(language)
po_api = '/project/tappy/resource/tappypot/translation/{0}/'.format(
language)
po_url = API_URL + po_api
params = {'file': '1'}
    r = requests.get(po_url, auth=(username, password), params=params)
if r.status_code == 200:
r.encoding = 'utf-8'
output_file = os.path.join(
here, 'tap', 'locale', language, 'LC_MESSAGES', 'tappy.po')
with open(output_file, 'wb') as out:
out.write(r.text.encode('utf-8'))
else:
print('Something went wrong fetching the {0} po file.'.format(
language))
def get_auth_from_conf(here):
transifex_conf = os.path.join(here, '.transifex.ini')
config = ConfigParser()
try:
with open(transifex_conf, 'r') as conf:
config.readfp(conf)
except IOError as ex:
sys.exit('Failed to load authentication configuration file.\n'
'{0}'.format(ex))
try:
username = config.get('auth', 'username')
password = config.get('auth', 'password')
except (NoOptionError, NoSectionError) as ex:
sys.exit('Oops. Incomplete configuration file: {0}'.format(ex))
return username, password
if __name__ == '__main__':
here = os.path.abspath(os.path.dirname(__file__))
username, password = get_auth_from_conf(here)
for language in LANGUAGES:
fetch_po_for(language, username, password)
|
| mozilla/popcorn_maker | popcorn_gallery/notifications/models.py | Python | bsd-3-clause | 906 | 0 |
from django.db import models
from django_extensions.db.fields import CreationDateTimeField
from tower import ugettext_lazy as _
from .managers import NoticeLiveManager
class Notice(models.Model):
LIVE = 1
REMOVED = 2
|
STATUS_CHOICES = (
(LIVE, _('Published')),
(REMOVED, _('Unpublished')),
)
title = models.CharField(max_length=255)
body = models.TextField()
created = CreationDateTimeField()
    status = models.IntegerField(choices=STATUS_CHOICES, default=LIVE)
end_date = models.DateTimeField(blank=True, null=True,
                                    help_text='Optional. Determines when the '
                                              'notice disappears')
# managers
objects = models.Manager()
live = NoticeLiveManager()
class Meta:
ordering = ('-created',)
def __unicode__(self):
return u'Notice: %s' % self.title
|
| JulyKikuAkita/PythonPrac | cs15211/BinaryTreeCameras.py | Python | apache-2.0 | 4,264 | 0.003752 |
__source__ = 'https://leetcode.com/problems/binary-tree-cameras/'
# Time: O(N)
# Space: O(H)
#
# Description: Leetcode # 968. Binary Tree Cameras
#
# Given a binary tree, we install cameras on the nodes of the tree.
#
# Each camera at a node can monitor its parent, itself, and its immediate children.
#
# Calculate the minimum number of cameras needed to monitor all nodes of the tree.
#
# Example 1:
#
# Input: [0,0,null,0,0]
# Output: 1
# Explanation: One camera is enough to monitor all nodes if placed as shown.
#
# Example 2:
#
# Input: [0,0,null,0,null,0,null,null,0]
# Output: 2
# Explanation: At least two cameras are needed to monitor all nodes of the tree.
# The above image shows one of the valid configurations of camera placement.
#
# Note:
# The number of nodes in the given tree will be in the range [1, 1000].
# Every node has value 0.
#
import unittest
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 76ms 100%
class Solution(object):
def minCameraCover(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def solve(node):
# 0: Strict ST; All nodes below this are covered, but not this one
# 1: Normal ST; All nodes below and incl this are covered - no camera
# 2: Placed camera; All nodes below this are covered, plus camera here
if not node: return 0, 0, float('inf')
L = solve(node.left)
R = solve(node.right)
dp0 = L[1] + R[1]
            dp1 = min(L[2] + min(R[1:]), R[2] + min(L[1:]))
            dp2 = 1 + min(L) + min(R)
return dp0, dp1, dp2
return min(solve(root)[1:])
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/binary-tree-cameras/solution/
#
Approach 1: Dynamic Programming
Complexity Analysis
Time Complexity: O(N), where N is the number of nodes in the given tree.
Space Complexity: O(H), where H is the height of the given tree.
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
# 10ms 100%
class Solution {
public int minCameraCover(TreeNode root) {
int[] ans = solve(root);
return Math.min(ans[1], ans[2]);
}
// 0: Strict ST; All nodes below this are covered, but not this one
// 1: Normal ST; All nodes below and incl this are covered - no camera
// 2: Placed camera; All nodes below this are covered, plus camera here
public int[] solve(TreeNode node) {
if (node == null) return new int[]{ 0, 0, 99999 };
int[] L = solve(node.left);
int[] R = solve(node.right);
int mL12 = Math.min(L[1], L[2]);
int mR12 = Math.min(R[1], R[2]);
int d0 = L[1] + R[1];
int d1 = Math.min(L[2] + mR12, R[2] + mL12);
int d2 = 1 + Math.min(L[0], mL12) + Math.min(R[0], mR12);
return new int[]{d0, d1, d2};
}
}
# https://leetcode.com/problems/binary-tree-cameras/discuss/211180/JavaC%2B%2BPython-Greedy-DFS
# Explanation:
# Apply a recusion function dfs.
# Return 0 if it's a leaf.
# Return 1 if it's a parent of a leaf, with a camera on this node.
# Return 2 if it's coverd, without a camera on this node.
#
# For each node,
# if it has a child, which is leaf (node 0), then it needs camera.
# if it has a child, which is the parent of a leaf (node 1), then it's covered.
#
# If it needs camera, then res++ and we return 1.
# If it's covered, we return 2.
# Otherwise, we return 0.
# 9ms 100%
class Solution {
int res = 0;
public int minCameraCover(TreeNode root) {
return (dfs(root) < 1 ? 1: 0) + res;
}
private int dfs(TreeNode root) {
int left = root.left == null ? 2 : dfs(root.left),
right = root.right == null ? 2 : dfs(root.right);
if (left == 0 || right == 0) {
res++;
return 1;
}
return left == 1 || right == 1 ? 2 : 0;
}
}
'''
|
TwilioDevEd/api-snippets
|
sync/rest/documents/create-document/create-document.7.x.py
|
Python
|
mit
| 864 | 0 |
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
from datetime import datetime
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
data = {
'date_updated': str(datetime.now()),
'movie_title': "On The Line",
'show_times': ["12:30:00Z", "14:45:00Z", "15:30:00Z", "17:45:00Z"],
    'starring': ["Lance Bass", "Joey Fatone"],
'genre': "Romance"
}
document = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.documents \
.create(unique_name="MyFirstDocument",
data=data,
            ttl=1814400)  # expires in 21 days
print(document.sid)
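# Hedged follow-up (not part of the original snippet): the document created
# above can presumably be fetched back by its unique name with the same helper
# library; the service SID below is the placeholder used earlier.
#   doc = client.sync \
#       .services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
#       .documents("MyFirstDocument") \
#       .fetch()
#   print(doc.data)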
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-ncbit/package.py
|
Python
|
lgpl-2.1
| 1,646 | 0.000608 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RNcbit(RPackage):
"""Making NCBI taxonomic data locally available and searchable as an R
object."""
homepage = "https://cran.r-project.org/package=ncbit"
url = "https://cran.r-project.org/src/contrib/ncbit_2013.03.29.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/ncbit"
version('2013.03.29', '29582d7e5c8bbf9683c57c4f6ac3e891')
|
sakura-internet/saklient.python
|
saklient/cloud/errors/resourcepathnotfoundexception.py
|
Python
|
mit
| 742 | 0.009174 |
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ...errors.httpnotfoundexception import HttpNotFoundException
import saklient
str = six.text_type
# module saklient.cloud.errors.resourcepathnotfoundexception
class ResourcePathNotFoundException(HttpNotFoundException):
## The target was not found. The path is incorrect.
## @param {int} status
# @param {str} code=None
# @param {str} message=""
    def __init__(self, status, code=None, message=""):
super(ResourcePathNotFoundException, self).__init__(status, code, "対象が見つかりません。パスに誤りがあります。" if message is None or message == "" else message)
|
yongshengwang/builthue
|
apps/hbase/src/hbase/management/commands/hbase_setup.py
|
Python
|
apache-2.0
| 3,274 | 0.007025 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime, timedelta
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from desktop.lib.paths import get_apps_root
from hbased.ttypes import AlreadyExists
from hbase.api import HbaseApi
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
help = 'Create and fill some demo tables in the first configured cluster.'
def handle_noargs(self, **options):
api = HbaseApi()
cluster_name = api.getClusters()[0]['name'] # Currently pick first configured cluster
# Check connectivity
api.connectCluster(cluster_name)
self.create_analytics_table(api, cluster_name)
self.load_analytics_table(api, cluster_name)
self.create_binary_table(api, cluster_name)
self.load_binary_table(api, cluster_name)
def create_analytics_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'analytics_demo', [{'properties': {'name': 'hour'}}, {'properties': {'name': 'day'}}, {'properties': {'name': 'total'}}])
except AlreadyExists:
pass
def load_analytics_table(self, api, cluster_name):
table_data = os.path.join(get_apps_root(), 'hbase', 'example', 'analytics', 'hbase-analytics.tsv')
api.bulkUpload(cluster_name, 'analytics_demo', open(table_data))
def create_binary_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'document_demo', [{'properties': {'name': 'doc'}}])
except AlreadyExists:
pass
def load_binary_table(self, api, cluster_name):
today = datetime.now().strftime('%Y%m%d')
tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
api.putRow(cluster_name, 'document_demo', today, {'doc:txt': 'Hue is awesome!'})
api.putRow(cluster_name, 'document_demo', today, {'doc:json': '{"user": "hue", "coolness": "extra"}'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I like HBase</xml>'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I LOVE HBase</xml>'})
root = os.path.join(get_apps_root(), 'hbase', 'example', 'documents')
api.putRow(cluster_name, 'document_demo', today, {'doc:img': open(root + '/hue-logo.png', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:html': open(root + '/gethue.com.html', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:pdf': open(root + '/gethue.pdf', "rb").read()})
|
cdrooom/odoo
|
addons/mail/mail_mail.py
|
Python
|
agpl-3.0
| 18,908 | 0.004337 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import logging
from email.utils import formataddr
from urlparse import urljoin
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import openerp.tools as tools
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_rec_name = 'subject'
_columns = {
'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade', auto_join=True),
'state': fields.selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True, copy=False),
'auto_delete': fields.boolean('Auto Delete',
help="Permanently delete this email after sending it, to save space"),
'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
'email_to': fields.text('To', help='Message recipients (emails)'),
'recipient_ids': fields.many2many('res.partner', string='To (Partners)'),
'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
'headers': fields.text('Headers', copy=False),
        'failure_reason': fields.text('Failure Reason', help="Failure reason. This is usually the exception thrown by the email server, stored to ease the debugging of mailing issues.", readonly=1),
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
'notification': fields.boolean('Is Notification',
help='Mail has been created to notify people of an existing mail.message'),
}
_defaults = {
'state': 'outgoing',
}
    def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
# To remove when automatic context propagation is removed in web client
if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
context = dict(context, default_type=None)
return super(mail_mail, self).default_get(cr, uid, fields, context=context)
def create(self, cr, uid, values, context=None):
# notification field: if not set, set if mail comes from an existing mail.message
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
return super(mail_mail, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# cascade-delete the parent message for all mails that are not created for a notification
ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
return res
def mark_outgoing(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)
def cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
@api.cr_uid
def process_email_queue(self, cr, uid, ids=None, context=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
"""
if context is None:
context = {}
if not ids:
filters = [('state', '=', 'outgoing')]
if 'filters' in context:
filters.extend(context['filters'])
ids = self.search(cr, uid, filters, context=context)
res = None
try:
# Force auto-commit - this is meant to be called by
# the scheduler, and we can't allow rolling back the status
# of previously sent emails!
res = self.send(cr, uid, ids, auto_commit=True, context=context)
except Exception:
_logger.exception("Failed processing mail queue")
return res
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:param browse_record mail: the mail that was just sent
:return: True
"""
if mail_sent and mail.auto_delete:
# done with SUPERUSER_ID to avoid giving large unlink access rights
self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
return True
#------------------------------------------------------
# mail_mail formatting, tools and send mechanism
#------------------------------------------------------
def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None):
"""Generate URLs for links in mails: partner has access (is user):
link to action_mail_redirect action that will redirect to doc or Inbox """
if context is None:
context = {}
if partner and partner.user_ids:
base_url = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'web.base.url')
mail_model = mail.model or 'mail.thread'
url = urljoin(base_url, self.pool[mail_model]._get_access_link(cr, uid, mail, partner, context=context))
return "<span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % {
|
ovnicraft/odoo_addons
|
smile_access_control/tests/__init__.py
|
Python
|
agpl-3.0
| 1,040 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test_access_control
import test_users
import test_groups
|
jeremiahyan/odoo
|
addons/website_event_exhibitor/tests/common.py
|
Python
|
gpl-3.0
| 1,021 | 0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.website_event.tests.common import TestEventOnlineCommon
class TestEventExhibitorCommon(TestEventOnlineCommon):
@classmethod
def setUpClass(cls):
super(TestEventExhibitorCommon, cls).setUpClass()
# Sponsorship data
cls.sponsor_type_0 = cls.env['event.sponsor.type'].create({
            'name': 'GigaTop',
'sequence': 1,
})
        cls.sponsor_0_partner = cls.env['res.partner'].create({
'name': 'EventSponsor',
'country_id': cls.env.ref('base.be').id,
'email': 'event.sponsor@example.com',
'phone': '04856112233',
})
cls.sponsor_0 = cls.env['event.sponsor'].create({
'partner_id': cls.sponsor_0_partner.id,
'event_id': cls.event_0.id,
'sponsor_type_id': cls.sponsor_type_0.id,
'hour_from': 8.0,
'hour_to': 18.0,
})
|
DaivdZhang/tinyControl
|
tcontrol/tests/test_discretization.py
|
Python
|
bsd-3-clause
| 1,236 | 0 |
from unittest import TestCase
from tcontrol.discretization import c2d
from ..transferfunction import tf
from ..model_conversion import *
from ..statespace import StateSpace
import numpy as np
from .tools.test_utility import assert_ss_equal
class TestDiscretization(TestCase):
def setUp(self):
self.s1 = tf([1], [1, 0, 1])
self.zoh = tf([0.4597, 0.4597], [1, 1.0806, 1], dt=1)
self.ss = tf2ss(tf([1], [1, 0, 1]))
def test_c2d_zoh(self):
d_sys = c2d(self.s1, 1, 'zoh')
self.assertLessEqual(np.max(np.abs(d_sys.num - self.zoh.num)), 1e-4)
def test_c2d_foh(self):
a = c2d(self.ss, 1, 'foh')
b = StateSpace([[0.540302, 0.841471], [-0.841471, 0.540302]],
[[0.773644], [0.49675]],
[[1, 0]], [[0.158529]], dt=1)
assert_ss_equal(a, b)
def test_c2d_tustin(self):
d_sys = c2d(self.s1, 1, 'tustin')
error = np.abs(d_sys.num - np.array([0.2, 0.4, 0.2]))
self.assertLessEqual(np.max(error), 1e-4)
def test_c2d_matched(self):
d_sys = c2d(self.s1, 1, 'matched')
error = np.abs(d_sys.num - np.array([0.2298, 0.4597, 0.2298]))
self.assertLessEqual(np.max(error), 1e-4)
|
versionone/VersionOne.SDK.Python
|
v1pysdk/v1meta.py
|
Python
|
bsd-3-clause
| 9,214 | 0.018016 |
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from client import *
from base_asset import BaseAsset
from cache_decorator import memoized
from special_class_methods import special_classes
from none_deref import NoneDeref
from string_utils import split_attribute
class V1Meta(object):
def __init__(self, *args, **kw):
self.server = V1Server(*args, **kw)
self.global_cache = {}
self.dirtylist = []
def __getattr__(self, attr):
"Dynamically build asset type classes when someone tries to get attrs "
"that we don't have."
return self.asset_class(attr)
def __enter__(self):
return self
def __exit__(self, *args, **kw):
self.commit()
@memoized
def asset_class(self, asset_type_name):
xmldata = self.server.get_meta_xml(asset_type_name)
class_members = {
'_v1_v1meta': self,
'_v1_asset_type_name': asset_type_name,
}
for operation in xmldata.findall('Operation'):
opname = operation.get('name')
def operation_func(myself, opname2=opname):
myself._v1_execute_operation(opname2)
class_members[opname] = operation_func
for attribute in xmldata.findall('AttributeDefinition'):
attr = attribute.get("name")
if attribute.get('attributetype') == 'Relation':
if attribute.get('ismultivalue') == 'True':
def getter(self, attr=attr):
return self._v1_getattr(attr)
def setter(self, value, attr=attr):
return self._v1_setattr(attr, list(value))
def deleter(self, attr=attr):
raise NotImplementedError
else:
def getter(self, attr=attr):
v = self._v1_getattr(attr)
if v:
return self._v1_getattr(attr)[0]
else:
return NoneDeref()
def setter(self, value, attr=attr):
return self._v1_setattr(attr, value)
def deleter(self, attr=attr):
raise NotImplementedError
else:
def getter(self, attr=attr):
return self._v1_getattr(attr)
def setter(self, value, attr=attr):
return self._v1_setattr(attr, value)
def deleter(self, attr=attr):
raise NotImplementedError
class_members[attr] = property(getter, setter, deleter)
bases = [BaseAsset,]
# mix in any special methods
if asset_type_name in special_classes:
mixin = special_classes[asset_type_name]
bases.append(mixin)
new_asset_class = type(asset_type_name, tuple(bases), class_members)
        return new_asset_class
def add_to_dirty_list(self, asset_instance):
self.dirtylist.append(asset_instance)
def commit(self):
errors = []
for asset in self.dirtylist:
try:
asset._v1_commit()
except V1Error, e:
errors.append(e)
self.dirtylist = []
return errors
def generate_update_doc(self, newdata):
update_doc = Element('Asset')
for attrname, newvalue in newdata.items():
if newvalue is None: # single relation was removed
node = Element('Relation')
node.set('name', attrname)
node.set('act', 'set')
elif isinstance(newvalue, BaseAsset): # single relation was changed
node = Element('Relation')
node.set('name', attrname)
node.set('act', 'set')
ra = Element('Asset')
ra.set('idref', newvalue.idref)
node.append(ra)
elif isinstance(newvalue, list): # multi relation was changed
node = Element('Relation')
node.set('name', attrname)
for item in newvalue:
child = Element('Asset')
child.set('idref', item.idref)
child.set('act', 'add')
node.append(child)
else: # Not a relation
node = Element('Attribute')
node.set('name', attrname)
node.set('act', 'set')
if isinstance(newvalue, unicode) != True:
node.text = str(newvalue).decode('utf-8')
else:
node.text = newvalue
update_doc.append(node)
return update_doc
def create_asset(self, asset_type_name, newdata):
update_doc = self.generate_update_doc(newdata)
new_asset_xml = self.server.create_asset(asset_type_name, update_doc)
asset_type, asset_oid, asset_moment = new_asset_xml.get('id').split(':')
return self.asset_class(asset_type)(asset_oid)
def update_asset(self, asset_type_name, asset_oid, newdata):
update_doc = self.generate_update_doc(newdata)
return self.server.update_asset(asset_type_name, asset_oid, update_doc)
def execute_operation(self, asset_type_name, oid, opname):
return self.server.execute_operation(asset_type_name, oid, opname)
def get_attr(self, asset_type_name, oid, attrname, moment=None):
xml = self.server.get_attr(asset_type_name, oid, attrname, moment)
dummy_asset = ElementTree.Element('Asset')
dummy_asset.append(xml)
return self.unpack_asset(dummy_asset)[attrname]
def query(self, asset_type_name, wherestring, selstring):
return self.server.get_query_xml(asset_type_name, wherestring, selstring)
def read_asset(self, asset_type_name, asset_oid, moment=None):
xml = self.server.get_asset_xml(asset_type_name, asset_oid, moment)
return self.unpack_asset(xml)
def unpack_asset(self, xml):
output = {}
self.unpack_asset_relations(output, xml)
self.unpack_asset_attributes(output, xml)
return output
def unpack_asset_attributes(self, output, xml):
for attribute in xml.findall('Attribute'):
#key = attribute.get('name').replace('.','_')
key = attribute.get('name')
values = [v.text for v in attribute.findall('Value')]
if len(values) == 0:
values = [attribute.text]
self.add_attribute_to_output(output, key, values)
def unpack_asset_relations(self, output, xml):
# we sort relations in order to insert the shortest ones first, so that
# containing relations are added before leaf ones.
for relation in sorted(xml.findall('Relation'), key=lambda x: x.get('name')):
key = relation.get('name')
related_asset_elements = relation.findall('Asset')
rellist = []
for value_element in related_asset_elements:
relation_idref = value_element.get('idref')
value = self.asset_from_oid(relation_idref)
rellist.append(value)
self.add_relation_to_output(output, key, rellist)
def add_relation_to_output(self, output, relation, assets):
if self.is_attribute_qualified(relation):
(container, leaf) = self.split_relation_to_container_and_leaf(relation)
asset = self.get_related_asset(output, container)
# asset may be unset because the reference is broken
if asset:
asset.with_data({leaf: assets})
else:
output[relation] = assets
def add_attribute_to_output(self, output, relation, values):
if self.is_attribute_qualified(relation):
(container, leaf) = self.split_relation_to_container_and_leaf(relation)
for (asset, value) in zip(self.get_related_assets(output, container), values):
# for calculated values it is not an asset so take the value directly
if hasattr(asset, 'with_data'):
asset.with_data({leaf: value})
else:
output[relation] = value
else:
output[relation] = values[0]
def is_attribute_qualified(self, relation):
parts = split_attribute(relation)
return len(parts) > 1
def split_relation_to_container_and_leaf(self, relation):
parts = split_attribute(relation)
return ('.'.join(parts[:-1]), parts[-1])
def get_related_assets(self, output, relation):
if self.is_attribute_qualified(relation):
parts = split_attribute(relation)
assets = output[parts[0]]
for part in parts[1:]:
try:
asset = assets[0]
except IndexError:
return []
assets = asset._v1_getattr(part)
return assets
else:
return output[relation]
|
jellegerbrandy/bioport-site
|
bioport/mail_validation.py
|
Python
|
gpl-3.0
| 1,103 | 0.004533 |
##########################################################################
# Copyright (C) 2009 - 2014 Huygens ING & Gerbrandy S.R.L.
#
# This file is part of bioport.
#
# bioport is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/gpl-3.0.html>.
##########################################################################
from formencode.validators import Email
from formencode.api import Invalid
def check_email(email):
try:
Email().to_python(email)
return True
except Invalid, error:
return False
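# Hedged usage sketch (not part of the original module):
#   check_email('jane.doe@example.com')  # -> True
#   check_email('not-an-email')          # -> False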
|
opennode/nodeconductor
|
waldur_core/structure/tests/unittests/test_tasks.py
|
Python
|
mit
| 2,443 | 0.001228 |
from ddt import ddt, data
from django.test import TestCase
from six.moves import mock
from waldur_core.core import utils
from waldur_core.structure import tasks
from waldur_core.structure.tests import factories, models
class TestDetectVMCoordinatesTask(TestCase):
@mock.patch('requests.get')
def test_task_sets_coordinates(self, mock_request_get):
ip_address = "127.0.0.1"
expected_latitude = 20
expected_longitude = 20
instance = factories.TestNewInstanceFactory()
mock_request_get.return_value.ok = True
response = {"ip": ip_address, "latitude": expected_latitude, "longitude": expected_longitude}
mock_request_get.return_value.json.return_value = response
tasks.detect_vm_coordinates(utils.serialize_instance(instance))
instance.refresh_from_db()
self.assertEqual(instance.latitude, expected_latitude)
self.assertEqual(instance.longitude, expected_longitude)
@mock.patch('requests.get')
def test_task_does_not_set_coordinates_if_response_is_not_ok(self, mock_request_get):
instance = factories.TestNewInstanceFactory()
mock_request_get.return_value.ok = False
tasks.detect_vm_coordinates(utils.serialize_instance(instance))
instance.refresh_from_db()
self.assertIsNone(instance.latitude)
self.assertIsNone(instance.longitude)
@ddt
class ThrottleProvisionTaskTest(TestCase):
@data(
dict(size=tasks.ThrottleProvisionTask.DEFAULT_LIMIT + 1, retried=True),
dict(size=tasks.ThrottleProvisionTask.DEFAULT_LIMIT - 1, retried=False),
)
def test_if_limit_is_reached_provisioning_is_delayed(self, params):
link = factories.TestServiceProjectLinkFactory()
factories.TestNewInstanceFactory.create_batch(
size=params['size'],
state=models.TestNewInstance.States.CREATING,
service_project_link=link)
vm = factories.TestNewInstanceFactory(
            state=models.TestNewInstance.States.CREATION_SCHEDULED,
service_project_link=link)
serialized_vm = utils.serialize_instance(vm)
mocked_retry = mock.Mock()
tasks.ThrottleProvisionTask.retry = mocked_retry
tasks.ThrottleProvisionTask().si(
serialized_vm,
'create',
state_transition='begin_starting').apply()
self.assertEqual(mocked_retry.called, params['retried'])
|
cchristelis/inasafe
|
safe/impact_functions/volcanic/volcano_polygon_population/metadata_definitions.py
|
Python
|
gpl-3.0
| 4,986 | 0 |
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Volcano Polygon on Population
Metadata Definitions.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from safe.common.utilities import OrderedDict
from safe.defaults import (
default_minimum_needs,
default_gender_postprocessor,
age_postprocessor,
minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
layer_mode_classified,
layer_mode_continuous,
layer_geometry_polygon,
layer_geometry_raster,
hazard_volcano,
volcano_vector_hazard_classes,
hazard_category_multiple_event,
hazard_category_single_event,
exposure_population,
count_exposure_unit,
volcano_name_field,
)
class VolcanoPolygonPopulationFunctionMetadata(ImpactFunctionMetadata):
"""Metadata for VolcanoPolygonPopulationFunctionMetadata.
.. versionadded:: 2.1
We only need to re-implement as_dict(), all other behaviours
are inherited from the abstract base class.
"""
@staticmethod
def as_dict():
"""Return metadata as a dictionary.
This is a static method. You can use it to get the metadata in
dictionary format for an impact function.
:returns: A dictionary representing all the metadata for the
concrete impact function.
:rtype: dict
"""
dict_meta = {
'id': 'VolcanoPolygonPopulationFunction',
'name': tr('Polygon volcano on population'),
'impact': tr('Need evacuation'),
'title': tr('Need evacuation'),
'function_type': 'old-style',
'author': 'AIFDR',
'date_implemented': 'N/A',
'hazard_input': tr(
                'The hazard layer must be a polygon layer. This layer '
'must have an attribute representing the volcano hazard '
'zone that can be specified in the impact function option. '
'There are three classes low, medium, and high. The default '
'values are "Kawasan Rawan Bencana I" for low, "Kawasan Rawan '
'Bencana II" for medium, and "Kawasan Rawan Bencana III for '
'high." If you want to see the name of the volcano in the '
'result, you need to specify the volcano name attribute in '
'the Impact Function options.'),
'exposure_input': tr(
'An exposure raster layer where each cell represents a '
'population count for that cell.'),
'output': tr(
'A vector layer containing people affected per hazard zone '
'and the minimum needs based on the number of people '
'affected.'),
'actions': tr(
'Provide details about the number of people that are within '
'each hazard zone.'),
'limitations': [],
'citations': [],
'overview': tr(
'To assess the impact of a volcano eruption on people.'),
'detailed_description': '',
'layer_requirements': {
'hazard': {
'layer_mode': layer_mode_classified,
'layer_geometries': [layer_geometry_polygon],
'hazard_categories': [
hazard_category_multiple_event,
hazard_category_single_event
],
'hazard_types': [hazard_volcano],
'continuous_hazard_units': [],
'vector_hazard_classifications': [
volcano_vector_hazard_classes],
'raster_hazard_classifications': [],
'additional_keywords': [
volcano_name_field]
},
'exposure': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'exposure_types': [exposure_population],
'exposure_units': [count_exposure_unit],
'exposure_class_fields': [],
'additional_keywords': []
}
},
'parameters': OrderedDict([
('postprocessors', OrderedDict([
('Gender', default_gender_postprocessor()),
('Age', age_postprocessor()),
('MinimumNeeds', minimum_needs_selector()),
])),
('minimum needs', default_minimum_needs())
])
}
return dict_meta
|
overdrive3000/skytools
|
python/skytools/plpy_applyrow.py
|
Python
|
isc
| 6,707 | 0.006113 |
"""
PLPY helper module for applying row events from pgq.logutriga().
"""
import plpy
import pkgloader
pkgloader.require('skytools', '3.0')
import skytools
## TODO: automatic fkey detection
# find FK columns
FK_SQL = """
SELECT (SELECT array_agg( (SELECT attname::text FROM pg_attribute
WHERE attrelid = conrelid AND attnum = conkey[i]))
FROM generate_series(1, array_upper(conkey, 1)) i) AS kcols,
(SELECT array_agg( (SELECT attname::text FROM pg_attribute
WHERE attrelid = confrelid AND attnum = confkey[i]))
FROM generate_series(1, array_upper(confkey, 1)) i) AS fcols,
confrelid::regclass::text AS ftable
FROM pg_constraint
WHERE conrelid = {tbl}::regclass AND contype='f'
"""
class DataError(Exception):
"Invalid data"
def colfilter_full(rnew, rold):
return rnew
def colfilter_changed(rnew, rold):
res = {}
    for k in rnew:
if rnew[k] != rold[k]:
res[k] = rnew[k]
return res
def canapply_dummy(rnew, rold):
return True
def canapply_tstamp_helper(rnew, rold, tscol):
tnew = rnew[tscol]
told = rold[tscol]
if not tnew[0].isdigit():
raise DataError('invalid timestamp')
if not told[0].isdigit():
raise DataError('invalid timestamp')
return tnew > told
def applyrow(tblname, ev_type, new_row,
backup_row = None,
alt_pkey_cols = None,
fkey_cols = None,
fkey_ref_table = None,
fkey_ref_cols = None,
fn_canapply = canapply_dummy,
fn_colfilter = colfilter_full):
"""Core logic. Actual decisions will be done in callback functions.
- [IUD]: If row referenced by fkey does not exist, event is not applied
- If pkey does not exist but alt_pkey does, row is not applied.
@param tblname: table name, schema-qualified
@param ev_type: [IUD]:pkey1,pkey2
    @param alt_pkey_cols: list of alternative columns to consider
@param fkey_cols: columns in this table that refer to other table
@param fkey_ref_table: other table referenced here
@param fkey_ref_cols: column in other table that must match
@param fn_canapply: callback function, gets new and old row, returns whether the row should be applied
@param fn_colfilter: callback function, gets new and old row, returns dict of final columns to be applied
"""
gd = None
# parse ev_type
tmp = ev_type.split(':', 1)
if len(tmp) != 2 or tmp[0] not in ('I', 'U', 'D'):
raise DataError('Unsupported ev_type: '+repr(ev_type))
if not tmp[1]:
raise DataError('No pkey in event')
cmd = tmp[0]
pkey_cols = tmp[1].split(',')
qtblname = skytools.quote_fqident(tblname)
# parse ev_data
fields = skytools.db_urldecode(new_row)
if ev_type.find('}') >= 0:
raise DataError('Really suspicious activity')
if ",".join(fields.keys()).find('}') >= 0:
raise DataError('Really suspicious activity 2')
# generate pkey expressions
tmp = ["%s = {%s}" % (skytools.quote_ident(k), k) for k in pkey_cols]
pkey_expr = " and ".join(tmp)
alt_pkey_expr = None
if alt_pkey_cols:
tmp = ["%s = {%s}" % (skytools.quote_ident(k), k) for k in alt_pkey_cols]
alt_pkey_expr = " and ".join(tmp)
log = "data ok"
#
# Row data seems fine, now apply it
#
if fkey_ref_table:
tmp = []
for k, rk in zip(fkey_cols, fkey_ref_cols):
tmp.append("%s = {%s}" % (skytools.quote_ident(rk), k))
fkey_expr = " and ".join(tmp)
q = "select 1 from only %s where %s" % (
skytools.quote_fqident(fkey_ref_table),
fkey_expr)
res = skytools.plpy_exec(gd, q, fields)
if not res:
return "IGN: parent row does not exist"
log += ", fkey ok"
# fetch old row
if alt_pkey_expr:
q = "select * from only %s where %s for update" % (qtblname, alt_pkey_expr)
res = skytools.plpy_exec(gd, q, fields)
if res:
oldrow = res[0]
# if altpk matches, but pk not, then delete
need_del = 0
for k in pkey_cols:
# fixme: proper type cmp?
if fields[k] != str(oldrow[k]):
need_del = 1
break
if need_del:
log += ", altpk del"
q = "delete from only %s where %s" % (qtblname, alt_pkey_expr)
skytools.plpy_exec(gd, q, fields)
res = None
else:
log += ", altpk ok"
else:
# no altpk
q = "select * from only %s where %s for update" % (qtblname, pkey_expr)
res = skytools.plpy_exec(None, q, fields)
# got old row, with same pk and altpk
if res:
oldrow = res[0]
log += ", old row"
ok = fn_canapply(fields, oldrow)
if ok:
log += ", new row better"
if not ok:
# ignore the update
return "IGN:" + log + ", current row more up-to-date"
else:
log += ", no old row"
oldrow = None
if res:
if cmd == 'I':
cmd = 'U'
else:
if cmd == 'U':
cmd = 'I'
# allow column changes
if oldrow:
fields2 = fn_colfilter(fields, oldrow)
for k in pkey_cols:
if k not in fields2:
fields2[k] = fields[k]
fields = fields2
# apply change
if cmd == 'I':
q = skytools.mk_insert_sql(fields, tblname, pkey_cols)
elif cmd == 'U':
q = skytools.mk_update_sql(fields, tblname, pkey_cols)
elif cmd == 'D':
q = skytools.mk_delete_sql(fields, tblname, pkey_cols)
else:
plpy.error('Huh')
plpy.execute(q)
return log
def ts_conflict_handler(gd, args):
"""Conflict handling based on timestamp column."""
conf = skytools.db_urldecode(args[0])
timefield = conf['timefield']
ev_type = args[1]
ev_data = args[2]
ev_extra1 = args[3]
ev_extra2 = args[4]
ev_extra3 = args[5]
ev_extra4 = args[6]
altpk = None
if 'altpk' in conf:
altpk = conf['altpk'].split(',')
def ts_canapply(rnew, rold):
return canapply_tstamp_helper(rnew, rold, timefield)
return applyrow(ev_extra1, ev_type, ev_data,
backup_row = ev_extra2,
alt_pkey_cols = altpk,
fkey_ref_table = conf.get('fkey_ref_table'),
fkey_ref_cols = conf.get('fkey_ref_cols'),
fkey_cols = conf.get('fkey_cols'),
fn_canapply = ts_canapply)
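# Hedged usage sketch (not part of the original module): apply a single INSERT
# event to a hypothetical table "public.mytable" keyed on "id". ev_type follows
# the "[IUD]:pkey1,pkey2" format parsed above, and the row data is urlencoded
# as produced by pgq.logutriga():
#   applyrow('public.mytable', 'I:id', 'id=1&name=alice')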
|
lsaffre/blog
|
docs/blog/2016/0305.py
|
Python
|
agpl-3.0
| 1,941 | 0.00103 |
# -*- coding: UTF-8 -*-
from __future__ import print_function
import csv
import os
ignored_views = set(["HHB", "FFO", "FFOI"])
seen_views = set([])
seen_aliases = set([])
seen_groups = set([])
tpl = "check_journal(u'{1}', u'{4}', u'{11}', u'{10}')"
print("""# -*- coding: UTF-8 -*-
from __future__ import print_function
from lino.api import rt
ledger = rt.models.ledger
finan = rt.models.finan
vatless = rt.models.vatless
def check_journal(ref, name, view, group):
if ledger.Journal.objects.filter(ref=ref).count():
print("Journal", ref, "exists")
return
if not group:
return
if view == "REG":
voucher_type = 'vatless.ProjectInvoicesByJournal'
elif view == "AAW":
voucher_type = 'finan.DisbursementOrdersByJournal'
elif view == "KAS":
voucher_type = 'finan.BankStatementsByJournal'
elif view == "ZAU":
voucher_type = 'finan.PaymentOrdersByJournal'
else:
return
grp = ledger.JournalGroups.get_by_name(group.lower())
obj = ledger.Journal(ref=ref, name=name, voucher_type=voucher_type,
journal_group=grp)
obj.full_clean()
# uncomment the following line when ready:
# obj.save()
print("Journal", ref, "has been created")
""")
with open(os.path.expanduser('~/Downloads/JNL.csv'), 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';', quotechar='"')
for row in reader:
row = [x.strip() for x in row]
        alias = row[2].strip()
group = row[10].strip()
view = row[11].strip()
if alias in ["IMP"]:
if view not in ignored_views:
seen_views.add(view)
seen_aliases.add(alias)
seen_groups.add(group)
print(tpl.format(*row))
# print(', '.join(row))
#print("# Seen aliases:", seen_aliases)
print("# Seen views:", seen_views)
print("# Seen groups:", seen_groups)
|
citrix-openstack-build/neutron-fwaas
|
neutron_fwaas/tests.skip/unit/services/firewall/drivers/linux/test_iptables_fwaas.py
|
Python
|
apache-2.0
| 11,580 | 0.00095 |
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
from neutron.tests import base
from neutron.tests.unit import test_api_v2
import neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas as fwaas
_uuid = test_api_v2._uuid
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesFwaasTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
self.iptables_cls_p.start()
self.firewall = fwaas.IptablesFwaasDriver()
def _fake_rules_v4(self, fwid, apply_list):
rule_list = []
rule1 = {'enabled': True,
'action': 'allow',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '80',
'source_ip_address': '10.24.4.2'}
rule2 = {'enabled': True,
'action': 'deny',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '22'}
ingress_chain = ('iv4%s' % fwid)[:11]
egress_chain = ('ov4%s' % fwid)[:11]
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
v4filter_inst.chains.append(ingress_chain)
v4filter_inst.chains.append(egress_chain)
rule_list.append(rule1)
rule_list.append(rule2)
return rule_list
def _fake_firewall_no_rule(self):
rule_list = []
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall_with_admin_down(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': False,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_apply_list(self, router_count=1, distributed=False,
distributed_mode=None):
apply_list = []
while router_count > 0:
iptables_inst = mock.Mock()
router_inst = {'distributed': distributed}
v4filter_inst = mock.Mock()
v6filter_inst = mock.Mock()
v4filter_inst.chains = []
v6filter_inst.chains = []
iptables_inst.ipv4 = {'filter': v4filter_inst}
iptables_inst.ipv6 = {'filter': v6filter_inst}
router_info_inst = mock.Mock()
router_info_inst.iptables_manager = iptables_inst
router_info_inst.snat_iptables_manager = iptables_inst
if distributed_mode == 'dvr':
router_info_inst.dist_fip_count = 1
router_info_inst.router = router_inst
apply_list.append(router_info_inst)
router_count -= 1
return apply_list
def _setup_firewall_with_rules(self, func, router_count=1,
distributed=False, distributed_mode=None):
apply_list = self._fake_apply_list(router_count=router_count,
distributed=distributed, distributed_mode=distributed_mode)
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall(rule_list)
if distributed:
if distributed_mode == 'dvr_snat':
if_prefix = 'sg-+'
if distributed_mode == 'dvr':
if_prefix = 'rfp-+'
else:
if_prefix = 'qr-+'
distributed_mode = 'legacy'
func(distributed_mode, apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
rule2 = '-p tcp --dport 22 -j DROP'
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
bname = fwaas.iptables_manager.binary_name
ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
                 mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule(ingress_chain, rule1),
mock.call.add_rule(egress_chain, rule1),
mock.call.add_rule(ingress_chain, rule2),
mock.call.add_rule(egress_chain, rule2),
mock.call.add_rule('FORWARD',
'-o %s -j %s' % (if_prefix,
ipt_mgr_ichain)),
mock.call.add_rule('FORWARD',
'-i %s -j %s' % (if_prefix,
ipt_mgr_echain)),
mock.call.add_rule('FORWARD',
'-o %s -j %s-fwaas-defau' % (if_prefix,
bname)),
mock.call.add_rule('FORWARD',
'-i %s -j %s-fwaas-defau' % (if_prefix,
bname))]
v4filter_inst.assert_has_calls(calls)
def test_create_firewall_no_rules(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.create_firewall('legacy', apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
bname = fwaas.iptables_manager.binary_name
for ip_version in (4, 6):
ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
calls = [mock.call.remove_chain(
'iv%sfake-fw-uuid' % ip_version),
mock.call.remove_chain(
'ov%sfake-fw-uuid' % ip_version),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.
|
giorgiop/scikit-learn
|
sklearn/metrics/pairwise.py
|
Python
|
bsd-3-clause
| 46,491 | 0.000043 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
        X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
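# Illustrative note (not part of upstream scikit-learn): when Y is None,
# check_pairwise_arrays returns the same validated array twice (Y is X), which
# lets the pairwise metrics below handle the X-vs-X case without copying or
# re-validating the data.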
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common c
|
sgallagher/reviewboard
|
reviewboard/webapi/resources/base_file_attachment.py
|
Python
|
mit
| 2,246 | 0 |
from __future__ import unicode_literals
from django.utils import six
from reviewboard.attachments.models import FileAttachment
from reviewboard.webapi.base import WebAPIResource
class BaseFileAttachmentResource(WebAPIResource):
    """A base resource representing file attachments."""
added_in = '1.6'
model = FileAttachment
name = 'file_attachment'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the file.',
},
'caption': {
'type': six.text_type,
'description': "The file's descriptive caption.",
},
'filename': {
'type': six.text_type,
'description': "The name of the file.",
},
'absolute_url': {
'type': six.text_type,
'description': "The absolute URL of the file, for downloading "
"purposes.",
'added_in': '2.0',
},
'icon_url': {
'type': six.text_type,
'description': 'The URL to a 24x24 icon representing this file. '
'The use of these icons is deprecated and this '
'property will be removed in a future version.',
'deprecated_in': '2.5',
},
'mimetype': {
'type': six.text_type,
'description': 'The mimetype for the file.',
'added_in': '2.0',
},
'thumbnail': {
'type': six.text_type,
'description': 'A thumbnail representing this file.',
'added_in': '1.7',
},
}
uri_object_key = 'file_attachment_id'
def serialize_absolute_url_field(self, obj, request, **kwargs):
return request.build_absolute_uri(obj.get_absolute_url())
def serialize_caption_field(self, obj, **kwargs):
# We prefer 'caption' here, because when creating a new file
# attachment, it won't be full of data yet (and since we're posting
# to file-attachments/, it doesn't hit DraftFileAttachmentResource).
# DraftFileAttachmentResource will prefer draft_caption, in case people
# are changing an existing one.
return obj.caption or obj.draft_caption
|
matevzmihalic/wlansi-store
|
wlansi_store/cms_app.py
|
Python
|
agpl-3.0
| 264 | 0.011364 |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class ProductApphook(CMSApp):
name = _("Product Apphook")
urls = ["wlansi_store.urls"]
apphook_pool.register(ProductApphook)
|
zzzoidberg/landscape
|
finance/consts.py
|
Python
|
mit
| 78 | 0 |
# -*- coding: UTF-8 -*-
"""
Package-wide constants.
"""
CALL = 'C'
PUT = 'P'
|
nilovna/EnceFAL
|
project/encefal/migrations/0003_auto__add_unique_vendeur_code_permanent.py
|
Python
|
gpl-3.0
| 8,651 | 0.007629 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Vendeur', fields ['code_permanent']
db.create_unique(u'encefal_vendeur', ['code_permanent'])
def backwards(self, orm):
# Removing unique constraint on 'Vendeur', fields ['code_permanent']
db.delete_unique(u'encefal_vendeur', ['code_permanent'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encefal.exemplaire': {
'Meta': {'object_name': 'Exemplaire'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'etat': ('django.db.models.fields.CharField', [], {'default': "'VENT'", 'max_length': '4'}),
'facture': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'exemplaires'", 'null': 'True', 'db_column': "'facture'", 'to': u"orm['encefal.Facture']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'livre': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exemplaires'", 'db_column': "'livre'", 'to': u"orm['encefal.Livre']"}),
'prix': ('django.db.models.fields.IntegerField', [], {}),
'vendeur': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exemplaires'", 'db_column': "'vendeur'", 'to': u"orm['encefal.Vendeur']"})
},
u'encefal.facture': {
'Meta': {'object_name': 'Facture'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'employe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'factures'", 'blank': 'True', 'db_column': "'employe'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'factures'", 'blank': 'True', 'db_column': "'session'", 'to': u"orm['encefal.Session']"})
},
u'encefal.livre': {
'Meta': {'object_name': 'Livre'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'auteur': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'edition': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '13', 'blank': 'True'}),
'titre': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'vendeur': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'livres'", 'symmetrical': 'False', 'through': u"orm['encefal.Exemplaire']", 'db_column': "'vendeur'", 'to': u"orm['encefal.Vendeur']"})
},
u'encefal.session': {
'Meta': {'object_name': 'Session'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_debut': ('django.db.models.fields.DateField', [], {}),
'date_fin': ('django.db.models.fields.DateField', [], {}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nom': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'encefal.vendeur': {
'Meta': {'object_name': 'Vendeur'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'code_permanent': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db
|
kidaa/kythe
|
third_party/grpc/src/python/src/grpc/framework/common/cardinality.py
|
Python
|
apache-2.0
| 1,930 | 0.002591 |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines an enum for classifying RPC methods by streaming semantics."""
import enum
@enum.unique
class Cardinality(enum.Enum):
"""Describes the streaming semantics of an RPC method."""
UNARY_UNARY = 'request-unary/response-unary'
UNARY_STREAM = 'request-unary/response-streaming'
STREAM_UNARY = 'request-streaming/response-unary'
STREAM_STREAM = 'request-streaming/response-streaming'
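# Editor's note (illustrative, not part of the original module): the enum is
# typically consulted to pick an invocation style per RPC method, e.g.:
#
#   if cardinality is Cardinality.UNARY_UNARY:
#       ...  # single request in, single response out
#   elif cardinality is Cardinality.STREAM_STREAM:
#       ...  # request iterator in, response iterator out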
|
jabumaho/MNIST-neural-network
|
plot_error.py
|
Python
|
gpl-3.0
| 235 | 0.004255 |
from matplotlib import pyplot as plt
path = "C:/Temp/mnisterrors/chunk" + str(input("chunk: ")) + ".txt"
with open(path, "r") as f:
errorhistory = [float(line.rstrip('\n')) for line in f]
plt.plot(errorhistory)
plt.show()
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/GL/ARB/texture_gather.py
|
Python
|
lgpl-3.0
| 1,072 | 0.015858 |
'''OpenGL extension ARB.texture_gather
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_gather to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a new set of texture functions
(textureGather) to the shading language that determine 2x2 footprint
that are used for linear filtering in a texture lookup, and return a
vector consisting of the first component from each of the four
texels in the footprint.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_gather.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_gather import *
from OpenGL.raw.GL.ARB.texture_gather import _EXTENSION_NAME
def glInitTextureGatherARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
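# Illustrative usage (editor's sketch; assumes a current GL context is already bound):
#
#   if glInitTextureGatherARB():
#       ...  # textureGather() may be used in GLSL shaders on this context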
|
xgin/letsencrypt
|
letsencrypt/reporter.py
|
Python
|
apache-2.0
| 3,253 | 0 |
"""Collects and displays information to the user."""
from __future__ import print_function
import collections
import logging
import os
import Queue
import sys
import textwrap
import zope.interface
from letsencrypt import interfaces
from letsencrypt import le_util
logger = logging.getLogger(__name__)
class Reporter(object):
"""Collects and displays information to the user.
:ivar `Queue.PriorityQueue` messages: Messages to be displayed to
the user.
"""
zope.interface.implements(interfaces.IReporter)
HIGH_PRIORITY = 0
"""High priority constant. See `add_message`."""
MEDIUM_PRIORITY = 1
"""Medium priority constant. See `add_message`."""
LOW_PRIORITY = 2
"""Low priority constant. See `add_message`."""
_msg_type = collections.namedtuple('ReporterMsg', 'priority text on_crash')
def __init__(self):
self.messages = Queue.PriorityQueue()
def add_message(self, msg, priority, on_crash=True):
"""Adds msg to the list of messages to be printed.
:param str msg: Message to be displayed to the user.
        :param int priority: One of `HIGH_PRIORITY`, `MEDIUM_PRIORITY`,
or `LOW_PRIORITY`.
:param bool on_crash: Whether or not the message should be
printed if the program exits abnormally.
"""
assert self.HIGH_PRIORITY <= priority <= self.LOW_PRIORITY
self.messages.put(self._msg_type(priority, msg, on_crash))
logger.info("Reporting to user: %s", msg)
def atexit_print_messages(self, pid=os.getpid()):
"""Function to be registered with atexit to print messages.
:param int pid: Process ID
"""
# This ensures that messages are only printed from the process that
# created the Reporter.
if pid == os.getpid():
self.print_messages()
def print_messages(self):
"""Prints messages to the user and clears the message queue.
If there is an unhandled exception, only messages for which
``on_crash`` is ``True`` are printed.
"""
bold_on = False
if not self.messages.empty():
no_exception = sys.exc_info()[0] is None
bold_on = sys.stdout.isatty()
if bold_on:
print(le_util.ANSI_SGR_BOLD)
print('IMPORTANT NOTES:')
first_wrapper = textwrap.TextWrapper(
initial_indent=' - ', subsequent_indent=(' ' * 3))
next_wrapper = textwrap.TextWrapper(
initial_indent=first_wrapper.subsequent_indent,
subsequent_indent=first_wrapper.subsequent_indent)
while not self.messages.empty():
msg = self.messages.get()
if no_exception or msg.on_crash:
if bold_on and msg.priority > self.HIGH_PRIORITY:
sys.stdout.write(le_util.ANSI_SGR_RESET)
bold_on = False
lines = msg.text.splitlines()
print(first_wrapper.fill(lines[0]))
if len(lines) > 1:
print("\n".join(
next_wrapper.fill(line) for line in lines[1:]))
if bold_on:
sys.stdout.write(le_util.ANSI_SGR_RESET)
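# Illustrative usage (editor's sketch, not part of the original module; the
# message strings are placeholders):
#
#   reporter = Reporter()
#   reporter.add_message("Certificate issued.", Reporter.HIGH_PRIORITY)
#   reporter.add_message("Renewal is not configured.", Reporter.LOW_PRIORITY,
#                        on_crash=False)
#   reporter.print_messages()  # lower priority values (HIGH) are printed first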
|
rustychris/stompy
|
stompy/grid/quad_laplacian.py
|
Python
|
mit
| 123,597 | 0.021538 |
"""
Create a nearly orthogonal quad mesh by solving for stream function
and velocity potential inside a given boundary.
Still trying to improve the formulation of the Laplacian. Psi
(stream function) and phi (velocity potential) are solved
simultaneously. There are other options, and each field
uniquely constraints the other field to within a constant offset.
A block matrix is constructed which solves the Laplacian for
each of psi and phi. The boundaries of the domain are restricted
to be contours of either phi or psi, and these edges meet at right
angles.
For a grid with N nodes there are 2N unknowns.
Each interior node implies 2 constraints via the Laplacian on psi and phi.
For boundary nodes not at corner, one of psi/phi implies a no-flux boundary
and d phi/dn=0 or d psi/dn=0.
The question is how to constrain the remaining boundary nodes. These boundaries
are contours of the respective field. For a boundary segment of
s nodes, inclusive of corners, that yields s-1 constraints.
TODO:
- enforce monotonic sliding of nodes on the same segment. Currently it's possible
for nodes to slide over each other resulting in an invalid (but sometimes salvageable)
intermediate grid.
- Allow ragged edges.
- Allow multiple patches that can be solved simultaneously.
- Depending on how patches go, may allow for overlaps in phi/psi space if they are distinct in geographic space.
- allow patch connections that are rotated (psi on one side matches phi on the other, or a
  full inversion)
"""
import numpy as np
from collections import defaultdict
from shapely import geometry, ops
from scipy import sparse, signal
import matplotlib.pyplot as plt
from matplotlib.tri import LinearTriInterpolator,TriFinder,TrapezoidMapTriFinder
from matplotlib import colors
import itertools
from . import unstructured_grid, exact_delaunay,orthogonalize, triangulate_hole
from .. import utils, filters
from ..spatial import field, linestring_utils
from . import front
import logging
log=logging.getLogger('quad_laplacian')
import six
##
# A hack for linear interpolation on g_int. Nodes outside the triangulation
# take their value from the nearest cell.
class PermissiveFinder(TrapezoidMapTriFinder):
def __init__(self,grid):
self.grid=grid
self.mp_tri=grid.mpl_triangulation()
super(PermissiveFinder,self).__init__(self.mp_tri)
def __call__(self, x, y):
base=super(PermissiveFinder,self).__call__(x,y)
missing=np.nonzero(base==-1)[0]
for i in missing:
base[i]=self.grid.select_cells_nearest( [x[i],y[i]] )
return base
# borrow codes as in front.py
RIGID=front.AdvancingFront.RIGID
class NodeDiscretization(object):
def __init__(self,g):
self.g=g
def construct_matrix(self,op='laplacian',dirichlet_nodes={},
zero_tangential_nodes=[],
gradient_nodes={},
skip_dirichlet=True):
"""
Construct a matrix and rhs for the given operation.
dirichlet_nodes: boundary node id => value
zero_tangential_nodes: list of lists. each list gives a set of
nodes which should be equal to each other, allowing specifying
a zero tangential gradient BC.
gradient_nodes: boundary node id => gradient unit vector [dx,dy]
skip_dirichlet: should dirichlet nodes be omitted from other BCs?
"""
g=self.g
# Adjust tangential node data structure for easier use
# in matrix construction
tangential_nodes={}
for grp in zero_tangential_nodes:
leader=grp[0]
for member in grp:
# NB: This includes leader=>leader
assert member not in tangential_nodes
tangential_nodes[member]=leader
# Now I want to allow multiple BCs to constrain the same node.
# How many rows will I end up with?
# First count up the nodes that will get a regular laplacian
# row. This includes boundary nodes that have a no-flux BC.
# (because that's the behavior of the discretization on a
# boundary)
nlaplace_rows=0
laplace_nodes={}
for n in range(g.Nnodes()):
if skip_dirichlet and (n in dirichlet_nodes): continue
if n in gradient_nodes: continue
if n in tangential_nodes: continue
laplace_nodes[n]=True
nlaplace_rows+=1
ndirichlet_nodes=len(dirichlet_nodes)
# Each group of tangential gradient nodes provides len-1 constraints
ntangential_nodes=len(tangential_nodes) - len(zero_tangential_nodes)
ngradient_nodes=len(gradient_nodes)
nrows=nlaplace_rows + ndirichlet_nodes + ntangential_nodes + ngradient_nodes
log.info(f"row breakdown: Lap: {nlaplace_rows} "
f"Dir: {ndirichlet_nodes} Tan: {ntangential_nodes} "
f"({len(zero_tangential_nodes)} grps) Grad: {ngradient_nodes}")
log.info(f"nrows={nrows} N={g.Nnodes()}")
B=np.zeros(nrows,np.float64)
M=sparse.dok_matrix( (nrows,g.Nnodes()),np.float64)
# Very similar code, but messy to refactor so write a new loop.
ndirichlet_nodes=len(dirichlet_nodes)
# Each group of tangential gradient nodes provides len-1 constraints
ntangential_nodes=len(tangential_nodes) - len(zero_tangential_nodes)
ngradient_nodes=len(gradient_nodes)
nrows=nlaplace_rows + ndirichlet_nodes + ntangential_nodes + ngradient_nodes
B=np.zeros(nrows,np.float64)
M=sparse.dok_matrix( (nrows,g.Nnodes()),np.float64)
multiple=True
row=0
for n in laplace_nodes:
nodes,alphas,rhs=self.node_discretization(n,op=op)
B[row]=rhs
for node,alpha in zip(nodes,alphas):
M[row,node]=alpha
row+=1
for n in dirichlet_nodes:
B[row]=dirichlet_nodes[n]
M[row,n]=1
row+=1
for n in gradient_nodes:
vec=gradient_nodes[n] # The direction of the gradient
normal=[vec[1],-vec[0]] # direction of zero gradient
dx_nodes,dx_alphas,_=self.node_discretization(n,op='dx')
dy_nodes,dy_alphas,_=self.node_discretization(n,op='dy')
assert np.all(dx_nodes==dy_nodes),"Have to be cleverer"
nodes=dx_nodes
# So if vec = [1,0], then normal=[0,-1]
# and I want dx*norma[0]+dy*normal[1] = 0
alphas=np.array(dx_alphas)*normal[0] + np.array(dy_alphas)*normal[1]
B[row]=0
for node,alpha in zip(nodes,alphas):
M[row,node]=alpha
row+=1
for n in tangential_nodes:
leader=tangential_nodes[n]
if n==leader:
# print("skip leader")
continue
M[row,n]=1
M[row,leader]=-1
B[row]=0.0
row+=1
assert row==nrows
return M,B
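    # Illustrative call (editor's sketch; node ids and boundary values below are
    # placeholders, `g` is assumed to be an unstructured grid instance):
    #
    #   nd = NodeDiscretization(g)
    #   M, B = nd.construct_matrix(op='laplacian',
    #                              dirichlet_nodes={n_inflow: 1.0, n_outflow: 0.0},
    #                              zero_tangential_nodes=[boundary_segment_nodes])
    #   from scipy.sparse import linalg as sparse_linalg
    #   psi = sparse_linalg.lsqr(M.tocsr(), B)[0]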
def node_laplacian(self,n0):
return self.node_discretization(n0,'laplacian')
def node_dx(self,n0):
return self.node_discretization(n0,'dx')
def node_dy(self,n0):
return self.node_discretization(n0,'dy')
def node_discretization(self,n0,op='laplacian'):
def beta(c):
return 1.0
N=self.g.angle_sort_adjacent_nodes(n0)
P=len(N)
        is_boundary=int(self.g.is_boundary_node(n0))
M=len(N) - is_boundary
if is_boundary:
# roll N to start and end on boundary nodes:
            nbr_boundary=[self.g.is_boundary_node(n)
for n in N]
while not (nbr_boundary[0] and nbr_boundary[-1]):
N=np.roll(N,1)
nbr_boundary=np.roll(nbr_boundary,1)
# area of the triangles
A=[]
for m in range(M):
tri=[n0,N[m],N[(m+1)%P]]
Am=utils.signed_area( self.g.nodes['x'][tri] )
assert Am!=0.0
A.append(Am)
AT=np.sum(A)
alphas=[]
x=self.g.
|
ctsit/barebones-flask-app
|
app/routes/pages.py
|
Python
|
bsd-3-clause
| 11,826 | 0 |
"""
Goal: Define the routes for general pages
@authors:
Andrei Sura <sura.andrei@gmail.com>
Ruchi Vivek Desai <ruchivdesai@gmail.com>
Sanath Pasumarthy <sanath@ufl.edu>
@see https://flask-login.readthedocs.org/en/latest/
@see https://pythonhosted.org/Flask-Principal/
"""
import hashlib
import base64
import datetime
import uuid
from flask import current_app
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
from app.models.log_entity import LogEntity
from app.models.web_session_entity import WebSessionEntity
from app.models.user_agent_entity import UserAgentEntity
from wtforms import Form, TextField, PasswordField, HiddenField, validators
from flask_login import LoginManager
from flask_login import login_user, logout_user, current_user
from flask_principal import \
Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
from app.main import app
from app import utils
from app.models.user_entity import UserEntity
# set the login manager for the app
login_manager = LoginManager(app)
# Possible options: strong, basic, None
login_manager.session_protection = "strong"
login_manager.login_message = ""
login_manager.login_message_category = "info"
@login_manager.user_loader
def load_user(user_id):
"""Return the user from the database"""
return UserEntity.get_by_id(user_id)
@login_manager.unauthorized_handler
def unauthorized():
""" Returns a message for the unauthorized users """
return 'Please <a href="{}">login</a> first.'.format(url_for('index'))
@app.errorhandler(403)
def page_not_found(e):
"""
Redirect to login page if probing a protected resources before login
"""
return redirect(url_for('index') + "?next={}".format(request.url))
class LoginForm(Form):
""" Declare the validation rules for the login form """
next = HiddenField(default='')
# email = TextField('Email')
email = TextField('Email',
[validators.Required(),
validators.Length(min=4, max=25)])
password = PasswordField('Password',
[validators.Required(),
validators.Length(min=6, max=25)])
def get_user_agent():
"""Find an existing user agent or insert a new one"""
# The raw user agent string received from the browser
uag = request.user_agent
hash = utils.compute_text_md5(uag.string)
# The entity representing the user agent
user_agent = UserAgentEntity.get_by_hash(hash)
if user_agent is None:
platform = uag.platform if uag.platform is not None else ''
browser = uag.browser if uag.browser is not None else ''
version = uag.version if uag.version is not None else ''
language = uag.language if uag.language is not None else ''
user_agent = UserAgentEntity.create(user_agent=uag.string,
hash=hash,
platform=platform,
browser=browser,
version=version,
language=language)
return user_agent
@app.before_request
def check_session_id():
"""
Generate a UUID and store it in the session
as well as in the WebSession table.
"""
user_agent = get_user_agent()
if 'uuid' not in session:
session['uuid'] = str(uuid.uuid4())
WebSessionEntity.create(session_id=session['uuid'],
user_id=current_user.get_id(),
ip=request.remote_addr,
date_time=datetime.datetime.now(),
user_agent=user_agent)
return
if current_user.is_authenticated():
# update the user_id on the first request after login is completed
session_id = session['uuid']
web_session = WebSessionEntity.get_by_session_id(session_id)
if web_session is not None:
web_session = WebSessionEntity.update(
web_session,
user_id=current_user.get_id())
else:
app.logger.error("No row found for sess_id: {}".format(session_id))
@app.route('/', methods=['POST', 'GET'])
def index():
""" Render the login page"""
if app.config['LOGIN_USING_SHIB_AUTH']:
return render_login_shib()
return render_login_local()
def render_login_local():
""" Render the login page with username/pass
@see #index()
@see #render_login_shib()
"""
if current_user.is_authenticated():
return redirect(get_role_landing_page())
uuid = session['uuid']
form = LoginForm(request.form)
if request.method == 'POST' and form.validate():
email = form.email.data.strip(
) if form.email.data else ""
password = form.password.data.strip() if form.password.data else ""
app.logger.debug("{} password: {}".format(email, password))
app.logger.debug("Checking email: {}".format(email))
user = UserEntity.query.filter_by(email=email).first()
if user:
app.logger.debug("Found user object: {}".format(user))
else:
utils.flash_error("No such email: {}".format(email))
LogEntity.login(uuid, "No such email: {}".format(email))
return redirect(url_for('index'))
# if utils.is_valid_auth(app.config['SECRET_KEY'], auth.uathSalt,
# password, auth.uathPassword):
if '' == user.password_hash:
app.logger.info('Log login event for: {}'.format(user))
LogEntity.login(uuid, 'Successful login via email/password')
login_user(user, remember=False, force=False)
# Tell Flask-Principal that the identity has changed
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.get_id()))
return redirect(get_role_landing_page())
else:
app.logger.info('Incorrect pass for: {}'.format(user))
LogEntity.login_error(uuid, 'Incorrect pass for: {}'.format(user))
# When sending a GET request render the login form
return render_template('index.html', form=form,
next_page=request.args.get('next'))
@app.route('/loginExternalAuth', methods=['POST', 'GET'])
def shibb_redirect():
"""
Redirect to the local shibboleth instance where
we can pass the return path.
This route is reached when the user clicks the "Login" button.
Note: This is equivalent to Apache's syntax:
Redirect seeother /loginExternalAuth /Shibboleth.sso/Login?target=...
@see #index()
@see #shibb_return()
"""
next_page = "/Shibboleth.sso/Login?target={}"\
.format(url_for('shibb_return'))
return redirect(next_page)
@app.route('/loginExternalAuthReturn', methods=['POST', 'GET'])
def shibb_return():
"""
Read the Shibboleth headers returned by the IdP after
the user entered the username/password.
If the `eduPersonPrincipalName` (aka Eppn) for the user matches the
usrEmail of an active user then let the user in,
otherwise let them see the login page.
@see #shibb_redirect()
"""
if current_user.is_authenticated():
# next_page = request.args.get('next') or get_role_landing_page()
return redirect(get_role_landing_page())
# fresh login...
uuid = session['uuid']
email = request.headers['Mail']
glid = request.headers['Glid'] # Gatorlink ID
app.logger.debug("Checking if email: {} is registered for glid: {}"
.format(email, glid))
user = UserEntity.query.filter_by(email=email).first()
if not user:
utils.flash_error("No such user: {}".format(email))
LogEntity.login_error(uuid,
"Shibboleth user is not registered for this app")
return redirect(url_for('index'))
if not user.is_active():
utils.flash_error("Inactive user: {}
|
bashkirtsevich/autocode
|
text_preprocessing/es_predict.py
|
Python
|
gpl-3.0
| 1,121 | 0.001898 |
import json
import requests
from transliterate import translit
_eng_chars = "~!@#$%^&qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"|ZXCVBNM<>?"
_rus_chars = "ё!\"№;%:?йцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭ/ЯЧСМИТЬБЮ,"
_trans_table = dict(zip(_eng_chars, _rus_chars))
def _fix_layout(s):
return "".join([_trans_table.get(c, c) for c in s])
def es_predict(es_url, keywords):
query = set(
keywords +
[_fix_layout(word) for word in keywords] +
[translit(word, "ru") for word in keywords]
)
post_data = json.dumps({
"size": 5,
"query": {
"simple_query_string": {
"query": "|".join(query),
"flags": "OR|PREFIX"
}
}
})
response = requests.post(es_url + "/_search", data=post_data).json()
if "hits" in response and "hits" in response["hits"]:
for it in response["hits"]["hits"]:
if "_source" in it and "query" in it["_source"]:
yield it["_source"]["query"]
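# Illustrative call (editor's sketch; the Elasticsearch URL, index name and
# keywords are placeholders):
#
#   for suggestion in es_predict("http://localhost:9200/queries", ["poisk", "search"]):
#       print(suggestion)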
|
m1ojk/nicedoormat
|
test_email.py
|
Python
|
mit
| 898 | 0.004454 |
#!/usr/bin/env python
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import logging
from helper.pi_tool import PiTool
logging.basicConfig(filename='log/test_email.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = PiTool.RESOLUTION_SQ_L
rawCapture = PiRGBArray(camera)
# allow the camera to warmup
time.sleep(0.3)
# grab an image from the camera
camera.capture(rawCapture, format="bgr")
#image = PiTool.get_roi_doorhole(rawCapture.array)
image = rawCapture.array
image = PiTool.get_doorhole_roi(image)
# display the image on screen and wait for a keypress
#cv2.imshow("Image", image)
#cv2.waitKey(0)
PiTool.save_and_email(image, "test_email")
|
nayas360/pyterm
|
bin/type.py
|
Python
|
mit
| 1,002 | 0 |
# type command prints file contents
from lib.utils import *
def _help():
usage = '''
Usage: type (file)
Print content of (file)
Use '%' in front of global
vars to use value as file
name.
'''
print(usage)
def main(argv):
if len(argv) < 1 or '-h' in argv:
_help()
return
    # The shell doesn't send the
    # command name in the arg list
# so the next line is not needed
# anymore
# argv.pop(0)
# The shell does the work of replacing
# vars already. Code segment below
# is not required anymore.
# argv=replace_vars(argv)
argv = make_s(argv)
path = get_path() + argv
if os.path.isfile(path):
with open(path) as f:
data = f.readlines()
        print('_________________<START>_________________\n')
print(make_s2(data))
print('__________________<END>__________________\n')
return
elif os.path.isdir(path):
err(3, add=argv + ' is a directory')
else:
err(2, path)
|
wheeler-microfluidics/svg_model
|
svg_model/svgload/svg_parser.py
|
Python
|
lgpl-2.1
| 7,624 | 0.002361 |
'''
This is a New BSD License.
http://www.opensource.org/licenses/bsd-license.php
Copyright (c) 2008-2009, Jonathan Hartley (tartley@tartley.com)
Copyright (c) 2012, Christian Fobel (christian@fobel.net)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Jonathan Hartley nor the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import warnings
from collections import OrderedDict
from lxml import etree
from path_helpers import path
from .path_parser import PathParser, ParseError
from ..loop import Loop
from ..geo_path import Path
class SvgParseError(Exception):
pass
def parse_warning(*args):
filename, tag, message = args
msg = 'Error parsing %s:%d, %s\n %s'
if filename:
filename = filename.name
warnings.warn(msg % (filename, tag.sourceline, message,
etree.tostring(tag)), RuntimeWarning)
class Svg(object):
'''
Maintains an ordered list of paths, each one corresponding to a path tag
    from an SVG file. Creates a pyglet Batch containing all these paths, for
rendering as a single OpenGL GL_TRIANGLES indexed vert primitive.
'''
def __init__(self):
self.paths = OrderedDict()
def add_path(self, id, path):
self.paths[id] = path
def add_to_batch(self, batch):
'''
Adds paths to the given batch object. They are all added as
GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL
primitive.
'''
for name in self.paths:
svg_path = self.paths[name]
svg_path.add_to_batch(batch)
def get_bounding_box(self):
points = list(self.all_verts())
x_vals = zip(*points)[0]
y_vals = zip(*points)[1]
min_x, min_y = min(x_vals), min(y_vals)
max_x, max_y = max(x_vals), max(y_vals)
return Loop([(min_x, min_y), (min_x, max_y), (max_x, max_y),
(max_x, min_y)])
def get_boundary(self):
if 'boundary' in self.paths:
boundary = self.paths['boundary']
else:
boundary = Path([self.get_bounding_box()])
return boundary
def all_verts(self):
        for svg_path in self.paths.itervalues():
for loop in svg_path.loops:
for vert in loop.verts:
yield vert
class SvgParser(object):
'''
parse(filename) returns an Svg object, populated from the <path> tags
in the file.
'''
def parse_file(self, filename, on_error=None):
self.filename = path(filename)
xml_root = etree.parse(self.filename)
        return self.parse(xml_root, on_error)
def parse(self, xml_root, on_error=None):
'''
Parse all <path> elements from xml_root.
Optional on_error arg specifies a callback function to be run when
an error occurs during parsing.
The specified on_error function must accept 3 arguments:
<svg filename>, <path_tag>, <error message>
An example on_error handler is provided as svg_load.svg_parser.parse_warning(),
where all SvgParseErrors are converted to warning messages. See usage below:
>>> import re
>>> svg_parser = SvgParser()
>>> path_tag = etree.XML("""
... <path
... xmlns="http://www.w3.org/2000/svg"
... xmlns:dc="http://purl.org/dc/elements/1.1/"
... xmlns:cc="http://creativecommons.org/ns#"
... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
... xmlns:svg="http://www.w3.org/2000/svg"
... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
... xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
... id="path13231"
... d="M8 4 l-4,4"
... linecap="square"
... stroke="#000000"
... stroke-width="0.25"
... />""")
>>> with warnings.catch_warnings(record=True) as w:
... svg = svg_parser.parse(path_tag, on_error=parse_warning)
>>> print w[-1].category
<type 'exceptions.RuntimeWarning'>
>>> match = re.search(r'^Error parsing None:\d+, unsupported svg path command: l', str(w[-1].message))
>>> print match is None
False
>>> path_tag = etree.XML("""
... <path
... xmlns="http://www.w3.org/2000/svg" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
... xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#"
... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
... xmlns:svg="http://www.w3.org/2000/svg"
... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
... style="fill:#0000ff;stroke:#ff0000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none"
... id="path18327"
... d="M 636.0331,256.9345 L 636.0331,256.9345"
... inkscape:connector-curvature="0"/>""")
>>> with warnings.catch_warnings(record=True) as w:
... svg = svg_parser.parse(path_tag, on_error=parse_warning)
>>> print w[-1].category
<type 'exceptions.RuntimeWarning'>
>>> match = re.search(r'^Error parsing None:\d+, loop needs 3 or more verts', str(w[-1].message))
>>> print match is None
False
'''
svg = Svg()
svg_namespace = {'svg': 'http://www.w3.org/2000/svg'}
path_tags = xml_root.xpath('(/svg:svg|/svg:svg/svg:g)/svg:path',
namespaces=svg_namespace)
parser = PathParser()
for path_tag in path_tags:
try:
id, svg_path = parser.parse(path_tag)
if svg_path.loops:
svg.add_path(id, svg_path)
except (ParseError, ), why:
filename = getattr(self, 'filename', None)
args = (filename, path_tag, why.message)
if on_error:
on_error(*args)
else:
raise SvgParseError(*args)
if svg.paths:
x, y = svg.get_boundary().get_center()
for svg_path in svg.paths.values():
svg_path.offset(-x, -y)
return svg
|
Jacy-Wang/MyLeetCode
|
PowerofFour342.py
|
Python
|
gpl-2.0
| 232 | 0 |
class Solution(object):
def isPowerOfFour(self, num):
"""
        :type num: int
:rtype: bool
"""
if num < 1:
return False
return num & (num - 1) == 0 and num & 0x55555555 > 0
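# Editor's note: 0x55555555 has ones only in the even bit positions, so the check
# passes exactly when num has a single set bit (a power of two) and that bit sits
# at an even position, i.e. num is 4**k. Illustrative checks:
#   Solution().isPowerOfFour(16)  # True  (0b10000, bit 4)
#   Solution().isPowerOfFour(8)   # False (0b1000,  bit 3)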
|
rick446/MongoTools
|
mongotools/mim/__init__.py
|
Python
|
mit
| 67 | 0 |
from .mim import Connection, match, MatchDoc, MatchList, BsonArith
|
youtube/cobalt
|
third_party/llvm-project/lldb/packages/Python/lldbsuite/test/lldbinline.py
|
Python
|
bsd-3-clause
| 7,168 | 0.000279 |
from __future__ import print_function
from __future__ import absolute_import
# System modules
import os
# Third-party modules
# LLDB modules
import lldb
from .lldbtest import *
from . import configuration
from . import lldbutil
from .decorators import *
def source_type(filename):
_, extension = os.path.splitext(filename)
return {
'.c': 'C_SOURCES',
'.cpp': 'CXX_SOURCES',
'.cxx': 'CXX_SOURCES',
'.cc': 'CXX_SOURCES',
'.m': 'OBJC_SOURCES',
'.mm': 'OBJCXX_SOURCES'
}.get(extension, None)
class CommandParser:
def __init__(self):
self.breakpoints = []
def parse_one_command(self, line):
parts = line.split('//%')
command = None
new_breakpoint = True
if len(parts) == 2:
command = parts[1].strip() # take off whitespace
new_breakpoint = parts[0].strip() != ""
return (command, new_breakpoint)
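    # Editor's note (illustrative, hypothetical source line): a test source file
    # using this convention might contain something like
    #
    #   int x = argc + 1; //% self.check_expression("x", "2", use_summary=False)
    #
    # where everything after "//%" is run as a Python command once the breakpoint
    # set on that line is hit.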
def parse_source_files(self, source_files):
for source_file in source_files:
file_handle = open(source_file)
lines = file_handle.readlines()
line_number = 0
# non-NULL means we're looking through whitespace to find
# additional commands
current_breakpoint = None
for line in lines:
line_number = line_number + 1 # 1-based, so we do this first
(command, new_breakpoint) = self.parse_one_command(line)
if new_breakpoint:
current_breakpoint = None
if command is not None:
if current_breakpoint is None:
current_breakpoint = {}
current_breakpoint['file_name'] = source_file
current_breakpoint['line_number'] = line_number
current_breakpoint['command'] = command
self.breakpoints.append(current_breakpoint)
else:
current_breakpoint['command'] = current_breakpoint[
'command'] + "\n" + command
def set_breakpoints(self, target):
for breakpoint in self.breakpoints:
breakpoint['breakpoint'] = target.BreakpointCreateByLocation(
breakpoint['file_name'], breakpoint['line_number'])
def handle_breakpoint(self, test, breakpoint_id):
for breakpoint in self.breakpoints:
if breakpoint['breakpoint'].GetID() == breakpoint_id:
test.execute_user_command(breakpoint['command'])
return
class InlineTest(TestBase):
# Internal implementation
def BuildMakefile(self):
makefilePath = self.getBuildArtifact("Makefile")
if os.path.exists(makefilePath):
return
categories = {}
for f in os.listdir(self.getSourceDir()):
t = source_type(f)
if t:
if t in list(categories.keys()):
categories[t].append(f)
else:
categories[t] = [f]
makefile = open(makefilePath, 'w+')
level = os.sep.join(
[".."] * len(self.mydir.split(os.sep))) + os.sep + "make"
makefile.write("LEVEL = " + level + "\n")
for t in list(categories.keys()):
line = t + " := " + " ".join(categories[t])
makefile.write(line + "\n")
if ('OBJCXX_SOURCES' in list(categories.keys())) or (
'OBJC_SOURCES' in list(categories.keys())):
makefile.write(
"LDFLAGS = $(CFLAGS) -lobjc -framework Foundation\n")
if ('CXX_SOURCES' in list(categories.keys())):
makefile.write("CXXFLAGS += -std=c++11\n")
makefile.write("include $(LEVEL)/Makefile.rules\n")
makefile.write("\ncleanup:\n\trm -f Makefile *.d\n\n")
makefile.flush()
makefile.close()
def _test(self):
self.BuildMakefile()
self.build()
self.do_test()
def execute_user_command(self, __command):
exec(__command, globals(), locals())
def do_test(self):
exe = self.getBuildArtifact("a.out")
source_files = [f for f in os.listdir(self.getSourceDir())
if source_type(f)]
target = self.dbg.CreateTarget(exe)
parser = CommandParser()
parser.parse_source_files(source_files)
parser.set_breakpoints(target)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
hit_breakpoints = 0
while lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint):
hit_breakpoints += 1
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
breakpoint_id = thread.GetStopReasonDataAtIndex(0)
parser.handle_breakpoint(self, breakpoint_id)
process.Continue()
self.assertTrue(hit_breakpoints > 0,
"inline test did not hit a single breakpoint")
# Either the process exited or the stepping plan is complete.
self.assertTrue(process.GetState() in [lldb.eStateStopped,
lldb.eStateExited],
PROCESS_EXITED)
# Utilities for testcases
def check_expression(self, expression, expected_result, use_summary=True):
value = self.frame().EvaluateExpression(expression)
self.assertTrue(value.IsValid(), expression + "returned a valid value")
if self.TraceOn():
print(value.GetSummary())
print(value.GetValue())
if use_summary:
answer = value.GetSummary()
else:
answer = value.GetValue()
report_str = "%s expected: %s got: %s" % (
expression, expected_result, answer)
self.assertTrue(answer == expected_result, report_str)
def ApplyDecoratorsToFunction(func, decorators):
tmp = func
if isinstance(decorators, list):
for decorator in decorators:
tmp = decorator(tmp)
elif hasattr(decorators, '__call__'):
tmp = decorators(tmp)
return tmp
def MakeInlineTest(__file, __globals, decorators=None):
# Adjust the filename if it ends in .pyc. We want filenames to
# reflect the source python file, not the compiled variant.
if __file is not None and __file.endswith(".pyc"):
# Strip the trailing "c"
__file = __file[0:-1]
# Derive the test name from the current file name
file_basename = os.path.basename(__file)
InlineTest.mydir = TestBase.compute_mydir(__file)
test_name, _ = os.path.splitext(file_basename)
test_func = ApplyDecoratorsToFunction(InlineTest._test, decorators)
# Build the test case
test_class = type(test_name, (InlineTest,), dict(test=test_func, name=test_name))
# Add the test case to the globals, and hide InlineTest
__globals.update({test_name: test_class})
# Keep track of the original test filename so we report it
# correctly in test results.
test_class.test_filename = __file
return test_class
|
sinharrajesh/dbtools
|
google-plus-analysis/clarify.py
|
Python
|
apache-2.0
| 1,344 | 0.004464 |
#!/usr/bin/python
import json
import logging
import sys
from datetime import datetime
import csv
if __name__ == '__main__':
_loggingLevel = logging.DEBUG ## How much trace
logger = logging.getLogger(__name__)
logging.basicConfig(level=_loggingLevel)
a = {}
altmetricFile = sys.argv[1]
with open(altmetricFile) as afData:
for line in afData:
data = line.rstrip('\n')
a[data] = 0
with open(sys.argv[2], 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter='$', quotechar='\'')
for line in spamreader:
id = line[0]
title = line[1]
url = line[2]
dateP = line[3]
restP = line[4]
actorId = line[5]
actorUrl = line[6]
actorDisplayName = line[7]
verb = line[8]
objectId = line[9]
objectActorId = line[10]
objectActorDisplayName = line[11]
objectContent = line[12]
if url not in a.keys():
in_altmetric = "N"
else:
in_altmetric = "Y"
print("%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%r" %(dateP, restP, id, title, in_altmetric, url, verb, actorDisplayName, actorId, actorUrl, objectId, objectActorId, objectActorDisplayName, objectContent))
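# Illustrative invocation (editor's sketch; file names are placeholders):
#   ./clarify.py altmetric_urls.txt gplus_posts.csv > annotated.csv
# where the first argument lists one URL per line and the second is the
# '$'-delimited Google+ export parsed above.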
|
cxxgtxy/tensorflow
|
tensorflow/python/eager/remote_test.py
|
Python
|
apache-2.0
| 24,106 | 0.008919 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import time
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import server_lib
from tensorflow.python.training.server_lib import ClusterSpec
class SingleWorkerTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(SingleWorkerTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def tearDown(self):
super(SingleWorkerTest, self).tearDown()
# Clear the current device scope to avoid polluting other test cases.
ops.device(None).__enter__()
# Reset the context to avoid polluting other test cases.
context._reset_context()
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceFunctionBasic(self):
@def_function.function
def basic(i):
with ops.device('/job:localhost/replica:0/task:0/cpu:0'):
a = constant_op.constant([2]) + i
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
b = constant_op.constant([1])
return a + b
self.assertAllEqual(basic(constant_op.constant([2])).numpy(), [5])
self.assertAllEqual(basic(constant_op.constant([1])).numpy(), [4])
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceFunctionVariable(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def with_variable(i):
return i + variable_b
self.assertAllEqual(with_variable(constant_op.constant([2])).numpy(), [3])
def testMultiDeviceFunctionRemoteOutput(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def remote_output(i):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
c = variable_b + 1
return i + variable_b, c
rets = remote_output(constant_op.constant([1]))
self.assertAllEqual(rets[0].numpy(), [2])
self.assertAllEqual(rets[1].numpy(), 2)
self.assertEqual(rets[0].backing_device,
'/job:localhost/replica:0/task:0/device:CPU:0')
self.assertEqual(rets[1].backing_device,
'/job:worker/replica:0/task:0/device:CPU:0')
def testMultiDeviceFunctionAmbiguousDevice(self):
@def_function.function
def ambiguous_device(i):
with ops.device('cpu:0'):
return i + constant_op.constant([2])
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
ambiguous_device(constant_op.constant([2])).numpy()
self.assertIn('the output node must match exactly one device',
cm.exception.message)
def testStreaming(self):
"""A mini stress test for streaming - issuing many RPCs back to
|
back."""
with ops.device('job:worker/replica:0/task:0/device:CPU:0'):
x = array_ops.ones([2, 2])
y = array_ops.zeros([2, 2])
num_iters = 200
for _ in range(num_iters):
y = x + y
# Ask for y's shape after every 10 additions on average.
# This exercises waiting for remote shape logic in TensorHandle.
if random.randint(1, 10) == 1:
_ = y.shape
np.testing.assert_array_equal(
[[num_iters, num_iters], [num_iters, num_iters]], y.numpy())
def testShapeError_OpByOp(self):
with ops.device('job:worker/replica:0/task:0/device:CPU:0'):
x = array_ops.ones([2, 3])
y = array_ops.zeros([2, 2])
with self.assertRaises(errors.InvalidArgumentError) as cm:
math_ops.matmul(x, y)
self.assertIn('Dimensions must be equal', cm.exception.message)
@test_util.eager_lazy_remote_copy_on_and_off
def testShapeError_Function(self):
@def_function.function
def matmul_func(x, y):
return math_ops.matmul(x, y)
x = array_ops.ones([2, 3])
y = array_ops.zeros([2, 2])
with ops.device('job:worker/replica:0/task:0/device:CPU:0'):
with self.assertRaises(ValueError) as cm:
matmul_func(x, y)
if six.PY2:
self.assertIn('Dimensions must be equal', cm.exception.message)
else:
self.assertIn('Dimensions must be equal', cm.exception.args[0])
def testClientVarible(self):
var = variables.Variable(initial_value=0)
@def_function.function
def func():
with ops.device('/job:localhost/task:0'):
read = var.read_value()
return read + 1
with ops.device('/job:worker/task:0'):
self.assertAllEqual(func(), 1)
@test_util.eager_lazy_remote_copy_on_and_off
def testRemoteCall(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def _remote_fn(x):
return constant_op.constant(1) + x
remote_fn = _remote_fn.get_concrete_function()
@def_function.function
def func(x):
return functional_ops.remote_call(
args=[x],
Tout=[dtypes.int32],
f=remote_fn,
target='/job:worker/task:0')
with ops.device('/job:localhost/task:0'):
self.assertAllEqual(func(constant_op.constant(1)), [2])
class RemoteAsyncTest(test.TestCase):
def setUp(self):
super(RemoteAsyncTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def tearDown(self):
super(RemoteAsyncTest, self).tearDown()
# Reset the context to avoid polluting other test cases.
context._reset_context()
def test_out_of_range_with_while_loop(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset = dataset.batch(1, drop_remainder=False)
iterator = iter(dataset)
v = variables.Variable(1.0)
@def_function.function
def train_step(iterator):
i = next(iterator)
v.assign_add(math_ops.reduce_mean(i))
while True:
try:
with ops.device('/job:worker/task:0'):
train_step(iterator)
except (errors.OutOfRangeError, errors.InternalError):
context.async_clear_error()
break
self.assertAllEqual(v.numpy(), 4.0)
def test_out_of_range_with_for_loop(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset
|
google-research/jax3d
|
jax3d/projects/nesf/nerfstatic/utils/camera_utils_test.py
|
Python
|
apache-2.0
| 1,892 | 0.0037 |
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax3d.projects.nesf.nerfstatic.utils.camera_utils."""
import chex
from jax3d.projects.nesf.nerfstatic.utils import camera_utils
from jax3d.projects.nesf.nerfstatic.utils import types
import numpy as np
import pytest
def _batched_array(val, dtype=None):
"""Returns the array with leading `1` dimension."""
return np.array(val, dtype=dtype)[None, ...]
# TODO(epot): Support the np.array case. Camera should work for both batched
# and non-batched arrays.
@pytest.mark.parametrize('np_array', [_batched_array])
def test_camera(np_array):
camera = camera_utils.Camera.from_position_and_quaternion(
positions=np_array([2., 0., 1.,]),
quaternions=np_array([0.1, 0.2, 0.3, 0.4]),
resolution=(2, 2),
focal_px_length=280.,
)
  rays = camera.pixel_centers2rays()
expected_rays = types.Rays(
scene_id=None,
origin=np_array([
[[2., 0., 1.],
[2., 0., 1.]],
[[2., 0., 1.],
[2., 0., 1.]],
]),
direction=np_array([
[[-0.27698026, -0.24996764, -0.92779206],
           [-0.2750864, -0.24938536, -0.92851193]],
[[-0.27663719, -0.25217938, -0.92729576],
[-0.27474675, -0.25160123, -0.92801457]],
]),
)
chex.assert_tree_all_close(rays, expected_rays, ignore_nones=True)
|
UManPychron/pychron
|
pychron/hardware/linear_axis.py
|
Python
|
apache-2.0
| 2,566 | 0 |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Float, Property
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.core.abstract_device import AbstractDevice
class LinearAxis(AbstractDevice):
position = Property(depends_on='_position')
_position = Float
min_value = Float(0.0)
max_value = Float(100.0)
min_limit = Property(depends_on='_position')
max_limit = Property(depends_on='_position')
_slewing = False
def set_home(self):
if self._cdevice:
self._cdevice.set_home()
def set_position(self, v, **kw):
if self._cdevice:
self._cdevice.set_position(v, **kw)
# self.add_consumable((self._cdevice.set_position, v, kw))
# def relative_move(self, v):
# self.set_position(self._position + v)
def is_slewing(self):
return self._slewing
def is_stalled(self):
        if self._cdevice:
            return self._cdevice.stalled()
def slew(self, modifier):
if self._cdevice:
self._slewing = True
self._cdevice.slew(modifier)
def stop(self):
if self._cdevice:
self._slewing = False
self._cdevice.stop_drive()
def _get_min_limit(self):
return abs(self._position - self.min_value) < 1e-5
def _get_max_limit(self):
return abs(self._position - self.max_value) < 1e-5
def _get_position(self):
return float('{:0.3f}'.format(self._position))
def _set_position(self, v):
self._position = v
if self._cdevice:
self.set_position(v)
# ============= EOF =============================================
|
hasauino/multi_kobuki_gazebo
|
scripts/tot_r1.py
|
Python
|
mit
| 2,113 | 0.056791 |
#!/usr/bin/env python
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from std_msgs.msg import String
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
import actionlib_msgs.msg
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib
import tf
from os import system
from random import random
from numpy import array,concatenate,vstack,delete,floor,ceil
from numpy import linalg as LA
from numpy import all as All
from time import time
#-----------------------------------------------------
# Subscribers' callbacks------------------------------
mapData=OccupancyGrid()
def mapCallBack(data):
global mapData
mapData=data
# Node---
|
-------------------------------------------
def node():
rospy.init_node('distanceCounter1', anonymous=False)
#-------------------------------------------
rate = rospy.Rate(50)
listener = tf.TransformListener()
listener.waitForTransform('/robot_1/odom', '/robot_1/base_link', rospy.Time(0),rospy.Duration(50.0))
try:
        (trans,rot) = listener.lookupTransform('/robot_1/odom', '/robot_1/base_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
trans=[0,0]
xinx=trans[0]
xiny=trans[1]
xprev=array([xinx,xiny])
distance=0
t0=time()
#-------------------------------RRT------------------------------------------
while not rospy.is_shutdown():
(trans,rot)=listener.lookupTransform('/robot_1/odom', '/robot_1/base_link', rospy.Time(0))
xinx=int(trans[0]*1000)/1000.0
xiny=int(trans[1]*1000)/1000.0
xnew=array([xinx,xiny])
distance+=LA.norm(xnew-xprev)
print distance," elapsed ",(time()-t0)," sec"
xprev=array([xinx,xiny])
rate.sleep()
#_____________________________________________________________________________
if __name__ == '__main__':
try:
node()
except rospy.ROSInterruptException:
pass
|
aesaae/ardupilot_str
|
Tools/ardupilotwaf/git_submodule.py
|
Python
|
gpl-3.0
| 3,169 | 0.002524 |
#!/usr/bin/env python
# encoding: utf-8
"""
Waf tool for defining ardupilot's submodules, so that they are kept up to date.
Submodules can be considered dynamic sources, since they are updated during the
build. Furthermore, they can be used to generate other dynamic sources (mavlink
headers generation, for example). Thus, the correct use of this tool should
have three build groups: first one for updating the submodules, second for
generating any dynamic source from them, and the last one for the build. And
post_mode should be set to POST_LAZY. Example::
def build(bld):
bld.post_mode = waflib.Build.POST_LAZY
bld.add_group('git_submodules')
# gtest submodule
bld(
            features='git_submodule',
git_submodule='gtest',
)
# mavlink submodule with syntactic sugar
bld.git_submodule('mavlink')
...
# now, for the dynamic sources
bld.add_group('dynamic_sources')
...
# now, below go the task generators for normal build process
bld.add_group('build')
...
"""
from waflib import Context, Task, Utils
from waflib.Configure import conf
from waflib.TaskGen import before_method, feature, taskgen_method
import os.path
class update_submodule(Task.Task):
color = 'BLUE'
run_str = '${GIT} -C ${SRC_ROOT} submodule update --init -- ${SUBMODULE_PATH}'
def runnable_status(self):
e = self.env.get_flat
cmd = e('GIT'), '-C', e('SRC_ROOT'), 'submodule', 'status', '--', e('SUBMODULE_PATH')
out = self.generator.bld.cmd_and_log(cmd, quiet=Context.BOTH)
# git submodule status uses a blank prefix for submodules that are up
# to date
if out[0] != ' ':
return Task.RUN_ME
return Task.SKIP_ME
def uid(self):
if not hasattr(self, 'uid_'):
m = Utils.md5()
m.update(self.__class__.__name__)
m.update(self.env.get_flat('SUBMODULE_PATH'))
self.uid_ = m.digest()
return self.uid_
def __str__(self):
return 'Submodule update: %s' % self.submodule
def configure(cfg):
cfg.find_program('git')
_submodules_tasks = {}
@taskgen_method
def git_submodule_update(self, name):
if name not in _submodules_tasks:
module_node = self.bld.srcnode.make_node(os.path.join('modules', name))
tsk = self.create_task('update_submodule', submodule=name)
        tsk.env.SRC_ROOT = self.bld.srcnode.abspath()
tsk.env.SUBMODULE_PATH = module_node.abspath()
        _submodules_tasks[name] = tsk
return _submodules_tasks[name]
@feature('git_submodule')
@before_method('process_source')
def process_module_dependencies(self):
self.git_submodule = getattr(self, 'git_submodule', '')
if not self.git_submodule:
self.bld.fatal('git_submodule: empty or missing git_submodule argument')
self.git_submodule_update(self.git_submodule)
@conf
def git_submodule(bld, git_submodule, **kw):
kw['git_submodule'] = git_submodule
kw['features'] = Utils.to_list(kw.get('features', ''))
kw['features'].append('git_submodule')
return bld(**kw)
|
miltondp/ukbrest
|
tests/test_password_hasher.py
|
Python
|
gpl-3.0
| 5,529 | 0.000723 |
import os
import unittest
from shutil import copyfile
from ruamel.yaml import YAML
from ukbrest.common.utils.auth import PasswordHasher
from tests.utils import get_repository_path
class WSGIFunctions(unittest.TestCase):
def load_data(self, filepath):
yaml = YAML()
with open(filepath, 'r') as f:
return yaml.load(f)
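    # The users files under tests/wsgi/* are YAML mappings of user name to
    # password, e.g. (made-up entry) "john: mypassword" before hashing and a
    # salted "pbkdf2:sha256:..." digest of roughly 90-96 characters afterwards.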
def test_process_users_file_test00(self):
# prepare
orig_user_file = get_repository_path('wsgi/test00/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
|
orig_users = self.load_data(orig_user_file)
|
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
users = self.load_data(users_file)
assert len(users) == 3
for user, password in users.items():
assert user in orig_users.keys(), user
assert password != orig_users[user], password + ' / ' + orig_users[user]
assert 90 < len(password) < 96, (len(password), password)
os.remove(users_file)
def test_process_users_file_file_does_not_exist_test00(self):
# prepare
users_file = get_repository_path('no/existing/file/here.txt')
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
def test_process_users_file_already_hashed_test00(self):
# prepare
orig_user_file = get_repository_path('wsgi/test00/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
orig_users = self.load_data(orig_user_file)
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
users = self.load_data(users_file)
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
new_users = self.load_data(users_file)
assert len(users) == 3
for user, password in new_users.items():
assert user in orig_users.keys(), user
assert password == users[user], password + ' / ' + users[user]
assert 90 < len(password) < 96, (len(password), password)
os.remove(users_file)
def test_process_users_file_one_password_hashed_rest_not_test01(self):
# prepare
orig_user_file = get_repository_path('wsgi/test01/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
orig_users = self.load_data(orig_user_file)
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
users = self.load_data(users_file)
assert len(users) == 3
for user, password in users.items():
assert user in orig_users.keys(), user
if user != 'adams':
assert password != orig_users[user], user + ' / ' + password + ' / ' + orig_users[user]
else:
assert password == users[user], user + password + ' / ' + users[user]
assert 90 < len(password) < 96, (len(password), password)
os.remove(users_file)
def test_verify_password_test01(self):
# prepare
orig_user_file = get_repository_path('wsgi/test01/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
assert not ph.verify_password('milton', 'whatever')
assert ph.verify_password('john', 'mypassword')
assert ph.verify_password('adams', 'anotherpassword')
assert ph.verify_password('james', 'mypassword')
os.remove(users_file)
def test_verify_password_users_file_does_not_exist_test01(self):
# prepare
users_file = get_repository_path('no/existing/file/here.txt')
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert not ph.verify_password('milton', 'whatever')
assert not ph.verify_password('john', 'mypassword')
assert not ph.verify_password('adams', 'anotherpassword')
assert not ph.verify_password('james', 'mypassword')
def test_verify_password_users_file_empty_test01(self):
# prepare
orig_user_file = get_repository_path('wsgi/test02/users.txt')
users_file = orig_user_file + '.bak'
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert not ph.verify_password('milton', 'whatever')
assert not ph.verify_password('john', 'mypassword')
assert not ph.verify_password('adams', 'anotherpassword')
assert not ph.verify_password('james', 'mypassword')
def test_verify_password_users_file_none_test01(self):
# prepare
ph = PasswordHasher(None, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert ph.verify_password('milton', 'whatever')
assert ph.verify_password('john', 'mypassword')
assert ph.verify_password('adams', 'anotherpassword')
assert ph.verify_password('james', 'mypassword')
|
bameda/monarch
|
back/settings/common.py
|
Python
|
agpl-3.0
| 3,078 | 0.00195 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
ADMINS = (
("David Barragán", "bameda@dbarragan.com"),
)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0q)_&-!hu%%en55a&cx!a2c^7aiw*7*+^zg%_&vk9&4&-4&qg#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'monarch.base',
'monarch.documents',
'monarch.users',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'monarch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'TEMPLATE_DEBUG': False,
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
|
royragsdale/picoCTF
|
picoCTF-web/api/cache.py
|
Python
|
mit
| 4,558 | 0.000658 |
"""Caching Library using redis."""
import logging
from functools import wraps
from flask import current_app
from walrus import Walrus
import api
import hashlib
import pickle
from api import PicoException
log = logging.getLogger(__name__)
__redis = {
"walrus": None,
"cache": None,
"zsets": {"scores": None},
}
def get_conn():
"""Get a redis connection, reusing one if it exists."""
global __redis
if __redis.get("walrus") is None:
conf = current_app.config
try:
__redis["walrus"] = Walrus(
host=conf["REDIS_ADDR"],
port=conf["REDIS_PORT"],
password=conf["REDIS_PW"],
db=conf["REDIS_DB_NUMBER"],
)
except Exception as error:
raise PicoException(
"Internal server error. " + "Please contact a system administrator.",
data={"original_error": error},
)
return __redis["walrus"]
def get_cache():
"""Get a walrus cache, reusing one if it exists."""
global __redis
if __redis.get("cache") is None:
__redis["cache"] = get_conn().cache(default_timeout=0)
return __redis["cache"]
def get_score_cache():
global __redis
if __redis["zsets"].get("scores") is None:
__redis["zsets"]["scores"] = get_conn().ZSet("scores")
return __redis["zsets"]["scores"]
def get_scoreboard_cache(**kwargs):
global __redis
scoreboard_name = "scoreboard:{}".format(_hash_key((), kwargs))
if __redis["zsets"].get(scoreboard_name) is None:
__redis["zsets"][scoreboard_name] = get_conn().ZSet(scoreboard_name)
return __redis["zsets"][scoreboard_name]
def clear():
global __redis
if __redis.get("walrus") is not None:
__redis["walrus"].flushdb()
def __insert_cache(f, *args, **kwargs):
"""
Directly upserting without first invalidating, thus keeping a memoized
value available without lapse
"""
if f == api.stats.get_score:
raise PicoException("Error: Do not manually reset_cache get_score")
else:
key = "%s:%s" % (f.__name__, _hash_key(args, kwargs))
value = f(*args, **kwargs)
get_cache().set(key, value)
return value
def memoize(_f=None, **cached_kwargs):
"""walrus.Cache.cached wrapper that reuses shared cache."""
def decorator(f):
@wraps(f)
        def wrapper(*args, **kwargs):
if kwargs.get("reset_cache", False):
kwargs.pop("reset_cache", None)
return __insert_cache(f, *args, **kwargs)
else:
return get_cache().cached(**cached_kwargs)(f)(*args, **kwargs)
return wrapper
if _f is None:
return decorator
else:
return decorator(_f)
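# Illustrative use of memoize (the decorated function below is hypothetical):
# results are cached under "<function name>:<hash of args>"; passing
# reset_cache=True recomputes and upserts the value via __insert_cache, so the
# cached entry never lapses.
#
#   @memoize
#   def get_problem_count(category):
#       ...
#   get_problem_count("crypto")                    # served from the walrus cache
#   get_problem_count("crypto", reset_cache=True)  # recompute and re-store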
def _hash_key(a, k):
return hashlib.md5(pickle.dumps((a, k))).hexdigest()
def get_scoreboard_key(team):
    # For lack of better idea of delimiter, use '>' illegal team name char
return "{}>{}>{}".format(team["team_name"], team["affiliation"], team["tid"])
def decode_scoreboard_item(item, with_weight=False, include_key=False):
"""
:param item: tuple of ZSet (key, score)
:param with_weight: keep decimal weighting of score, or return as int
:param include_key: whether to include to raw key
:return: dict of scoreboard item
"""
key = item[0].decode("utf-8")
data = key.split(">")
score = item[1]
if not with_weight:
score = int(score)
output = {"name": data[0], "affiliation": data[1], "tid": data[2], "score": score}
if include_key:
output["key"] = key
return output
def search_scoreboard_cache(scoreboard, pattern):
"""
:param scoreboard: scoreboard cache ZSet
:param pattern: text pattern to search team names and affiliations,
not including wildcards
:return: sorted list of scoreboard entries
"""
# Trailing '*>' avoids search on last token, tid
results = [
decode_scoreboard_item(item, with_weight=True, include_key=True)
for item in list(scoreboard.search("*{}*>*".format(pattern)))
]
return sorted(results, key=lambda item: item["score"], reverse=True)
def invalidate(f, *args, **kwargs):
"""
Clunky way to replicate busting behavior due to awkward wrapping of walrus
cached decorator
"""
if f == api.stats.get_score:
key = args[0]
get_score_cache().remove(key)
else:
key = "%s:%s" % (f.__name__, _hash_key(args, kwargs))
get_cache().delete(key)
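# e.g. invalidate(api.stats.get_score, tid) removes that team's entry from the
# score ZSet, while invalidate(some_memoized_fn, arg) (hypothetical function)
# deletes the "<name>:<argument hash>" key from the shared walrus cache.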
|
etkirsch/legends-of-erukar
|
erukar/system/engine/lifeforms/Player.py
|
Python
|
agpl-3.0
| 1,072 | 0.027052 |
from .Lifeform import Lifeform
from erukar.system.engine import Indexer
from erukar.ext.math.Distance import Distance
class Player(Lifeform, Indexer):
def __init__(self, world=None):
Indexer.__init__(self)
super().__init__(world)
self.faction = 'iurian'
self.uid = '' # Player UID
self.credits = 0
self.define_level(1)
|
def alias(self):
return self.uid
def lifeform(self):
return self
def generate_tile(self, dimensions, tile_id):
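        # Yields one RGBA dict per pixel, row by row: transparent outside a disc
        # of radius ~w/3, an opaque black ring between radii ~w/4 and ~w/3, and
        # solid green inside the inner disc.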
        h, w = dimensions
radius = int(w/3)-1
circle = list(Distance.points_in_circle(radius, (int(h/2),int(w/2))))
inner_circle = list(Distance.points_in_circle(int(w/4)-1, (int(h/2),int(w/2))))
for y in range(h):
for x in range(w):
if (x,y) in circle:
if (x,y) not in inner_circle:
yield {'r':0,'g':0,'b':0,'a':1}
else:
yield {'r':0,'g':255,'b':0,'a':1}
else: yield {'r':0,'g':0,'b':0,'a':0}
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Policy/test/Test_RSS_Policy_JobEfficiencyPolicy.py
|
Python
|
gpl-3.0
| 3,905 | 0.06274 |
''' Test_RSS_Policy_JobEfficiencyPolicy
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import DIRAC.ResourceStatusSystem.Policy.JobEfficiencyPolicy as moduleTested
################################################################################
class JobEfficiencyPolicy_TestCase( unittest.TestCase ):
def setUp( self ):
|
'''
Setup
'''
self.moduleTested = moduleTested
self.testClass = self.moduleTested.JobEfficiencyPolicy
def tearDown( self ):
'''
Tear down
'''
del self.moduleTested
del self.testClass
################################################################################
class JobEfficiencyPolicy_Success( JobEfficiencyPolicy_TestCase ):
def test_instantiate( self ):
''' tests that we can instantiate one object of the tested class
'''
module = self.testClass()
self.assertEqual( 'JobEfficiencyPolicy', module.__class__.__name__ )
def test_evaluate( self ):
''' tests the method _evaluate
'''
module = self.testClass()
res = module._evaluate( { 'OK' : False, 'Message' : 'Bo!' } )
self.assertTrue(res['OK'])
self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Bo!', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : None } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{}] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 0, 'Failed' : 0 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Not enough jobs to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 0, 'Failed' : 1 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Not enough jobs to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 0, 'Failed' : 10 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Banned', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Jobs Efficiency of 0.00', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 8, 'Failed' : 2 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Degraded', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Jobs Efficiency of 0.80', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 10, 'Done' : 9, 'Failed' : 1 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Active', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Jobs Efficiency of 0.95', res[ 'Value' ][ 'Reason' ] )
################################################################################
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( JobEfficiencyPolicy_TestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( JobEfficiencyPolicy_Success ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
DmitryADP/diff_qc750
|
vendor/nvidia/tegra/3rdparty/python-support-files/src/Lib/random.py
|
Python
|
gpl-2.0
| 31,938 | 0.002536 |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the semantics of
jumpahead(n) are weakened to simply jump to another distant state and rely
on the large period to avoid overlapping sequences.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from __future__ import division
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state. Especially useful for multi-threaded programs, creating
a different instance of Random for each thread, and using the jumpahead()
method to ensure that the generated sequences seen by each thread don't
overlap.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), setstate() and jumpahead().
Optionally, implement a getrandbits() method so that randrange() can cover
arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
If a is not None or an int or long, hash(a) is used instead.
"""
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
super(Random, self).seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super(Random, self).getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super(Random, self).setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple( long(x) % (2**32) for x in internalstate )
except ValueError, e:
raise TypeError, e
super(Random, self).setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None,
maxwidth=1L<<BPF):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
Do not supply the 'int', 'default', and 'maxwidth' arguments.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
|
if stop is default:
if istart > 0:
if istart >= maxwidth:
return self._randbelow(istart)
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
width = istop - istart
if step == 1 and width > 0:
# Note that
# int(istart + self.random()*width)
# instead would be incorrect. For example, consider istart
# = -2 and istop = 0. Then the guts would be in
# -2.0 to 0.0 exclusive on both ends (ignoring that random()
# might return 0.0), and because int() truncates toward 0, the
# final result would be -1 or 0 (instead of -2 or -1).
# istart + int(self.random()*width)
# would also be incorrect, for a subtler reason: the RHS
# can return a long, and then randrange() would also return
# a long, but we're supposed to return an int (for backward
# compatibility).
if width >= maxwidth:
return int(istart + self._randbelow(width))
return int(istart + int(self.random()*width))
if step == 1:
raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
if n >= maxwidth:
ret
|
vipints/oqtans
|
oqtans_tools/PALMapper/0.5/galaxy/genomemapper_wrapper.py
|
Python
|
bsd-3-clause
| 4,421 | 0.013798 |
#! /usr/bin/python
"""
Runs GenomeMapper on single-end or paired-end data.
"""
import optparse, os, sys, tempfile
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option('', '--threads', dest='threads', help='The number of threads to run')
parser.add_option('', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format')
parser.add_option('', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format')
parser.add_option('', '--output', dest='output', help='The output file')
parser.add_option('', '--paired', dest='paired', help='Whether the data is single- or paired-end')
parser.add_option('', '--genomeSource', dest='genomeSource', help='The type of reference provided')
parser.add_option('', '--ref', dest='ref', help='The reference genome to use or index')
parser.add_option('', '--indexSettings', dest='index_settings', help='Whether or not indexing options are to be set')
parser.add_option('', '--params', dest='params', help='Whether to use default or specified parameters')
parser.add_option('', '--seedlength', dest='seedlength', help='GenomeMapper Index Seed Length')
parser.add_option('', '--alignseedlength', dest='alignseedlength', help='GenomeMapper Alignment Seed Length')
parser.add_option('', '--format', dest='format', help='Output format (bed or shore)')
parser.add_option('', '--maxmismatches', dest='maxmismatches', help='Maximal number of mismatches')
parser.add_option('', '--maxgaps', dest='maxgaps', help='Maximal number of gaps')
parser.add_option('', '--maxedits', dest='maxedits', help='Maximal number of edit operations')
parser.add_option('', '--reportall', dest='reportall', help='Report all hits')
(options, args) = parser.parse_args()
# index if necessary
if options.genomeSource == 'history':
# set up commands
if options.index_settings =='index_pre_set':
indexing_cmds = ''
else:
try:
indexing_cmds = '%s ' % \
(('','-s %s'%options.seedlength)[options.seedlength!='None' and options.seedlength>=1])
except ValueError:
indexing_cmds = ''
# make temp directory for placement of indices and copy reference file there
tmp_dir = tempfile.gettempdir()
try:
os.system('cp %s %s' % (options.ref, tmp_dir))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.ref = os.path.join(tmp_dir, os.path.split(options.ref)[1])
|
cmd1 = 'gmindex -v -i %s %s' % (options.ref, indexing_cmds)
try:
os.system(cmd1)
except Exception, erf:
stop_err('Error indexing reference sequence\n' + str(erf))
if options.params == 'pre_set':
aligning_cmds = '-v '
else:
try:
print options
aligning_cmds = '%s %s %s %s %s %s -v ' % \
				(('','-f %s' % options.format)[options.format!='None'],
('','-a')[options.reportall!='None'],
('','-M %s' % options.maxmismatches)[options.maxmismatches!='None'],
('','-G %s' % options.maxgaps)[options.maxgaps!='None'],
('','-E %s' % options.maxedits)[options.maxedits!='None'],
('','-l %s' % options.alignseedlength)[options.alignseedlength!='None'])
except ValueError, erf:
stop_err('Something is wrong with the alignment parameters and the alignment could not be run\n' + str(erf))
# prepare actual aligning commands
if options.paired == 'paired':
print "Sorry, paired end alignments are not implemented yet"
return
#cmd2 = 'genomemapper %s %s -1 %s -2 %s > %s ' % (options.ref, options.input1, options.input2, options.output)
else:
cmd2 = 'genomemapper %s -i %s -q %s -o %s ' % (aligning_cmds, options.ref, options.input1, options.output)
# align
try:
print cmd2
os.system(cmd2)
except Exception, erf:
stop_err("Error aligning sequence\n" + str(erf))
if __name__=="__main__": __main__()
|
bobjacobsen/SPShasta
|
userfiles/ConfigureCtcControlLogic.py
|
Python
|
gpl-2.0
| 21,226 | 0.015076 |
# Configure SP Shasta CTC machine support
#
# Exensively uses the jmri.jmrit.ussctc package capabilities
#
# Author: Bob Jacobsen, copyright 2016-17
#
import jmri
from jmri.jmrit.ussctc import *
import jarray
import java.util
def arrayList(contents) :
retval = java.util.ArrayList()
for item in contents :
retval.add(item)
return retval
# When the call-on turnout is set THROWN, show restricting on signals
class ForceRestrictingWhenCallOn(java.beans.PropertyChangeListener):
def set(self, callOnName, groupList) :
self.callon = turnouts.getTurnout(callOnName)
        self.groupNames = groupList
# set up listeners
self.callon.addPropertyChangeListener(self)
for name in self.groupNames :
signals.getSignalHead(name).addPropertyChangeListener(self) # need to fix it if held
return
def propertyChange(self, event):
        if (event.source == self.callon) :
if (self.callon.state == THROWN) :
for name in self.groupNames :
logic = jmri.jmrit.blockboss.BlockBossLogic.getExisting(signals.getSignalHead(name))
print "Setting logic", logic
logic.setRestrictingSpeed1(True)
logic.setRestrictingSpeed2(True)
signals.getSignalHead(name).setHeld(False) # sets output too
else :
for name in self.groupNames :
logic = jmri.jmrit.blockboss.BlockBossLogic.getExisting(signals.getSignalHead(name))
logic.setRestrictingSpeed1(False)
logic.setRestrictingSpeed2(False)
signals.getSignalHead(name).setHeld(True) # sets output too
else :
if (event.propertyName == "Held") :
if (self.callon.state == THROWN and event.source.held != False) : event.source.setHeld(False)
return
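# Illustrative wiring (the call-on turnout name is a placeholder; the signal
# heads are ones defined further below): setting the turnout THROWN drops the
# listed heads to restricting instead of held.
#   ForceRestrictingWhenCallOn().set("CTC 08 Call On", ["08 R from Helix", "08 L Upper"])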
class ConfigureCtcControlLogic(jmri.jmrit.automat.AbstractAutomaton) :
def init(self):
return
def handle(self):
# delay long enough for debug init to run if present, polling to start, turnouts to be in place, plus a bit more
self.waitMsec(1000+8000+2000+500) # time is in milliseconds
print "ConfigureCtcControlLogic starts"
# The code line is shared by all Stations
codeline = CodeLine("CTC Code Indication Driver", "CTC Code Send Driver", "IT CODE MOD 1", "IT CODE MOD 2", "IT CODE MOD 3", "IT CODE MOD 4")
bell = PhysicalBell("CTC Bell")
vbell = VetoedBell("CTC Bell Cutout", bell)
# ===== Set up Station 1/2 ===== (TODO: NOT FULLY CONFIGURED)
button = CodeButton("CTC 02 Code A", "CTC 02 Code")
station = Station("1/2", codeline, button)
CombineTurnouts().set("CTC TC 01", ["CTC TC 01A"]) # right-side repeater
CombineTurnouts().set("CTC TC 02", ["CTC TC 02A"]) # right-side repeater
station.add(TrackCircuitSection("TC 01","CTC TC 01", station)) # -1 main
station.add(TrackCircuitSection("TC 02","CTC TC 02", station)) # -3 siding
station.add(TrackCircuitSection("TC 03","CTC TC 03", station)) # 1 OS
turnout1 = TurnoutSection("Helix Level 2 B", "CTC 01 N", "CTC 01 R", "CTC 01 N", "CTC 01 R", station)
station.add(turnout1)
# ===== Set up Station 3/4 ===== (TODO: NOT FULLY CONFIGURED)
button = CodeButton("CTC 04 Code A", "CTC 04 Code")
station = Station("3/4", codeline, button)
station.add(TrackCircuitSection("TC 04","CTC TC 04", station)) # 3 OS
turnout3 = TurnoutSection("Helix Level 2 A", "CTC 03 N", "CTC 03 R", "CTC 03 N", "CTC 03 R", station)
station.add(turnout3)
# ===== Set up Station 5/6 ===== (TODO: NOT FULLY CONFIGURED)
button = CodeButton("CTC 06 Code A", "CTC 06 Code")
station = Station("5/6", codeline, button)
station.add(TrackCircuitSection("TC 05","CTC TC 05", station)) # 1-5 siding
station.add(TrackCircuitSection("TC 06","CTC TC 06", station)) # 3-5 main
station.add(TrackCircuitSection("TC 07","CTC TC 07", station, bell)) # 5 OS
turnout5 = TurnoutSection("Helix Level 1", "CTC 05 N", "CTC 05 R", "CTC 05 N", "CTC 05 R", station)
station.add(turnout5)
station.add(MaintainerCallSection("CTC 06 Call","MC 6", station))
# ===== Set up Station 7/8 =====
button = CodeButton("CTC 08 Code A", "CTC 08 Code")
station = Station("7/8", codeline, button)
station.add(TrackCircuitSection("TC 08","CTC TC 08", station)) # 5-7 track
station.add(TrackCircuitSection("TC 09","CTC TC 09", station, bell)) # Redding approach
station.add(TrackCircuitSection("TC 10","CTC TC 10", station, vbell)) # OS 7
turnout7 = TurnoutSection("TO 07", "CTC 07 N", "CTC 07 R", "CTC 07 N", "CTC 07 R", station)
station.add(turnout7)
rightward = arrayList(["08 R from Helix", "08 R from Staging"])
leftward = arrayList(["08 L Upper", "08 L Lower"])
signal = SignalHeadSection(rightward, leftward, "CTC 08 L", "CTC 08 C", "CTC 08 R", "CTC 08 L", "CTC 08 R", station);
station.add(signal)
occupancyLock = OccupancyLock("TC 10")
routeLock = RouteLock(["08 R from Helix", "08 R from Staging", "08 L Upper", "08 L Lower"]);
turnout7.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal)]));
# ===== Set up Station 9/10 =====
button = CodeButton("CTC 10 Code A", "CTC 10 Code")
station = Station("9/10", codeline, button)
station.add(TrackCircuitSection("TC 11","CTC TC 11", station)) # 7-9
station.add(TrackCircuitSection("TC 12","CTC TC 12", station, vbell)) # OS 9
turnout9 = TurnoutSection("TO 09", "CTC 09 N", "CTC 09 R", "CTC 09 N", "CTC 09 R", station)
station.add(turnout9)
rightward = arrayList(["10 R Upper", "10 R Lower"])
leftward = arrayList(["10 L Main", "10 L Siding"])
signal = SignalHeadSection(rightward, leftward, "CTC 10 L", "CTC 10 C", "CTC 10 R", "CTC 10 L", "CTC 10 R", station);
station.add(signal)
occupancyLock = OccupancyLock("TC 12")
routeLock = RouteLock(["10 R Upper", "10 R Lower", "10 L Main", "10 L Siding"]);
turnout9.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal)]));
# ===== Set up Station 13/14/16 =====
button = CodeButton("CTC 14 Code A", "CTC 14 Code")
station = Station("13/14/16", codeline, button)
station.add(TrackCircuitSection("TC 13","CTC TC 13", station)) # 9-13 siding
station.add(TrackCircuitSection("TC 14","CTC TC 14", station)) # 9-13 main
station.add(TrackCircuitSection("TC 15","CTC TC 15", station, vbell)) # OS 13 siding
station.add(TrackCircuitSection("TC 16","CTC TC 16", station, vbell)) # OS 13 main
turnout13 = TurnoutSection("TO 13", "CTC 13 N", "CTC 13 R", "CTC 13 N", "CTC 13 R", station)
station.add(turnout13)
rightward = arrayList(["14 R Main"])
leftward = arrayList(["14 L Main"])
signal1 = SignalHeadSection(rightward, leftward, "CTC 14 L", "CTC 14 C", "CTC 14 R", "CTC 14 L", "CTC 14 R", station);
station.add(signal1)
rightward = arrayList(["16 R Siding"])
leftward = arrayList(["16 L Bridge Upper", "16 L Bridge Lower", "16 L Siding"])
signal2 = SignalHeadSection(rightward, leftward, "CTC 16 L", "CTC 16 C", "CTC 16 R", "CTC 16 L", "CTC 16 R", station);
station.add(signal2)
occupancyLock = CombinedLock([OccupancyLock("TC 15"), OccupancyLock("TC 16")])
routeLock = RouteLock(["14 R Main", "16 R Siding", "14 L Main", "16 L Bridge Upper", "16 L Bridge Lower", "16 L Siding"]);
turnout13.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal1), TimeLock(signal2)]));
# ===== Set up Station 17/18/20 =====
button = CodeButton("CTC 18 Code A", "CTC 18 Code")
station = Station("17/18/20", codeline, button)
station.add(TrackCircuitSection("TC 17","CTC TC 17", station)) # 13-17 main
station.add(TrackCircuitSection("TC 18","CTC TC 18", station)) # 13-17 siding
station.add(TrackCircuitSection("TC 19","CTC TC 19", station, vbell)) # OS 17 main
station.add(TrackCircuitSection("TC 20","CTC TC 20", station, vbell)) # OS 17 siding
turnout17 = TurnoutSection("TO 17", "CTC 17 N", "CTC 17 R", "CTC 17 N", "CTC 17 R", station)
station.add(turnout17)
rightward = arrayList(["18 R"])
|
tfroehlich82/saleor
|
saleor/wsgi/__init__.py
|
Python
|
bsd-3-clause
| 1,225 | 0.001633 |
"""
WSGI config for saleor project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
from .health_check import health_check
application = health_check(application, '/health/')
|
nromashchenko/amquery
|
amquery/utils/decorators/__init__.py
|
Python
|
mit
| 265 | 0 |
from ._decorators import singleton,\
hide_field
__license__ = "MIT"
|
__version__ = "0.2.1"
__author__ = "Nikolay Romashchenko"
__maintainer__ = "Nikolay Romashchenko"
__email__ = "nikolay.romashchenko@gmail.com"
__status__ = "Development"
|
gpotter2/scapy
|
doc/scapy/_ext/linkcode_res.py
|
Python
|
gpl-2.0
| 3,293 | 0.002126 |
import inspect
import os
import sys
import scapy
# -- Linkcode resolver -----------------------------------------------------
# This is HEAVILY inspired by numpy's
# https://github.com/numpy/numpy/blob/73fe877ff967f279d470b81ad447b9f3056c1335/doc/source/conf.py#L390
# Copyright (c) 2005-2020, NumPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
fn = None
lineno = None
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
fn = os.path.relpath(fn, start=os.path.dirname(scapy.__file__))
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
if 'dev' in scapy.__version__:
return "https://github.com/secdev/scapy/blob/master/scapy/%s%s" % (
fn, linespec)
else:
return "https://github.com/secdev/scapy/blob/v%s/scapy/%s%s" % (
scapy.__version__, fn, linespec)
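    # Illustrative outcome (hypothetical inputs): domain='py' with
    # info={'module': 'scapy.layers.inet', 'fullname': 'IP'} resolves to a GitHub
    # URL for scapy/layers/inet.py, pinned to master for dev versions and to the
    # v<release> tag otherwise, with an #L<start>-L<end> fragment when the source
    # lines can be located.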
|
astrofle/CRRLpy
|
examples/synth_spec/makemodel.py
|
Python
|
mit
| 4,292 | 0.011883 |
#!/usr/bin/env python
"""
Example of a model fitting script.
The user should modify the model
according to the characteristics of
the signal of interest.
Part of the code is taken from the
kmpfit examples:
http://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#profile-fitting
"""
import numpy as np
import argparse
from lmfit import Model
from scipy.special import wofz
from crrlpy import crrls
ln2 = np.log(2)
def voigt(x, y):
# The Voigt function is also the real part of
# w(z) = exp(-z^2) erfc(iz), the complex probability function,
# which is also known as the Faddeeva function. Scipy has
# implemented this function under the name wofz()
z = x + 1j*y
I = wofz(z).real
return I
def Voigt(nu, alphaD, alphaL, nu_0, A, a, b):
"""
The Voigt line shape in terms of its physical parameters
nu: independent variable
alphaD: FWHM of the Gaussian
alphaL: FWHM of the Lorentzian
nu_0: the line center
A: the line area
a, b: background parameters. bkgd = a + b*nu
"""
f = np.sqrt(ln2)
x = (nu - nu_0)/alphaD * f
y = alphaL/alphaD * f
bkgd = a + b*nu
V = A*f/(alphaD*np.sqrt(np.pi)) * voigt(x, y) + bkgd
return V
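# Illustrative call (made-up parameter values): a line centred at -47 km/s with
# a 3 km/s Doppler FWHM, a 1 km/s Lorentzian FWHM, unit area and a flat zero
# background:
#
#   profile = Voigt(nu, alphaD=3.0, alphaL=1.0, nu_0=-47.0, A=1.0, a=0.0, b=0.0)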
def funcV(x, p):
# Compose the Voigt line-shape
alphaD, alphaL, nu_0, I, a, b = p
return Voigt(x, alphaD, alphaL, nu_0, I, a, b)
def main(spec, output, plot):
"""
"""
dD = 3 # 3 km/s Doppler FWHM for the lines
data = np.loadtxt(spec)
x = data[:,0]
y = data[:,1]
w = data[:,2]
# Catch NaNs and invalid values:
mask_x = np.ma.masked_equal(x, -9999).mask
mask_y = np.isnan(y)
mask = np.array(reduce(np.logical_or, [mask_x, mask_y]))
mx = x[~mask]
my = y[~mask]
mw = w[~mask]
# Create the model and set the parameters
mod1 = Model(Voigt, prefix='V1_')
pars = mod1.make_params()
#mod2 = Model(Voigt, prefix='V2_')
#pars += mod2.make_params()
mod = mod1 #+ mod2
# Edit the model parameter starting values, conditions, etc...
# Background parameters
pars['V1_a'].set(value=0, expr='', vary=False)
pars['V1_b'].set(value=0, expr='', vary=False)
#pars['V2_a'].set(value=0, expr='', vary=False)
#pars['V2_b'].set(value=0, expr='', vary=False)
# Line center
pars['V1_nu_0'].set(value=-47., vary=True, min=-50, max=-45)
    #pars['V2_nu_0'].set(value=-37.6, expr='V1_nu_0+9.4', vary=False)
# Line area
pars['V1_A'].set(value=-1e-2, max=-1e-8)
#pars['V2_A'].set(value=-1e-2, max=-1e-8)
# Line width
pars['V1_alphaD'].set(value=dD, vary=True, min=0.)
#pars['V2_alphaD'].set(value=dD, vary=True, min=0.)
    pars['V1_alphaL'].set(value=1, vary=True, min=0, max=250.)
#pars['V2_alphaL'].set(value=1, vary=True, min=0, max=250.)
# Fit the model using a weight
fit = mod.fit(my, pars, nu=mx, weights=mw)
fit1 = Voigt(mx, fit.params['V1_alphaD'].value, fit.params['V1_alphaL'].value,
fit.params['V1_nu_0'].value, fit.params['V1_A'].value, fit.params['V1_a'].value, 0)
#fit2 = Voigt(mx, fit.params['V2_alphaD'].value, fit.params['V2_alphaL'].value,
#fit.params['V2_nu_0'].value, fit.params['V2_A'].value, 0, 0)
fit3 = fit.best_fit
#mody = np.array([fit1, fit2, fit3])
mody = np.array([fit1, fit3])
#modx = np.array([mx, mx, mx])
modx = np.array([mx, mx])
# Disable for now, and check this: http://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server
# fir a possible fix.
crrls.plot_model(mx, my, modx, mody, plot)
np.savetxt(output, np.c_[mx, fit.best_fit])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('spec', type=str,
help="Spectrum to be fit. (string)")
parser.add_argument('output', type=str,
help="Name of the output file with the best fit model. (string)")
parser.add_argument('plot', type=str,
help="Name of the output figure. (string)")
args = parser.parse_args()
main(args.spec, args.output, args.plot)
|
em92/pickup-rating
|
qllr/blueprints/player/__init__.py
|
Python
|
mit
| 2,763 | 0.002533 |
# -*- coding: utf-8 -*-
from typing import Tuple
from asyncpg import Connection
from starlette.requests import Request
from starlette.responses import JSONResponse, RedirectResponse
from starlette.routing import Route
from qllr.app import App
from qllr.endpoints import Endpoint, HTTPEndpoint
from qllr.templating import templates
from .methods import get_best_match_of_player, get_player_info, get_player_info_mod_date
bp = App()
class PlayerEndpoint(Endpoint):
async def get_last_doc_modified(self, request: Request, con: Connection) -> Tuple:
return await get_player_info_mod_date(
con, request.path_params["steam_id"], request.path_params.get("gametype_id")
)
class PlayerJson(PlayerEndpoint):
async def get_document(self, request: Request, con: Connection):
steam_id = request.path_params["steam_id"]
return JSONResponse(await get_player_info(con, steam_id))
class PlayerHtml(PlayerEndpoint):
async def get_document(self, request: Request, con: Connection):
steam_id = request.path_params["steam_id"]
context = await get_player_info(con, steam_id)
context["request"] = request
context["steam_id"] = str(steam_id)
return templates.TemplateResponse("player_stats.html", context)
class PlayerMatchesDeprecatedRoute(HTTPEndpoint):
async def get(self, request: Request):
return RedirectResponse(
request.url_for(
"MatchesHtml",
steam_id=request.path_params["steam_id"],
page=request.path_params.get("page"),
gametype=request.path_params.get("gametype"),
),
status_code=308,
)
class BestMatchOfPlayerRedirect(PlayerEndpoint):
async def get_document(self, request: Request, con: Connection):
steam_id = request.path_params["steam_id"]
gametype_id = request.path_params["gametype_id"]
        match_id = await get_best_match_of_player(con, steam_id, gametype_id)
return RedirectResponse(request.url_for("ScoreboardHtml", match_id=match_id))
routes = [
Route("/{steam_id:int}.json", endpoint=PlayerJson),
Route("/{steam_id:int}", endpoint=PlayerHtml),
Route("/{steam_id:int}/matches", endpoint=PlayerMatchesDeprecatedRoute),
Route("/{steam_id:int}
|
/matches/", endpoint=PlayerMatchesDeprecatedRoute),
Route("/{steam_id:int}/matches/{page:int}/", endpoint=PlayerMatchesDeprecatedRoute),
Route("/{steam_id:int}/matches/{gametype}/", endpoint=PlayerMatchesDeprecatedRoute),
Route(
"/{steam_id:int}/matches/{gametype}/{page:int}/",
endpoint=PlayerMatchesDeprecatedRoute,
),
Route("/{steam_id:int}/best_match/{gametype}", endpoint=BestMatchOfPlayerRedirect),
]
|
VirrageS/io-kawiarnie
|
caffe/caffe/models.py
|
Python
|
mit
| 931 | 0 |
"""Module with Caffe models."""
from django.db import models
from employees.models import Employee
class Caffe(models.Model):
"""Stores one cafe."""
name = models.CharField(max_length=100, unique=True)
city = models.CharField(max_length=100)
street = models.CharField(max_length=100)
	# CharField for extra characters like '-'
postal_code = models.CharField(max_length=20)
# CharFields in case house numbers like '1A'
building_number = models.CharField(max_length=10)
house_number = models.CharField(max_length=10, blank=True)
created_on = models.TimeField(auto_now_add=True)
creator = models.ForeignKey(Employee,
								related_name='my_caffe',
default=None,
blank=False,
null=True)
def __str__(self):
		return '{}, {}'.format(self.name, self.city)
|
yotchang4s/cafebabepy
|
src/main/python/ctypes/test/test_find.py
|
Python
|
bsd-3-clause
| 3,948 | 0.000507 |
import unittest
import os.path
import sys
import test.support
from ctypes import *
from ctypes.util import find_library
# On some systems, loading the OpenGL libraries needs the RTLD_GLOBAL mode.
class Test_OpenGL_libs(unittest.TestCase):
@classmethod
def setUpClass(cls):
lib_gl = lib_glu = lib_gle = None
if sys.platform == "win32":
lib_gl = find_library("OpenGL32")
lib_glu = find_library("Glu32")
elif sys.platform == "darwin":
lib_gl = lib_glu = find_library("OpenGL")
else:
lib_gl = find_library("GL")
lib_glu = find_library("GLU")
lib_gle = find_library("gle")
## print, for debugging
if test.support.verbose:
print("OpenGL libraries:")
for item in (("GL", lib_gl),
("GLU", lib_glu),
("gle", lib_gle)):
print("\t", item)
cls.gl = cls.glu = cls.gle = None
if lib_gl:
try:
cls.gl = CDLL(lib_gl, mode=RTLD_GLOBAL)
except OSError:
pass
if lib_glu:
try:
cls.glu = CDLL(lib_glu, RTLD_GLOBAL)
except OSError:
pass
if lib_gle:
try:
cls.gle = CDLL(lib_gle)
except OSError:
pass
@classmethod
def tearDownClass(cls):
cls.gl = cls.glu = cls.gle = None
def test_gl(self):
if self.gl is None:
self.skipTest('lib_gl not available')
self.gl.glClearIndex
def test_glu(self):
|
if self.glu is None:
self.skipTest('lib_glu not available')
self.glu.gluBeginCurve
def test_gle(self):
if self.gle is None:
self.skipTest('lib_gle not available')
self.gle.gleGetJoinStyle
def test_shell_injection(self):
result = find_library('; echo Hello shell > ' + test.support.TESTFN)
        self.assertFalse(os.path.lexists(test.support.TESTFN))
self.assertIsNone(result)
@unittest.skipUnless(sys.platform.startswith('linux'),
'Test only valid for Linux')
class LibPathFindTest(unittest.TestCase):
def test_find_on_libpath(self):
import subprocess
import tempfile
try:
p = subprocess.Popen(['gcc', '--version'], stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
out, _ = p.communicate()
except OSError:
raise unittest.SkipTest('gcc, needed for test, not available')
with tempfile.TemporaryDirectory() as d:
# create an empty temporary file
srcname = os.path.join(d, 'dummy.c')
libname = 'py_ctypes_test_dummy'
dstname = os.path.join(d, 'lib%s.so' % libname)
with open(srcname, 'w') as f:
pass
self.assertTrue(os.path.exists(srcname))
# compile the file to a shared library
cmd = ['gcc', '-o', dstname, '--shared',
'-Wl,-soname,lib%s.so' % libname, srcname]
out = subprocess.check_output(cmd)
self.assertTrue(os.path.exists(dstname))
# now check that the .so can't be found (since not in
# LD_LIBRARY_PATH)
self.assertIsNone(find_library(libname))
# now add the location to LD_LIBRARY_PATH
with test.support.EnvironmentVarGuard() as env:
KEY = 'LD_LIBRARY_PATH'
if KEY not in env:
v = d
else:
v = '%s:%s' % (env[KEY], d)
env.set(KEY, v)
# now check that the .so can be found (since in
# LD_LIBRARY_PATH)
self.assertEqual(find_library(libname), 'lib%s.so' % libname)
if __name__ == "__main__":
unittest.main()
|
aldialimucaj/Streaker
|
docs/source/conf.py
|
Python
|
mit
| 11,449 | 0.006376 |
# -*- coding: utf-8 -*-
#
# Streaker documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 6 12:34:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Streaker'
copyright = u'2016, Aldi Alimucaj'
author = u'Aldi Alimucaj'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Streakerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Streaker.tex', u'Streaker Documentation',
u'Aldi Alimucaj', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices
|
fmin2958/POCS
|
panoptes/camera/canon_indi.py
|
Python
|
mit
| 6,347 | 0.002521 |
import re
import yaml
import subprocess
import os
import datetime
from . import AbstractCamera
from ..utils.logger import has_logger
from ..utils.config import load_config
from ..utils.indi import PanIndi
@has_logger
class Camera(AbstractCamera):
def __init__(self, device_name, client=PanIndi(), config=dict(), *args, **kwargs):
assert client.devices[device_name] is not None
super().__init__(config=config, *args, **kwargs)
self.client = client
self.name = device_name
self.device = client.devices[device_name]
self.last_start_time = None
def connect(self):
'''
For Canon DSLRs using gphoto2, this just means confirming that there is
a camera on that port and that we can communicate with it.
'''
self.logger.info('Connecting to camera')
# connect to device
self.client.connectDevice(self.device.getDeviceName())
self.client.connectDevice(self.device.getDeviceName())
# set BLOB mode to BLOB_ALSO
        self.client.setBLOBMode(1, self.name, None)
self.logger.info("Connected to camera")
self.init()
def init(self):
self.logger.info("Setting defaults for camera")
self.client.get_property_value(self.name, 'UPLOAD_MODE')
# self.client.sendNewText(self.name, 'UPLOAD_MODE', 'Local', 'On')
self.client.sendNewText(self.name, 'CCD_ISO', '100', 'On')
# result = self.set('Auto Power Off', 0) # Don't power off
        # result = self.set('/main/settings/reviewtime', 0)  # Screen off
# result = self.set('/main/settings/capturetarget', 1) # SD Card
# result = self.set('/main/settings/ownername', 'Project PANOPTES')
# result = self.set('/main/settings/copyright', 'Project PANOPTES 2015')
#
# result = self.set('/main/status/lensname', 'Rokinon 85mm')
#
# result = self.set('/main/imgsettings/imageformat', 9) # RAW
# result = self.set('/main/imgsettings/imageformatsd', 9) # RAW
# result = self.set('/main/imgsettings/imageformatcf', 9) # RAW
# result = self.set('/main/imgsettings/iso', 1) # ISO 100
# result = self.set('/main/imgsettings/colorspace', 0) # sRGB
#
# result = self.set('/main/capturesettings/focusmode', 0) # Manual
# result = self.set('/main/capturesettings/autoexposuremode', 3) # 3 - Manual; 4 - Bulb
# result = self.set('/main/capturesettings/drivemode', 0) # Single exposure
# result = self.set('/main/capturesettings/picturestyle', 1) # Standard
#
# result = self.set('/main/capturesettings/shutterspeed', 0) # Bulb
#
# result = self.set('/main/actions/syncdatetime', 1) # Sync date and time to computer
# result = self.set('/main/actions/uilock', 1) # Don't let the UI change
#
# # Get Camera Properties
# self.get_serial_number()
# -------------------------------------------------------------------------
# Generic Panoptes Camera Methods
# -------------------------------------------------------------------------
def start_cooling(self):
'''
This does nothing for a Canon DSLR as it does not have cooling.
'''
self.logger.info('No camera cooling available')
self.cooling = True
def stop_cooling(self):
'''
This does nothing for a Canon DSLR as it does not have cooling.
'''
self.logger.info('No camera cooling available')
self.cooling = False
def is_exposing(self):
'''
'''
pass
# -------------------------------------------------------------------------
# Actions Specific to Canon / gphoto
# -------------------------------------------------------------------------
def get_serial_number(self):
''' Gets the 'EOS Serial Number' property
Populates the self.serial_number property
Returns:
str: The serial number
'''
self.serial_number = self.get('Serial Number')
return self.serial_number
def get_iso(self):
'''
Queries the camera for the ISO setting and populates the self.iso
property with a string containing the ISO speed.
'''
self.iso = self.get('ISO Speed')
return self.iso
def set_iso(self, iso):
'''
Sets the ISO speed of the camera after checking that the input value (a
        string or int) is in the list of allowed values in the self.iso_options
dictionary.
'''
if not iso:
iso = 400
print(iso)
self.get_iso()
self.set('ISO Speed', iso)
def get_model(self):
'''
Gets the Camera Model string from the camera and populates the
self.model property.
'''
self.model = self.get('Camera Model')
return self.model
def get_shutter_count(self):
'''
Gets the shutter count value and populates the self.shutter_count
property.
'''
self.shutter_count = self.get('Shutter Counter')
return self.shutter_count
def construct_filename(self):
'''
Use the filename_pattern from the camera config file to construct the
filename for an image from this camera
'''
if self.last_start_time:
filename = self.last_start_time.strftime('image_%Y%m%dat%H%M%S.cr2')
else:
            filename = 'image.cr2'
return filename
def take_exposure(self, exptime=5):
""" Take an exposure """
self.logger.info("<<<<<<<< Exposure >>>>>>>>>")
self.logger.info('Taking {} second exposure'.format(exptime))
self.last_start_time = datetime.datetime.now()
#get current exposure time
exp = self.device.getNumber("CCD_EXPOSURE")
# set exposure time to 5 seconds
exp[0].value = exptime
# send new exposure time to server/device
self.client.sendNewNumber(exp)
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
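Editorial note (not part of the file above): a minimal usage sketch of the Camera class, inferred only from its own signatures; the INDI device name and exposure time are assumptions, and a running INDI server exposing that device is required.
# Hypothetical usage sketch for panoptes/camera/canon_indi.py
from panoptes.camera.canon_indi import Camera
cam = Camera('Canon DSLR EOS 100D')   # device name is an assumption
cam.connect()                         # connects the INDI device and applies the defaults from init()
cam.take_exposure(exptime=10)         # sends CCD_EXPOSURE = 10 s to the device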
|
tedlaz/pyted
|
misthodosia/m13/f_pro.py
|
Python
|
gpl-3.0
| 1,976 | 0.013122 |
# -*- coding: utf-8 -*-
'''
Created on 22 Ιαν 2013
@author: tedlaz
'''
from PyQt4 import QtGui, QtCore
from gui import ui_pro
class dlg(QtGui.QDialog):
def __init__(self, args=None, parent=None):
super(dlg, self).__init__(parent)
self.ui = ui_pro.Ui_Dialog()
        self.ui.setupUi(self)
self.makeConnections()
if parent:
self.db = parent.db
else:
self.db = ''
def makeConnections(self):
QtCore.QObject.connect(self.ui.b_save, QtCore.SIGNAL("clicked()"),self.saveToDb)
def saveToDb(self):
from utils.dbutils import commitToDb
sql = "INSERT INTO m12_pro(prod,fpr_id,coy_id,eid_id,proy,aptyp_id,apod) VALUES('%s',%s,%s,%s,%s,%s,%s)"
ar = []
ar.append(self.ui.le_prod.text())
ar.append(self.ui.le_fpr_id.text())
ar.append(self.ui.le_coy_id.text())
ar.append(self.ui.le_eid_id.text())
ar.append(self.ui.le_proy.text())
ar.append(self.ui.le_aptyp_id.text())
ar.append(self.ui.le_apod.text())
if self.db:
try:
noId = commitToDb(sql % tuple(ar),self.db)
QtGui.QMessageBox.warning(self,u'Επιτυχής αποθήκευση',u'Η εγγραφή αποθηκεύτηκε με αριθμό : %s' % noId)
#self.ui.le_id.setText(noId)
except Exception:
QtGui.QMessageBox.warning(self,u'Λάθος κατά την αποθήκευση',u'Υπάρχει ήδη φυσικό πρόσωπο με αυτά τα στοιχεία')
else:
QtGui.QMessageBox.critical(self,u'Λάθος !!!',u'Δεν υπάρχει σύνδεση με Βάση Δεδομένων')
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
form = dlg(sys.argv)
form.show()
app.exec_()
|
UdeM-LBIT/GAPol
|
lib/ga/evolve/Selectors.py
|
Python
|
gpl-3.0
| 3,696 | 0.0046 |
"""
:mod:`Selectors` -- selection methods module
==============================================================
This module have the *selection methods*, like roulette wheel, tournament, ranking, etc.
"""
import random
import Consts
def GRankSelector(population, **args):
""" The Rank Selector - This selector will pick the best individual of
the population every time.
"""
count = 0
if args["popID"] != GRankSelector.cachePopID:
best_fitness = population.bestFitness().fitness
for index in xrange(1, len(population.internalPop)):
if population[index].fitness == best_fitness:
count += 1
GRankSelector.cachePopID = args["popID"]
GRankSelector.cacheCount = count
else:
count = GRankSelector.cacheCount
return population[random.randint(0, count)]
GRankSelector.cachePopID = None
GRankSelector.cacheCount = None
def GUniformSelector(population, **args):
""" The Uniform Selector """
return population[random.randint(0, len(population) - 1)]
def GTournamentSelector(population, **args):
""" The Tournament Selector
It accepts the *tournamentPool* population parameter.
.. note::
the Tournament Selector uses the Roulette Wheel to
pick individuals for the pool
"""
choosen = None
poolSize = population.getParam("tournamentPool", Consts.CDefTournamentPoolSize)
    tournament_pool = [GRouletteWheel(population, **args) for i in xrange(poolSize)]
choosen = min(tournament_pool, key=lambda ind: ind.fitness)
return choosen
def GTournamentSelectorAlternative(population, **args):
""" The alternative Tournament Selector
This Tournament Selector don't uses the Roulette Wheel
It accepts the *tournamentPool* population parameter.
"""
pool_size = population.getParam("tournamentPool", Consts.CDefTournamentPoolSize)
    len_pop = len(population)
tournament_pool = [population[random.randint(0, len_pop - 1)] for i in xrange(pool_size)]
choosen = min(tournament_pool, key=lambda ind: ind.fitness)
return choosen
def GRouletteWheel(population, **args):
""" The Roulette Wheel selector """
psum = None
if args["popID"] != GRouletteWheel.cachePopID:
GRouletteWheel.cachePopID = args["popID"]
psum = GRouletteWheel_PrepareWheel(population)
GRouletteWheel.cacheWheel = psum
else:
psum = GRouletteWheel.cacheWheel
cutoff = random.random()
lower = 0
upper = len(population) - 1
while(upper >= lower):
i = lower + ((upper - lower) / 2)
if psum[i] > cutoff:
upper = i - 1
else:
lower = i + 1
lower = min(len(population) - 1, lower)
lower = max(0, lower)
return population.bestFitness(lower)
GRouletteWheel.cachePopID = None
GRouletteWheel.cacheWheel = None
def GRouletteWheel_PrepareWheel(population):
""" A preparation for Roulette Wheel selection """
len_pop = len(population)
psum = [i for i in xrange(len_pop)]
population.statistics()
pop_fitMax = population.stats["fitMax"]
pop_fitMin = population.stats["fitMin"]
if pop_fitMax == pop_fitMin:
for index in xrange(len_pop):
psum[index] = (index + 1) / float(len_pop)
elif (pop_fitMax > 0 and pop_fitMin >= 0) or (pop_fitMax <= 0 and pop_fitMin < 0):
population.sort()
psum[0] = -population[0].fitness + pop_fitMax + pop_fitMin
for i in xrange(1, len_pop):
psum[i] = -population[i].fitness + pop_fitMax + pop_fitMin + psum[i - 1]
for i in xrange(len_pop):
psum[i] /= float(psum[len_pop - 1])
return psum
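Editorial note (not part of the file above): the roulette-wheel selector builds a cumulative, normalised fitness "wheel" once per population (GRouletteWheel_PrepareWheel) and then binary-searches it with a uniform random cutoff. A standalone sketch of the same idea for a minimisation GA, using plain lists instead of the population classes; names and data are hypothetical.
import random
from bisect import bisect_right
def prepare_wheel(fitnesses):
    fit_max, fit_min = max(fitnesses), min(fitnesses)
    weights = [-f + fit_max + fit_min for f in sorted(fitnesses)]  # lower fitness -> bigger slice
    psum, total = [], 0.0
    for w in weights:
        total += w
        psum.append(total)
    return [p / total for p in psum]
def spin(wheel):
    return bisect_right(wheel, random.random())  # first index whose cumulative share exceeds the cutoff
wheel = prepare_wheel([3.0, 1.0, 2.0, 5.0])
print(spin(wheel))  # index into the population sorted by ascending fitness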
|
RudolfCardinal/crate
|
crate_anon/linkage/bulk_hash.py
|
Python
|
gpl-3.0
| 6,042 | 0 |
#!/usr/bin/env python
"""
crate_anon/linkage/bulk_hash.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
Tool to hash multiple IDs from the command line.
Test code to look at different types of digest:
.. code-block:: python
import hashlib
import hmac
msg = "This is an ex-parrot!"
key = "voom"
key_bytes = str(key).encode('utf-8')
msg_bytes = str(msg).encode('utf-8')
digestmod = hashlib.sha256
hmac_obj = hmac.new(key=key_bytes, msg=msg_bytes, digestmod=digestmod)
    # These are the two default kinds of digest:
print(hmac_obj.digest()) # 8-bit binary
    print(hmac_obj.hexdigest())  # hexadecimal
# Hex carries 4 bits per character. There are other possibilities,
# notably:
# - Base64 with 6 bits per character;
# - Base32 with 5 bits per character.
"""
import argparse
import logging
from typing import Optional, TextIO
from cardinal_pythonlib.file_io import (
gen_noncomment_lines,
smart_open,
writeline_nl,
)
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
from cardinal_pythonlib.hash import (
HashMethods,
make_hasher,
)
log = logging.getLogger(__name__)
def get_first_noncomment_line(filename: str) -> Optional[str]:
try:
with open(filename) as f:
return next(gen_noncomment_lines(f))
except StopIteration:
return None
def bulk_hash(input_filename: str,
output_filename: str,
hash_method: str,
key: str,
keep_id: bool = True):
"""
Hash lines from one file to another.
Args:
input_filename:
input filename, or "-" for stdin
output_filename:
            output filename, or "-" for stdout
hash_method:
method to use; e.g. ``HMAC_SHA256``
key:
secret key for hasher
keep_id:
produce CSV with ``hash,id`` pairs, rather than just lines with
the hashes?
Note that the hash precedes the ID with the ``keep_id`` option, which
works best if the ID might contain commas.
"""
log.info(f"Reading from: {input_filename}")
log.info(f"Writing to: {output_filename}")
log.info(f"Using hash method: {hash_method}")
log.info(f"keep_id: {keep_id}")
log.debug(f"Using key: {key!r}") # NB security warning in help
hasher = make_hasher(hash_method=hash_method, key=key)
with smart_open(input_filename, "rt") as i: # type: TextIO
with smart_open(output_filename, "wt") as o: # type: TextIO
for line in gen_noncomment_lines(i):
hashed = hasher.hash(line) if line else ""
outline = f"{hashed},{line}" if keep_id else hashed
# log.debug(f"{line!r} -> {hashed!r}")
writeline_nl(o, outline)
def main() -> None:
"""
Command-line entry point.
"""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="Hash IDs in bulk, using a cryptographic hash function.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'infile', type=str,
help="Input file, or '-' for stdin. "
"Use one line per thing to be hashed. "
"Comments (marked with '#') and blank lines are ignored. "
"Lines have whitespace stripped left and right.")
parser.add_argument(
'--outfile', type=str, default="-",
help="Output file, or '-' for stdout. "
"One line will be written for every input line. "
"Blank lines will be written for commented or blank input.")
parser.add_argument(
'--key', type=str,
help="Secret key for hasher (warning: may be visible in process list; "
"see also --keyfile)")
parser.add_argument(
'--keyfile', type=str,
help="File whose first noncomment line contains the secret key for "
"the hasher. (It will be whitespace-stripped right and left.)")
parser.add_argument(
'--method', choices=[HashMethods.HMAC_MD5,
HashMethods.HMAC_SHA256,
HashMethods.HMAC_SHA512],
default=HashMethods.HMAC_MD5,
help="Hash method")
parser.add_argument(
'--keepid', action="store_true",
help="Produce CSV output with (hash,id) rather than just the hash")
parser.add_argument(
'--verbose', '-v', action="store_true",
help="Be verbose (NB will write key to stderr)")
args = parser.parse_args()
main_only_quicksetup_rootlogger(logging.DEBUG if args.verbose
else logging.INFO)
assert bool(args.key) != bool(args.keyfile), (
"Specify either --key or --keyfile (and not both)."
)
if args.keyfile:
key = get_first_noncomment_line(args.keyfile)
assert key, f"No key found in keyfile: {args.keyfile}"
else:
key = args.key
bulk_hash(
input_filename=args.infile,
output_filename=args.outfile,
hash_method=args.method,
key=key,
keep_id=args.keepid,
)
if __name__ == "__main__":
main()
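Editorial note (not part of the file above): bulk_hash() can also be driven directly from Python rather than through the argparse wrapper. A sketch, in which the file names and key are assumptions:
# Hypothetical call; "ids.txt" holds one identifier per line.
from cardinal_pythonlib.hash import HashMethods
from crate_anon.linkage.bulk_hash import bulk_hash
bulk_hash(
    input_filename="ids.txt",
    output_filename="hashed.csv",
    hash_method=HashMethods.HMAC_SHA256,
    key="my-secret-key",
    keep_id=True,   # writes "hash,id" lines; False writes bare hashes
)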
|
guotie/flask-acl
|
setup.py
|
Python
|
mit
| 1,588 | 0.019521 |
"""
flask.ext.acl
=============
This extension provides an Access Control implementation for `tipfy <http://www.tipfy.org/>`_.
Links
-----
* `Documentation <http://www.tipfy.org/wiki/extensions/acl/>`_
* `Source Code Repository <http://code.google.com/p/tipfy-ext-acl/>`_
* `Issue Tracker <http://code.google.com/p/tipfy-ext-acl/issues/list>`_
About tipfy
-----------
* `Home page <http://www.tipfy.org/>`_
* `Extension list <http://www.tipfy.org/wiki/extensions/>`_
* `Discussion Group <http://groups.google.com/group/tipfy>`_
"""
from setuptools import setup
setup(
name = 'flask.ext.acl',
version = '0.6',
license = 'BSD',
url = 'https://github.com/guotie/flask-acl',
description = 'Access Control extension for flask',
long_description = __doc__,
    author = 'guotie',
author_email = 'guotie.9@gmail.com',
zip_safe = False,
platforms = 'any',
packages = [
'flask',
'flask.ext',
],
namespace_packages = [
'flask',
'flask.ext',
],
include_package_data = True,
install_requires = [
'flask',
'flask.ext.sqlalchemy',
'flask.ext.cache',
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
PolicyStat/jobtastic
|
test_projects/django/testproj/urls.py
|
Python
|
mit
| 307 | 0.003257 |
from django.conf.urls import patterns, include, url
try:
from djcelery.views import apply
urlpatterns = patterns('',
url(r'^apply/(?P<task_name>.+?)/', apply, name='celery-apply'),
url(r'^celery/', include('djcelery.urls')),
)
except ImportError:
    urlpatterns = patterns('')
|
debugger06/MiroX
|
lib/test/widgetstateconstantstest.py
|
Python
|
gpl-2.0
| 2,572 | 0.002722 |
from miro.test.framework import MiroTestCase
from miro.frontends.widgets.widgetstatestore import WidgetStateStore
from miro.frontends.widgets.itemlist import SORT_KEY_MAP
class WidgetStateConstants(MiroTestCase):
def setUp(self):
MiroTestCase.setUp(self)
self.display_types = set(WidgetStateStore.get_display_types())
def test_view_types(self):
# test that all view types are different
view_types = (WidgetStateStore.get_list_view_type(),
WidgetStateStore.get_standard_view_type(),
WidgetStateStore.get_album_view_type())
for i in range(len(view_types)):
for j in range(i + 1, len(view_types)):
self.assertNotEqual(view_types[i], view_types[j])
def test_default_view_types(self):
display_types = set(WidgetStateStore.DEFAULT_VIEW_TYPE)
self.assertEqual(self.display_types, display_types)
def test_default_column_widths(self):
# test that all available columns have widths set for them
# calculate all columns that available for some display/view
# combination
available_columns = set()
display_id = None # this isn't used yet, just set it to a dummy value
        for display_type in self.display_types:
            for view_type in (WidgetStateStore.get_list_view_type(),
                              WidgetStateStore.get_standard_view_type(),
WidgetStateStore.get_album_view_type()):
available_columns.update(
WidgetStateStore.get_columns_available(
display_type, display_id, view_type))
# make sure that we have widths for those columns
self.assertEqual(available_columns,
set(WidgetStateStore.DEFAULT_COLUMN_WIDTHS.keys()))
def test_default_sort_column(self):
display_types = set(WidgetStateStore.DEFAULT_SORT_COLUMN)
self.assertEqual(self.display_types, display_types)
def test_default_columns(self):
display_types = set(WidgetStateStore.DEFAULT_COLUMNS)
self.assertEqual(self.display_types, display_types)
def test_available_columns(self):
# Currently what get_display_types() uses. Testing it anyway.
display_types = set(WidgetStateStore.AVAILABLE_COLUMNS)
self.assertEqual(self.display_types, display_types)
def test_sort_key_map(self):
columns = set(WidgetStateStore.DEFAULT_COLUMN_WIDTHS)
sort_keys = set(SORT_KEY_MAP)
self.assertEqual(sort_keys, columns)
|
msfrank/mandelbrot
|
mandelbrot/timerange.py
|
Python
|
gpl-3.0
| 6,251 | 0.004799 |
# Copyright 2014 Michael Frank <msfrank@syntaxjockey.com>
#
# This file is part of Mandelbrot.
#
# Mandelbrot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mandelbrot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mandelbrot. If not, see <http://www.gnu.org/licenses/>.
import datetime
import pyparsing as pp
from mandelbrot.model.timestamp import Timestamp, UTC
EpochDateTime = pp.Word(pp.srange('[1-9]'), pp.srange('[0-9]'))
def parseEpochDateTime(tokens):
return datetime.datetime.fromtimestamp(int(tokens[0]), UTC)
EpochDateTime.setParseAction(parseEpochDateTime)
ISODateTimeUTC = pp.Regex(r'\d{4}-\d{2}-\d{2}T\d{2}\:\d{2}\:\d{2}Z')
def parseISODateTime(tokens):
return datetime.datetime.strptime(tokens[0], '%Y-%m-%dT%H:%M:%SZ')
ISODateTimeUTC.setParseAction(parseISODateTime)
ISODateTimeAndOffset = pp.Regex(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{2}:\d{2}')
def parseISODateTimeAndOffset(tokens):
return datetime.datetime.strptime(tokens[0], '%Y-%m-%dT%H:%M:%S%z')
ISODateTimeAndOffset.setParseAction(parseISODateTimeAndOffset)
ISODateTime = ISODateTimeUTC | ISODateTimeAndOffset
TimeValue = pp.Word(pp.srange('[1-9]'), pp.srange('[0-9]'))
UnitDays = pp.CaselessKeyword('day') | pp.CaselessKeyword('days')
UnitDays.setParseAction(lambda x: lambda hours: hours * 60 * 60 * 24)
UnitHours = pp.CaselessKeyword('hour') | pp.CaselessKeyword('hours')
UnitHours.setParseAction(lambda x: lambda hours: hours * 60 * 60)
UnitMinutes = pp.CaselessKeyword('minute') | pp.CaselessKeyword('minutes')
UnitMinutes.setParseAction(lambda x: lambda minutes: minutes * 60)
UnitSeconds = pp.CaselessKeyword('second') | pp.CaselessKeyword('seconds')
UnitSeconds.setParseAction(lambda x: lambda seconds: seconds)
TimeUnit = UnitDays | UnitHours | UnitMinutes | UnitSeconds
DirectionAgo = pp.CaselessLiteral("ago")
DirectionAgo.setParseAction(lambda x: lambda point,delta: point - delta)
DirectionAhead = pp.CaselessLiteral("ahead")
DirectionAhead.setParseAction(lambda x: lambda point,delta: point + delta)
RelativeDirection = DirectionAgo | DirectionAhead
RelativeDateTime = TimeValue + TimeUnit + RelativeDirection
def parseRelativeDateTime(tokens):
value = int(tokens[0])
magnify = tokens[1]
shift = tokens[2]
seconds = magnify(value)
return shift(datetime.datetime.now(UTC), datetime.timedelta(seconds=seconds))
RelativeDateTime.setParseAction(parseRelativeDateTime)
DateTime = ISODateTime | RelativeDateTime | EpochDateTime
ClosedDateTimeRange = DateTime + pp.Suppress(pp.Literal('..')) + DateTime
LeftOpenDateTimeRange = pp.Literal('..') + DateTime
RightOpenDateTimeRange = DateTime + pp.Literal('..')
DateTimeRange = ClosedDateTimeRange | LeftOpenDateTimeRange | RightOpenDateTimeRange
DateTimePlusDelta = DateTime + pp.Suppress(pp.Literal('+')) + TimeValue + TimeUnit
def parseDateTimePlusDelta(tokens):
start = tokens[0]
value = int(tokens[1])
magnify = tokens[2]
delta = datetime.timedelta(seconds=magnify(value))
return [start, start + delta]
DateTimePlusDelta.setParseAction(parseDateTimePlusDelta)
NowPlusDelta = pp.Suppress(pp.Literal('+')) + TimeValue + TimeUnit
def parseNowPlusDelta(tokens):
start = datetime.datetime.now(UTC)
value = int(tokens[0])
magnify = tokens[1]
delta = datetime.timedelta(seconds=magnify(value))
return [start, start + delta]
NowPlusDelta.setParseAction(parseNowPlusDelta)
DateTimeWindow = ClosedDateTimeRange | DateTimePlusDelta | NowPlusDelta
def datetime_to_timestamp(dt):
timestamp = Timestamp()
timestamp.set_datetime(dt)
return timestamp
def parse_datetime(string):
"""
Parse a datetime string. Datetimes may be specified using the following formats:
ISOFORMAT ISO-8601 format, e.g. "2015-05-01T12:45:00Z"
RELATIVE some magnitude relative to now, e.g. "2 hours ago" or "15 days ahead"
EPOCH seconds since the UNIX epoch
:param string: The timerange to parse
:type string: str
:returns: the datetime as a Timestamp
:rtype: Timestamp
:raises ValueError: the timerange could not be parsed
"""
try:
return datetime_to_timestamp(DateTime.parseString(string, parseAll=True).asList()[0])
except Exception as e:
raise ValueError("failed to parse datetime '%s'" % string)
def parse_timerange(string):
"""
Parse a timerange string. Timeranges may be specified using the following formats:
START..END between START and END
START.. from START to infinity
..END from -infinity to END
:param string: The timerange to parse
:type string: str
:returns: A 2-tuple containing the start and end timestamps
:rtype: tuple[Timestamp,Timestamp]
:raises ValueError: the timerange could not be parsed
"""
try:
start,end = DateTimeRange.parseString(string, parseAll=True).asList()
start = None if start == ".." else datetime_to_timestamp(start)
end = None if end == ".." else datetime_to_timestamp(end)
return (start,end)
except Exception as e:
raise ValueError("failed to parse timerange '%s'" % string)
def parse_timewindow(string):
"""
Parse a timewindow. Timewindows may be specified using the following
formats:
START..END
START+DELTA
+DELTA
:param string: The timewindow to parse
:type string: str
:returns: A 2-tuple containing the start and end timestamps
:rtype: tuple[Timestamp,Timestamp]
:raises ValueError: the timewindow could not be parsed
"""
try:
start,end = DateTimeWindow.parseString(string, parseAll=True).asList()
return (datetime_to_timestamp(start), datetime_to_timestamp(end))
except Exception as e:
raise ValueError("failed to parse timewindow '%s'" % string)
|
xkollar/spacewalk
|
spacecmd/src/lib/misc.py
|
Python
|
gpl-2.0
| 28,096 | 0.000285 |
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2013 Aron Parsons <aronparsons@gmail.com>
# Copyright (c) 2011--2015 Red Hat, Inc.
#
# NOTE: the 'self' variable is an instance of SpacewalkShell
# wildcard import
# pylint: disable=W0401,W0614
# unused argument
# pylint: disable=W0613
# invalid function name
# pylint: disable=C0103
import logging
import readline
import shlex
from getpass import getpass
from ConfigParser import NoOptionError
from spacecmd.utils import *
from time import sleep
import xmlrpclib
# list of system selection options for the help output
HELP_SYSTEM_OPTS = '''<SYSTEMS> can be any of the following:
name
ssm (see 'help ssm')
search:QUERY (see 'help system_search')
group:GROUP
channel:CHANNEL
'''
HELP_TIME_OPTS = '''Dates can be any of the following:
Explicit Dates:
Dates can be expressed as explicit date strings in the YYYYMMDD[HHMM]
format. The year, month and day are required, while the hours and
minutes are not; the hours and minutes will default to 0000 if no
values are provided.
Deltas:
Dates can be expressed as delta values. For example, '2h' would
mean 2 hours in the future. You can also use negative values to
express times in the past (e.g., -7d would be one week ago).
Units:
s -> seconds
m -> minutes
h -> hours
d -> days
'''
####################
# life of caches in seconds
SYSTEM_CACHE_TTL = 3600
PACKAGE_CACHE_TTL = 86400
ERRATA_CACHE_TTL = 86400
MINIMUM_API_VERSION = 10.8
SEPARATOR = '\n' + '#' * 30 + '\n'
####################
ENTITLEMENTS = ['enterprise_entitled',
'virtualization_host'
]
SYSTEM_SEARCH_FIELDS = ['id', 'name', 'ip', 'hostname',
'device', 'vendor', 'driver', 'uuid']
####################
def help_systems(self):
print HELP_SYSTEM_OPTS
def help_time(self):
print HELP_TIME_OPTS
####################
def help_clear(self):
print 'clear: clear the screen'
print 'usage: clear'
def do_clear(self, args):
os.system('clear')
####################
def help_clear_caches(self):
print 'clear_caches: Clear the internal caches kept for systems' + \
' and packages'
print 'usage: clear_caches'
def do_clear_caches(self, args):
self.clear_system_cache()
self.clear_package_cache()
self.clear_errata_cache()
####################
def help_get_apiversion(self):
print 'get_apiversion: Display the API version of the server'
print 'usage: get_apiversion'
def do_get_apiversion(self, args):
print self.client.api.getVersion()
####################
def help_get_serverversion(self):
print 'get_serverversion: Display the version of the server'
print 'usage: get_serverversion'
def do_get_serverversion(self, args):
print self.client.api.systemVersion()
####################
def help_get_certificateexpiration(self):
print 'get_certificateexpiration: Print the expiration date of the'
print " server's entitlement certificate"
print 'usage: get_certificateexpiration'
def do_get_certificateexpiration(self, args):
date = self.client.satellite.getCertificateExpirationDate(self.session)
print date
####################
def help_list_proxies(self):
    print 'list_proxies: List the proxies within the user\'s organization '
print 'usage: list_proxies'
def do_list_proxies(self, args):
proxies = self.client.satellite.listProxies(self.session)
print proxies
####################
def help_get_session(self):
print 'get_session: Show the current session string'
print 'usage: get_session'
def do_get_session(self, args):
if self.session:
print self.session
else:
logging.error('No session found')
####################
def help_help(self):
print 'help: Show help for the given command'
print 'usage: help COMMAND'
####################
def help_history(self):
print 'history: List your command history'
print 'usage: history'
def do_history(self, args):
for i in range(1, readline.get_current_history_length()):
print '%s %s' % (str(i).rjust(4), readline.get_history_item(i))
####################
def help_toggle_confirmations(self):
print 'toggle_confirmations: Toggle confirmation messages on/off'
print 'usage: toggle_confirmations'
def do_toggle_confirmations(self, args):
if self.options.yes:
self.options.yes = False
print 'Confirmation messages are enabled'
else:
self.options.yes = True
logging.warning('Confirmation messages are DISABLED!')
####################
def help_login(self):
print 'login: Connect to a Spacewalk server'
print 'usage: login [USERNAME] [SERVER]'
def do_login(self, args):
(args, _options) = parse_arguments(args)
# logout before logging in again
if len(self.session):
logging.warning('You are already logged in')
return True
    # an argument passed to the function takes precedence
if len(args) == 2:
server = args[1]
else:
# use the server we were already using
server = self.config['server']
    # bail out if no server was given
if not server:
logging.warning('No server specified')
return False
# load the server-specific configuration
self.load_config_section(server)
    # an argument passed to the function takes precedence
if len(args):
username = args[0]
elif self.config.has_key('username'):
# use the username from before
username = self.config['username']
elif self.options.username:
# use the username from before
username = self.options.username
else:
username = ''
# set the protocol
if self.config.has_key('nossl') and self.config['nossl']:
proto = 'http'
else:
proto = 'https'
server_url = '%s://%s/rpc/api' % (proto, server)
# this will enable spewing out all client/server traffic
verbose_xmlrpc = False
if self.options.debug > 1:
verbose_xmlrpc = True
# connect to the server
logging.debug('Connecting to %s', server_url)
self.client = xmlrpclib.Server(server_url, verbose=verbose_xmlrpc)
# check the API to verify connectivity
try:
self.api_version = self.client.api.getVersion()
logging.debug('Server API Version = %s', self.api_version)
except xmlrpclib.Fault, e:
if self.options.debug > 0:
logging.exception(e)
logging.error('Failed to connect to %s', server_url)
self.client = None
return False
# ensure the server is recent enough
if self.api_version < self.MINIMUM_API_VERSION:
logging.error('API (%s) is too old (>= %s required)',
self.api_version, self.MINIMUM_API_VERSION)
self.client = None
return False
# store the session file in the server's own directory
session_file = os.path.join(self.conf_dir, server, 'session')
# retrieve a cached session
if os.path.isfile(session_file) and not self.options.password:
try:
sessionfile = open(session_file, 'r')
# read the session (format = username:session)
for line in sessionfile:
parts = line.split(':')
# if a username was passed, make sure it matches
if len(username):
if parts[0] == username:
self.se
|
NaPs/Kolekto
|
kolekto/commands/restore.py
|
Python
|
mit
| 657 | 0 |
import json
from kolekto.printer import printer
from kolekto.commands import Command
class Restore(Command):
""" Restore metadata from a json dump.
"""
help = 'Restore metadata from a json dump'
def prepare(self):
self.add_arg('file', help='The json dump file to restore')
def run(self, args, config):
mdb = self.get_metadata_db(args.tree)
        with open(args.file) as fdump:
dump = json.load(fdump)
for movie in dump:
mdb.save(movie['hash'], movie['movie'])
printer.verbose('Loaded {hash}', hash=movie['hash'])
            printer.p('Loaded {nb} movies.', nb=len(dump))
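Editorial note (not part of the file above): Restore.run() expects the dump to be a JSON list of objects carrying "hash" and "movie" keys, matching what it passes to mdb.save(). A sketch of such a file, with illustrative field values and a hypothetical file name:
import json
dump = [
    {"hash": "3f2a9c...", "movie": {"title": "Example Movie", "year": 2013}},
]
with open("kolekto-dump.json", "w") as f:
    json.dump(dump, f)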
|
enthought/etsproxy
|
enthought/type_manager/hook.py
|
Python
|
bsd-3-clause
| 95 | 0 |
# proxy module
from __future__ import absolute_import
from apptools.type_manager.hook import *
|
mcinglis/libpp
|
templates/render.py
|
Python
|
agpl-3.0
| 2,004 | 0.001497 |
from __future__ import print_function
from argparse import ArgumentParser, Namespace
TEMPLATE_SEPARATOR = '#####'
OUTPUT_PREFIX = '''
// This file is the result of rendering `{filepath}`.
// You should make changes to this code by editing that template; not
// this file.
'''
def main(argv):
args = parse_args(argv[1:])
with open(args.output, 'w') as output:
print(render(args.filepath, args.limit), file=output)
return 0
def parse_args(raw_args):
p = ArgumentParser(
description=('Renders the specified template file with the given '
'arity limit. The template file should contain a line '
'containing just `{}`, with the template text above '
'that separator, and the context generation code '
'below. The code should define a `context` function '
'that generates a dict. The template text is then '
'rendered by: '
                     '`text.format(limit=limit, **(context(limit))`')
.format(TEMPLATE_SEPARATOR))
p.add_argument('limit', type=int)
p.add_argument('filepath', type=str)
p.add_argument('-o', '--output', default='/dev/stdout',
help='The path to the file to write the rendered template to.')
return p.parse_args(raw_args)
def render(filepath, limit):
text = read_file(filepath)
template, code = text.split('\n' + TEMPLATE_SEPARATOR + '\n', 2)
context_func = execute(code, filepath)['context']
context = context_func(limit)
return (OUTPUT_PREFIX.format(filepath=filepath)
+ template.format(limit=limit, **context))
def execute(code, filepath):
code_locals = {}
code_obj = compile(code, filepath, 'exec')
exec(code_obj, {}, code_locals)
return code_locals
def read_file(path):
with open(path, 'r') as f:
return f.read()
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
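Editorial note (not part of the file above): a template file for render.py keeps the text to render above the '#####' separator and the Python that defines context(limit) below it. A hypothetical example, wrapped in a string purely for illustration:
EXAMPLE_TEMPLATE = '''\
// Generated for arity limit {limit}: {greeting}
#####
def context(limit):
    return {"greeting": "hello from the context function"}
'''
# For limit=8, the template part formats to
# "// Generated for arity limit 8: hello from the context function",
# preceded by the OUTPUT_PREFIX banner that render() prepends.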
|
stephan-rayner/HIL-TestStation
|
Tests/e3120/Maintenance/TransitionIntoState0.py
|
Python
|
mit
| 3,533 | 0.012171 |
"""
Wind Turbine Company - 2013
Author: Stephan Rayner
Email: stephan.rayner@gmail.com
"""
import time
from test.Base_Test import Base_Test
class Maintenance_153Validation(Base_Test):
def setUp(self):
self.WindSpeedValue = "4.5"
self.interface.reset()
self.interface.write("Yaw_Generation", "2")
self.interface.expect("Emergency_Stop", "off")
self.interface.expect("Maintenance_Mode", "off")
def test_MaintenanceSD46(self):
'''
        Moving into Maintenance Mode while the turbine is running (State 2 or
        higher) causes SD_46 to fire before any other shutdowns. In other words,
        SD_46 should fire, and only SD_46.
'''
self._State2Setup()
self.interface.expect("Maintenance_Mode", "on")
self.TEST_CONDITION = self.interface.Shutdown.read(self.interface.Shutdown_List, return_onlyHigh = True)
print self.TEST_CONDITION
self.assertTrue("@GV.SD_46" in self.TEST_CONDITION,"Shutdown 46 did not fire")
self.assertEqual(self.TEST_CONDITION.keys()[0], "@GV.SD_46","Shutdown did not fire first")
        self.assertEqual(len(self.TEST_CONDITION), 1,"More than one shutdown is present.")
self.TEST_CONDITION = self.interface.read("Turbine_State")
self.assertEqual(self.TEST_CONDITION,"0")
def test_MaintenanceHardwareControl(self):
'''
        DO_BypLineProtRelMaintMode and DO_BypassRotorOverSpeed should be 0.
        When Maintenance Mode is activated, SD_46 goes high, and 1 minute later
        DO_BypLineProtRelMaintMode and DO_BypassRotorOverSpeed should be 1.
        //I am using a running counter with a read to check time, not a wait and read.
        //This maintains that the values don't flip early.
'''
self._State2Setup()
read_Vars = ["@GV.DO_BypLineProtRelMaintMode","@GV.DO_BypassRotorOverSpeed"]
#
self.assertEqual(self._readpxUtils(read_Vars),["0","0"])
self.interface.expect("Maintenance_Mode","on")
elapseTime = 0.0
initialTime = time.time()
self.TEST_CONDITION = self.interface.Shutdown.read(self.interface.Shutdown_List, return_onlyHigh = True)
#
self.assertTrue("@GV.
|
SD_46" in self.TEST_CONDITION,"Shutdown 46 did not fire")
print "\nPlease Wait One Minute\n"
while((self._readpxUtils(read_Vars) == ["0",
|
"0"]) and (elapseTime < 120)):
elapseTime = time.time() - initialTime
expectedRunningTime = 60
tollerance = 10
self.TEST_CONDITION = self._readpxUtils(read_Vars)
#
self.assertEqual(self.TEST_CONDITION,["1","1"])
#
        self.assertLessEqual(abs(expectedRunningTime-elapseTime),tollerance,"The hardware does not retain control over the UPR and the Smartplug until the brakes apply as expected:\nElapse Time: %s\n%s : %s\n%s : %s\n" % (str(elapseTime), read_Vars[0], self.TEST_CONDITION[0], read_Vars[1], self.TEST_CONDITION[1]))
#Helper Functions
def _State2Setup(self):
self.interface.write("Wind_Speed",self.WindSpeedValue)
self.interface.write("Yaw_Generation", "2")
print ("Waiting for 2 minutes")
time.sleep(70)# must hold this here for the Minute averages to hold
self.interface.Shutdown.bypass([24, 31])
self.interface.Shutdown.reset()
self.interface.start()
def _readpxUtils(self,List):
a = self.interface.mcc.read(List)
tmp=[]
for x in List:
tmp.append(a[x])
return tmp
|
HackerEarth/django-allauth
|
allauth/socialaccount/providers/stackexchange/urls.py
|
Python
|
mit
| 178 | 0.005618 |
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from provider import StackExchangeProvider
urlpatterns = default_urlpatterns(StackExchangeProvider)
|
luotao1/Paddle
|
python/paddle/regularizer.py
|
Python
|
apache-2.0
| 5,630 | 0.007282 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['L1Decay', 'L2Decay']
import paddle.fluid as fluid
class L1Decay(fluid.regularizer.L1Decay):
r"""
Implement the L1 Weight Decay Regularization, which encourages the weights to be sparse.
It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
in Optimizer will be used.
In the implementation, the loss function of L1 Weight Decay Regularization is as follows:
.. math::
loss = coeff * reduce\_sum(abs(x))
Args:
coeff(float, optional): regularization coeff. Default:0.0.
Examples:
.. code-block:: python
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L1Decay
import numpy as np
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
momentum = paddle.optimizer.Momentum(
learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=L1Decay(0.0001))
back = out.backward()
momentum.step()
momentum.clear_grad()
# Example2: set Regularizer in parameters
# Set L1 regularization in parameters.
# Global regularizer does not take effect on my_conv2d for this case.
from paddle.nn import Conv2D
from paddle import ParamAttr
            from paddle.regularizer import L1Decay
my_conv2d = Conv2D(
in_channels=10,
out_channels=10,
kernel_size=1,
stride=1,
padding=0,
                weight_attr=ParamAttr(regularizer=L1Decay(coeff=0.01)),
bias_attr=False)
"""
def __init__(self, coeff=0.0):
super(L1Decay, self).__init__(coeff)
class L2Decay(fluid.regularizer.L2Decay):
r"""
Implement the L2 Weight Decay Regularization, which helps to prevent the model over-fitting.
It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
    When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
in Optimizer will be used.
    In the implementation, the loss function of L2 Weight Decay Regularization is as follows:
.. math::
loss = 0.5 * coeff * reduce\_sum(square(x))
Args:
regularization_coeff(float, optional): regularization coeff. Default:0.0
Examples:
.. code-block:: python
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L2Decay
import numpy as np
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
momentum = paddle.optimizer.Momentum(
learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=L2Decay(0.0001))
back = out.backward()
momentum.step()
momentum.clear_grad()
# Example2: set Regularizer in parameters
# Set L2 regularization in parameters.
# Global regularizer does not take effect on my_conv2d for this case.
from paddle.nn import Conv2D
from paddle import ParamAttr
from paddle.regularizer import L2Decay
my_conv2d = Conv2D(
in_channels=10,
out_channels=10,
kernel_size=1,
stride=1,
padding=0,
weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
bias_attr=False)
"""
def __init__(self, coeff=0.0):
super(L2Decay, self).__init__(coeff)
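Editorial note (not part of the file above): the penalty terms from the two formulas, worked by hand for a toy weight vector and coeff = 0.01.
import numpy as np
x = np.array([0.5, -1.0, 2.0])
coeff = 0.01
l1_penalty = coeff * np.sum(np.abs(x))           # 0.01 * 3.5 = 0.035
l2_penalty = 0.5 * coeff * np.sum(np.square(x))  # 0.005 * 5.25 = 0.02625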
|
ocarneiro/minecraft-pi
|
lab/blinky.py
|
Python
|
apache-2.0
| 441 | 0 |
import time
import RPi.GPIO as GPIO
LED_VERDE = 22
LED_VERMELHO = 18
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LED_VERDE, GPIO.OUT)
GPIO.output(LED_VERDE, GPIO.LOW)
GPIO.setup(LED_VERMELHO, GPIO.OUT)
GPIO.output(LED_VERMELHO, GPIO.LOW)
while True:
GPIO.output(LED_VERDE, GPIO.HIGH)
GPIO.output(LED_VERMELHO, GPIO.LOW)
time.sleep(0.5)
GPIO.output(LED_VERDE, GPIO.LOW)
GPIO.output(LED_VERMELHO, GPI
|
O.HIGH)
time.sleep(0.5)
|
nikitanovosibirsk/vedro
|
vedro/plugins/terminator/__init__.py
|
Python
|
apache-2.0
| 63 | 0 |
from ._terminator import Terminator
__all__ = ("Terminator",)
|
CityGenerator/Megacosm-Generator
|
tests/test_generator.py
|
Python
|
gpl-2.0
| 8,144 | 0.007859 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Fully test this module's functionality through the use of fixtures."
from megacosm.generators import Generator
import unittest2 as unittest
import json
from mock import Mock, patch, MagicMock
import fakeredis
import fixtures
from config import TestConfiguration
from pprint import pprint
class TestGenerator(unittest.TestCase):
def setUp(self):
self.redis = fakeredis.FakeRedis()
fixtures.generator.import_fixtures(self)
def tearDown(self):
self.redis.flushall()
def test_missing_feature(self):
""" Test a feature that doesn't exist."""
generator = Generator(self.redis)
with self.assertRaises(AttributeError):
generator.bananasmissingfeature
def test_static_seed(self):
''' Ensure a static seed can be set. '''
generator = Generator(self.redis, {'seed':1337})
self.assertEqual(generator.seed, 1337)
def test_randomseed(self):
        ''' Ensure a seed that is an integer is created. '''
generator = Generator(self.redis)
self.assertIs(type(generator.seed), int)
#FIXME these were taken from megacosm.util.Seed.Seed values. Don't hardcode them.
self.assertGreaterEqual(generator.seed,1)
self.assertLessEqual(generator.seed, 10000000)
def test_select_by_roll(self):
''' Select the bogus_size greater than or equal to the rolled number.'''
generator = Generator(self.redis, {'seed': 1007, 'bogus_size_roll': 37})
self.assertEqual({u'score': 40, u'name': u'large', u'multiplier': 1.0} ,generator.select_by_roll('bogus_size'))
def test_select_by_roll_key_doesnt_exist(self):
        ''' Try to select funion for a roll, only to find it doesn't exist.'''
generator = Generator(self.redis)
with self.assertRaisesRegexp(ValueError, 'The key funion does not exist.'):
generator.select_by_roll('funion')
self.assertNotEqual('', generator.select_by_roll('bogus_size'))
def test_select_by_roll_highmin(self):
''' Test rolling outside our limits of 0-100. '''
generator = Generator(self.redis, { 'bogus_size_roll': 1037})
self.assertEquals({u'score': 100, u'name': u'giant', u'multiplier': 2.0},
generator.select_by_roll('bogus_size'))
generator = Generator(self.redis, {'bogus_size_roll': -1037})
self.assertEquals({u'score': 1, u'name': u'tiny', u'multiplier': 0.5},
generator.select_by_roll('bogus_size'))
def test_select_by_roll_key_wrong_type(self):
'''Intentionally try to roll on the wrong datatype.'''
generator = Generator(self.redis, {'seed': 1007, 'bogus_mylist_roll': 37})
with self.assertRaisesRegexp(Exception,
"The key bogus_mylist is not a zset; the type is list."):
generator.select_by_roll('bogus_mylist')
def test_random_list_value(self):
''' Find a random list value '''
generator = Generator(self.redis)
self.assertIn(generator.rand_value('bogus_mylist'), ['1','2','3','4'])
def test_rand_value_key_wrong_type(self):
''' Try to use a zset as a list. '''
generator = Generator(self.redis)
with self.assertRaisesRegexp(Exception,
"the key \(bogus_size\) doesn't appear to exist or isn't a list \(zset\)."):
generator.rand_value('bogus_size')
def test_rand_value_key_doesnt_exist(self):
        ''' Try to generate a rand_value from a key that doesn't exist at all. '''
generator = Generator(self.redis)
with self.assertRaisesRegexp(Exception, "the key \(somekey\) doesn't appear to exist or isn't a list"):
generator.rand_value('somekey')
def test_dump_vars(self):
'''Ensure that the generator dumps properly. '''
generator = Generator(self.redis, {'seed': 1007})
        self.assertIn('seed', generator.dump_vars())
self.assertEqual(vars(generator), generator.dump_vars())
def test_generate_features(self):
'''test Feature Generation from a namekey'''
generator = Generator(self.redis, {'bogus_size_roll': 1})
self.assertNotIn('bogus', generator.dump_vars())
generator.generate_features('bogus')
self.assertIn('booyahfeature', generator.dump_vars())
self.assertEqual('Booyah',generator.booyahfeature)
self.assertEqual('tiny', generator.size['name'])
'''Ensure misslist from other was not included. '''
with self.assertRaises(AttributeError):
generator.misslist
def test_generate_feature_chance_100(self):
'''test Feature Generation from a namekey with 100% chance.'''
generator = Generator(self.redis, {'chnc_size_roll': 1})
self.assertNotIn('chnc', generator.dump_vars())
generator.generate_features('chnc')
self.assertIn('mylist', generator.dump_vars())
self.assertIn(generator.mylist, ['1','2','3','4'])
self.assertEqual('tiny', generator.size['name'])
'''Ensure misslist from other was not included. '''
with self.assertRaises(AttributeError):
generator.misslist
def test_generate_feature_chance_roll(self):
'''test Feature Generation from a namekey with 0% chance.'''
generator = Generator(self.redis, {'nochnc_size_roll': 1, 'nochnc_size_chance':5,'nochnc_mylist_chance':5 })
self.assertNotIn('mylist_chance', generator.dump_vars())
generator.generate_features('nochnc')
self.assertIn('mylist_chance', generator.dump_vars())
'''Ensure misslist from other was not included. '''
with self.assertRaises(AttributeError):
generator.misslist
def test_kind_description(self):
'''Ensure that kind description JSON is loaded properly.'''
generator = Generator(self.redis)
self.assertNotIn('kind', generator.dump_vars())
generator.generate_features('myknd')
self.assertIn('kind', generator.dump_vars())
def test_bad_kind_description(self):
'''Ensure that kind description with bad JSON throws an error.'''
generator = Generator(self.redis)
self.assertNotIn('kind', generator.dump_vars())
with self.assertRaises(ValueError):
generator.generate_features('mybadknd')
def test_error_handling_roll(self):
'''Ensure that select_by_roll handles errors properly.'''
generator = Generator(self.redis, {'incompleteset_size_roll':10 })
with self.assertRaises(ValueError) as cm:
generator.select_by_roll('derpderp_size')
self.assertEqual(str(cm.exception), "The key derpderp_size does not exist.")
with self.assertRaises(LookupError) as cm:
generator.select_by_roll('incompleteset_size')
self.assertEqual(str(cm.exception), 'The key (incompleteset_size) appears to be empty for a roll of 10- This should never happen.')
with self.assertRaises(ValueError) as cm:
generator.select_by_roll('badjson_widget')
self.assertEqual(str(cm.exception), '("JSON parsing error: Couldn\'t read json", \'waffles not json\')')
def test_bogus_generator(self):
'''Ensure that a fullname is generated.'''
generator = Generator(self.redis,{},'bogus')
self.assertIn('booyahfeature',generator.dump_vars())
def test_generate_feature(self):
'''Ensure that a fullname is generated.'''
generator = Generator(self.redis,{'mylist':'foobar'})
generator.generate_feature( 'bogus', 'bogus_mylist')
generator = Generator(self.redis,{'kind':'small', 'kind_description':'foobar'})
generator.generate_feature( 'myknd', 'myknd_kind')
def test_render_template(self):
'''Ensure that a fullname is generated.'''
generator = Generator(self.redis,{'test_value':'a bigger string'})
self.assertEqual('A large string, a bigger string.',generator.render_template("A large string, {{params.test_value}}."))
|
christianheinrichs/learning-lpthw
|
ex01/ex1-sd1.py
|
Python
|
gpl-3.0
| 226 | 0 |
#!/usr/bin/env python2
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
print "Some
|
text"
|
atuljain/coderbounty
|
website/migrations/0017_comment.py
|
Python
|
agpl-3.0
| 819 | 0.001221 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0016_auto_20151128_2006'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.TextField()),
('service_comment_id', models.IntegerField()),
('username', models.CharField(max_length=255)),
('created', models.DateTimeField()),
('updated', models.DateTimeField()),
                ('issue', models.ForeignKey(to='website.Issue')),
],
),
]
|
SouthForkResearch/CHaMP_Metrics
|
tools/topometrics/methods/thalweg.py
|
Python
|
gpl-3.0
| 6,178 | 0.003237 |
import logging
from shapely.geometry import *
from lib.raster import Raster
import numpy as np
from os import path
import sys
sys.path.append(path.abspath(path.join(path.dirname(__file__), "../../..")))
from lib.shapefileloader import Shapefile
from lib.exception import DataException, MissingException
from lib.metrics import CHaMPMetric
import argparse
"""
/Users/work/Projects/CHaMP/tools/tmp/2011/Asotin/ASW00001-NF-F1P2BR/VISIT_228/Topo/GISLayers/Thalweg.shp
/Users/work/Projects/CHaMP/tools/tmp/2011/Asotin/ASW00001-NF-F1P2BR/VISIT_228/Topo/GISLayers/DEM.tif
1.0
"""
class ThalwegMetrics(CHaMPMetric):
TEMPLATE = {
'Min': None,
'Max': None,
'Mean': None,
'StDev': None,
'Count': None,
'Length': None,
'WSGradientRatio': None,
'WSGradientPC': None,
'Sinuosity': None,
'CV': None,
'ThalwegToCenterlineRatio': None
}
def calc(self, sThalwegshp, sDepthRaster, sWaterSurfaceRaster, fDist, visitMetrics):
if not path.isfile(sThalwegshp):
raise MissingException("Thalweg shapefile missing")
if not path.isfile(sDepthRaster):
raise MissingException("Depth raster missing")
if not path.isfile(sWaterSurfaceRaster):
raise MissingException("Surface raster missing")
wettedMainstemLength = visitMetrics['Wetted']['Centerline']['MainstemLength']
if wettedMainstemLength is None:
raise MissingException("No wetted mainstem length found in visit metrics")
sfile = Shapefile(sThalwegshp).featuresToShapely()
if len(sfile) < 1:
raise DataException("Thalweg shapefile has no features")
thalweg = sfile[0]['geometry']
depthRaster = Raster(sDepthRaster)
waterSurfaceRaster = Raster(sWaterSurfaceRaster)
samplepts = ThalwegMetrics.interpolateRasterAlongLine(thalweg, fDist)
results = ThalwegMetrics.lookupRasterValues(samplepts, depthRaster)['values']
# Get the elevation at the first (downstream) point on the Thalweg
dsElev = waterSurfaceRaster.getPixelVal(thalweg.coords[0])
usElev = waterSurfaceRaster.getPixelVal(thalweg.coords[-1])
if (np.isnan(dsElev)):
raise DataException('nodata detected in the raster for downstream point on the thalweg')
elif np.isnan(usElev):
raise DataException('nodata detected in the raster for upstream point on the thalweg')
waterSurfaceGradientRatio = (usElev - dsElev) / thalweg.length
waterSurfaceGradientPC = waterSurfaceGradientRatio * 100.0
# Thalweg straight length and sinuosity
firstPoint = Point(thalweg.coords[0])
lastPoint = Point(thalweg.coords[-1])
straightLength = firstPoint.distance(lastPoint)
sinuosity = thalweg.length / straightLength
self.metrics = {
'Min': np.nanmin(results),
'Max': np.nanmax(results),
'Mean': np.mean(results),
'StDev': np.std(results),
'Count': np.count_nonzero(results),
'Length': thalweg.length,
'WSGradientRatio': waterSurfaceGradientRatio,
'WSGradientPC': waterSurfaceGradientPC,
'Sinuosity': sinuosity,
'CV': 0.0,
'ThalwegToCenterlineRatio': thalweg.length / wettedMainstemLength
#, 'Values': results.data
}
if self.metrics['StDev'] != 0 and self.metrics['Mean'] != 0:
self.metrics['CV'] = self.metrics['StDev'] / self.metrics['Mean']
@staticmethod
def interpolateRasterAlongLine(line, fStationInterval):
"""
        Given a cross section (LineString) and a station spacing, return regularly spaced points
along that line
:param line:
:param fStationInterval:
:return:
"""
points = [line.interpolate(currDist) for currDist in np.arange(0, line.length, fStationInterval)]
# Add the endpoint if it doesn't already exist
if points[-1] != line.coords[-1]:
points.append(Point(line.coords[-1]))
return points
@staticmethod
def lookupRasterValues(points, raster):
"""
Given an array of points with real-world coordinates, lookup values in raster
then mask out any nan/nodata values
:param points:
:param raster:
        :return:
"""
pointsdict = { "points": points, "values": [] }
for pt in pointsdict['points']:
pointsdict['values'].append(raster.getPixelVal(pt.coords[0]))
# Mask out the np.nan values
pointsdict['values'] = np.ma.masked_invalid(pointsdict['values'])
return pointsdict
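
# Illustrative sketch, not part of the original file: shows how the station-point
# helper above spaces samples along a line using only shapely geometry. The
# coordinates and the 2.5 m interval below are made-up placeholder values.
def _station_point_demo():
    line = LineString([(0.0, 0.0), (10.0, 0.0)])
    pts = ThalwegMetrics.interpolateRasterAlongLine(line, 2.5)
    # Samples land at 0, 2.5, 5.0 and 7.5 m, plus the appended endpoint at 10 m.
    return [(p.x, p.y) for p in pts]
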
if __name__ == "__main__":
logfmt = "[%(asctime)s - %(levelname)s] - %(message)s"
dtfmt = "%Y-%m-%d %I:%M:%S"
logging.basicConfig(filename='raster_metrics.log', level=logging.DEBUG, format=logfmt, datefmt=dtfmt)
# parse command line options
parser = argparse.ArgumentParser()
parser.add_argument('thalweg',
help='Path to the thalweg',
type=argparse.FileType('r'))
parser.add_argument('depthraster',
help='Path to the depth raster',
type=argparse.FileType('r'))
parser.add_argument('watersurfaceraster',
                        help='Path to the water surface raster',
type=argparse.FileType('r'))
parser.add_argument('dist',
help='interval spacing between raster measurements',
type=float)
args = parser.parse_args()
if not args.depthraster:
print "ERROR: Missing arguments"
parser.print_help()
exit(0)
if not args.watersurfaceraster:
print "ERROR: Missing arguments"
parser.print_help()
exit(0)
try:
dMetrics = ThalwegMetrics(args.thalweg.name, args.depthraster.name, args.watersurfaceraster.name, args.dist)
except AssertionError as e:
sys.exit(0)
except Exception as e:
raise
sys.exit(0)
|
ctk3b/InterMol
|
intermol/desmond/__init__.py
|
Python
|
mit
| 3,879 | 0.003094 |
from collections import OrderedDict
import logging
import os
import shutil
import subprocess
import simtk.unit as units
from intermol.desmond.desmond_parser import load, save
DES_PATH = ''
logger = logging.getLogger('InterMolLog')
# terms we are ignoring for now.
#'en': 'Raw Potential',
#'E_x': 'Extended En.',
unwanted = ['E_x','E_n','E_k','constraints',]
key_dict = {
'E_p': 'Potential',
'stretch': 'Bond',
'angle': 'Angle',
'dihedral': 'All dihedrals',
'pair_vdw': 'LJ-14',
'pair_elec': 'Coulomb-14',
'nonbonded_vdw': 'LJ (SR)',
}
def standardize_key(in_key):
if in_key in key_dict:
out_key = key_dict[in_key]
else:
out_key = in_key
return out_key
def get_desmond_energy_from_file(energy_file):
"""Parses the desmond energy file. """
with open(energy_file, 'r') as f:
data = []
types = []
# First line of enegrp.dat file contains total energy terms.
line = f.readline()
# Just to make sure the line is what we think it is.
if line.startswith('time=0.000000'):
terms = line.split()
terms = terms[1:-2] # Exclude time, pressure, and volume.
for term in terms:
key, value = term.split('=')
types.append(standardize_key(key))
data.append(float(value))
        # Parse rest of file for individual energy groups.
for line in f:
if '(0.000000)' in line: # Time = 0.0
words = line.split()
if words[-1] == 'total':
continue
key = standardize_key(words[0])
if key:
types.append(key)
data.append(words[-1])
data = [float(value) * units.kilocalories_per_mole for value in data]
e_out = OrderedDict(zip(types, data))
# Discard non-energy terms.
for group in unwanted:
if group in e_out:
del e_out[group]
return e_out
def energies(cms, cfg, des_path):
"""Evalutes energies of DESMOND files
Args:
cms (str): Path to .cms file.
cfg (str): Path to .cfg file.
des_path (str): Path to DESMOND binaries.
Returns:
tot_energy:
energy_file:
"""
logger.info('Evaluating energy of {0}'.format(cms))
cms = os.path.abspath(cms)
cfg = os.path.abspath(cfg)
direc, cms_filename = os.path.split(cms)
cwd = os.getcwd()
name = os.path.splitext(cms_filename)[0]
energy_file = '%s/%s.enegrp.dat' % (direc, name)
if des_path and not (des_path == ''):
desmond_bin = os.path.join(des_path,'desmond')
elif os.environ.get('SCHRODINGER'):
desmond_bin = os.path.join(os.environ.get('SCHRODINGER'), 'desmond')
else:
raise Exception('Desmond binary not found')
# Use DESMOND To evaluate energy
# cd to directory of cms file so that files generated by desmond
# don't clog the working directory
os.chdir(direc)
if os.path.exists('trj'):
shutil.rmtree('trj')
cmd = [desmond_bin, '-WAIT', '-P', '1', '-in', cms, '-JOBNAME', name, '-c', cfg]
    logger.debug('Running DESMOND with command:\n %s' % ' '.join(cmd))
with open('desmond_stdout.txt', 'w') as out, open('desmond_stderr.txt', 'w') as err:
        exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error('Energy evaluation failed. See %s/desmond_stderr.txt' % direc)
os.chdir(cwd) # return directory up a level again
raise Exception('Energy evaluation failed for {0}'.format(cms))
tot_energy = get_desmond_energy_from_file(energy_file)
# for now, remove the desmond '-out.cms' file.
outcms = cms[:-4] + '-out' + cms[-4:]
os.remove(outcms)
os.chdir(cwd) # return directory up a level again
return tot_energy, energy_file
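
# Illustrative sketch, not part of the original module: mirrors how
# get_desmond_energy_from_file splits the first "time=..." line of an
# enegrp.dat file into named energy terms. The sample line is a hypothetical
# placeholder, not real DESMOND output.
def _parse_first_line_demo():
    sample = 'time=0.000000 E_p=-1234.5 stretch=12.3 angle=4.5 pressure=1.0 volume=300.0'
    terms = sample.split()[1:-2]  # drop time plus the trailing pressure and volume terms
    parsed = OrderedDict()
    for term in terms:
        key, value = term.split('=')
        parsed[standardize_key(key)] = float(value) * units.kilocalories_per_mole
    return parsed  # keys become 'Potential', 'Bond', 'Angle'
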
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_fabs45_detached_award_financial_assistance.py
|
Python
|
cc0-1.0
| 3,014 | 0.006636 |
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs45_detached_award_financial_assistance'
def test_column_headers(database):
expected_subset = {'row_number', 'indirect_federal_sharing', 'federal_action_obligation',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test when both are pro
|
vided, IndirectCostFederalShareAmount should be less than or equal to
FederalActionObligation.
|
"""
# One or both not provided, rule ignored
det_award_1 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=None, federal_action_obligation=None)
det_award_2 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=123, federal_action_obligation=None)
det_award_3 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=None, federal_action_obligation=123)
# ICFSA is 0, rule ignored
det_award_4 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=0, federal_action_obligation=123)
# Both have the same sign and are appropriately valued
det_award_5 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=-1, federal_action_obligation=-1)
det_award_6 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=5, federal_action_obligation=6)
# Ignore when CorrectionDeleteIndicator is D
det_award_7 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=123, federal_action_obligation=0,
correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5,
det_award_6, det_award_7])
assert errors == 0
def test_failure(database):
""" Test failure when both are provided, IndirectCostFederalShareAmount should be less than or equal to
FederalActionObligation.
"""
# ICFSA is not 0 but FAO is
det_award_1 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=123, federal_action_obligation=0)
# Differing signs
det_award_2 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=-1, federal_action_obligation=1)
det_award_3 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=1, federal_action_obligation=-1)
# Same sign, absolute value incorrect
det_award_4 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=5, federal_action_obligation=4)
det_award_5 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=-5, federal_action_obligation=-4)
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 5
|
SANBI-SA/tools-sanbi-uwc
|
tools/novo_sort/novo_sort.py
|
Python
|
gpl-3.0
| 986 | 0.008114 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
from subprocess import check_call, CalledProcessError
import shlex
import sys
import logging
log = logging.getLogger( __name__ )
def novo_sort( bam_filename, output_filename ):
cmdline_str = "novosort -c 8 -m 8G -s -f {} -o {}".format( bam_filename, output_filename )
cmdline = newSplit(cmdline_str)
try:
check_call(cmdline)
except CalledProcessError:
print("Error running the nova-sort", file=sys.stderr)
def newSplit(value):
    lex = shlex.shlex(value)
    lex.quotes = '"'
lex.whitespace_split = True
lex.commenters = ''
return list(lex)
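
# Illustrative sketch, not part of the original tool: shows the command string the
# wrapper builds and how newSplit tokenises it before check_call. The file names
# are hypothetical placeholders.
def _example_tokens():
    cmdline_str = "novosort -c 8 -m 8G -s -f {} -o {}".format('reads.bam', 'reads.sorted.bam')
    return newSplit(cmdline_str)  # ['novosort', '-c', '8', '-m', '8G', '-s', '-f', 'reads.bam', '-o', 'reads.sorted.bam']
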
def main():
parser = argparse.ArgumentParser(description="Re-sorting aligned files by read position")
parser.add_argument('output_filename')
parser.add_argument('--bam_filename')
args = parser.parse_args()
novo_sort(args.bam_filename, args.output_filename)
if __name__ == "__main__":
main()
|
GeoNode/geonode-notification
|
notification/migrations/0001_initial.py
|
Python
|
mit
| 2,390 | 0.003766 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='NoticeQueueBatch',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pickled_data', models.TextField()),
],
),
migrations.CreateModel(
name='NoticeSetting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('medium', models.CharField(max_length=1, verbose_name='medium', choices=[(0, b'email')])),
('send', models.BooleanField(verbose_name='send')),
],
options={
'verbose_name': 'notice setting',
'verbose_name_plural': 'notice settings',
},
),
migrations.CreateModel(
name='NoticeType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(max_length=40, verbose_name='label')),
('display', models.CharField(max_length=50, verbose_name='display')),
('description', models.CharField(max_length=100, verbose_name='description')),
('default', models.IntegerField(verbose_name='default')),
],
options={
'verbose_name': 'notice type',
'verbose_name_plural': 'notice types',
},
),
migrations.AddField(
model_name='noticesetting',
name='notice_type',
field=models.ForeignKey(verbose_name='notice type', to='notification.NoticeType'),
),
migrations.AddField(
model_name='noticesetting',
name='user',
field=models.ForeignKey(verbose_name='user', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='noticesetting',
unique_together=set([('user', 'notice_type', 'medium')]),
),
]