repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars)
---|---|---|---|---|---|---|---|---|
RPiPasswordGenerator/Twitter-Password-Generator-for-Python
|
auth.py
|
Python
|
apache-2.0
| 672 | 0 |
#!/usr/bin/env python
# Copyright 2014 RPiPasswordGenerator
# file: auth.py
# This file just needs to be run.
import tweepy
import sys
CONSUMER_KEY = 'aLrazWkhGaRyLe30HWZcCJrnN'
CONSUMER_SECRET = 'jNSbrJ9TkOobJTbzL4bfd7CWg5x0kv6KMLCZKO5FRAMdIaFvmK'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.secure = True
auth_url = auth
|
.get_authorization_url()
print 'Please visit this URL, to get your access key: ' + auth_url
verifier = raw_input('PIN: ').strip()
auth.get_access_token(verifier)
print "\nPlease put these codes where asked, in run.py\n"
print "ACCESS_KEY = '%s'" % auth.access_token.key
print "ACCESS
|
_SECRET = '%s'" % auth.access_token.secret
|
jithinbp/vLabtool-v0
|
v0/apps/scope.py
|
Python
|
gpl-3.0
| 18,018 | 0.051116 |
#!/usr/bin/python
'''
oscilloscope for the vLabtool - version 0. \n
Also Includes XY plotting mode, and fitting against standard Sine/Square functions\n
'''
import os
os.environ['QT_API'] = 'pyqt'
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
from PyQt4 import QtCore, QtGui
import time,sys
from v0.templates import analogScope
from v0.analyticsClass import analyticsClass
import sys,os,string
import time
import sys
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
import scipy.optimize as optimize
import scipy.fftpack as fftpack
err_count=0
trial = 0
start_time = time.time()
fps = None
dacval=0
from v0.commands_proto import *
params = {
'image' : 'scope.png',
'name':'Oscilloscope'
}
class AppWindow(QtGui.QMainWindow, analogScope.Ui_MainWindow):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
self.I=kwargs.get('I',None)
self.math = analyticsClass()
self.setWindowTitle(self.I.generic_name + ' : ' +self.I.H.version_string)
self.plot=pg.PlotWidget()
#cross hair
self.vLine = pg.InfiniteLine(angle=90, movable=True)
#self.vLine.setPen(color=(135,44,64,150), width=3)
self.plot.addItem(self.vLine, ignoreBounds=False)
self.proxy = pg.SignalProxy(self.vLine.scene().sigMouseMoved, rateLimit=60, slot=self.readCursor)
self.fps=0
self.max_samples_per_channel=[0,self.I.MAX_SAMPLES/4,self.I.MAX_SAMPLES/4,self.I.MAX_SAMPLES/4,self.I.MAX_SAMPLES/4]
self.liss_win=None
self.liss_ready=False
self.liss_animate_arrow1=None
self.liss_animate_arrow2=None
self.liss_animate_arrow3=None
self.liss_anim1=None
self.liss_anim2=None
self.liss_anim3=None
self.samples=self.I.MAX_SAMPLES/4#self.sample_slider.value()
self.active_channels=1
self.channel_states=np.array([1,0,0,0])
self.channels_in_buffer=1
self.chan1remap='CH1'
self.ch123sa = 0
g=1.75
self.timebase = g
self.lastTime=time.time()
self.trace_colors=[(0,255,20),(255,255,0),(255,10,100),(10,255,255)]
self.plot.setLabel('bottom', 'Time -->>', units='S')
labelStyle = {'color': 'rgb%s'%(str(self.trace_colors[0])), 'font-size': '11pt'}
self.plot.setLabel('left','CH1', units='V',**labelStyle)
self.plot.addLegend(offset=(-10,30))
self.plot2 = pg.ViewBox()
self.ax2 = pg.AxisItem('right')
self.plot.plotItem.layout.addItem(self.ax2, 2, 3)
self.plot.plotItem.scene().addItem(self.plot2)
self.ax2.linkToView(self.plot2)
self.plot2.setXLink(self.plot.plotItem)
self.ax2.setZValue(-10000)
labelStyle = {'color': 'rgb%s'%(str(self.trace_colors[1])), 'font-size': '13pt'}
self.ax2.setLabel('CH2', units='V', **labelStyle)
self.plot2.setGeometry(self.plot.plotItem.vb.sceneBoundingRect())
self.plot2.linkedViewChanged(self.plot.plotItem.vb, self.plot2.XAxis)
## Handle view resizing
self.plot.getViewBox().sigStateChanged.connect(self.updateViews)
self.curve1 = self.plot.plot(name='CH1'); self.curve1.setPen(color=self.trace_colors[0], width=1)
self.curve2 = self.plot.plot(name='CH2'); self.curve2.setPen(color=self.trace_colors[1], width=1)
self.curve3 = self.plot.plot(name='CH3'); self.curve3.setPen(color=self.trace_colors[2], width=1)
self.curve4 = self.plot.plot(name='CH4'); self.curve4.setPen(color=self.trace_colors[3], width=1)
self.curve_lis = self.plot.plot(); self.curve_lis.setPen(color=(255,255,255), width=1)
self.curveF=[]
for a in range(2):
self.curveF.append( self.plot.plot() ); self.curveF[-1].setPen(color=(255,255,255), width=1)
self.curveB = pg.PlotDataItem(name='CH2')
self.plot2.addItem(self.curveB)
self.curveB.setPen(color=self.trace_colors[1], width=1)
self.curveFR = pg.PlotDataItem()
self.plot2.addItem(self.curveFR); self.curveFR.setPen(color=(255,255,255), width=1)
self.CH1_ENABLE.setStyleSheet('background-color:rgba'+str(self.trace_colors[0])[:-1]+',3);color:(0,0,0);')
self.CH2_ENABLE.setStyleSheet('background-color:rgba'+str(self.trace_colors[1])[:-1]+',3);color:(0,0,0);')
for a in range(4):
self.trigger_select_box.setItemData(a, QtGui.QColor(*self.trace_colors[a]), QtCore.Qt.BackgroundRole);
self.triggerChannelName='CH1'
self.arrow = pg.ArrowItem(pos=(0, 0), angle=0)
self.plot.addItem(self.arrow)
#markings every 5 Volts
self.voltsperdiv = ['5V/div','3V/div','2V/div','1V/div','500mV/div','400mV/div','300mV/div','100mV/div']
self.trigger_channel=0
self.trigger_level = 0
self.trigtext = pg.TextItem(html=self.trigger_text('CH1'), anchor=(1.2,0.5), border='w', fill=(0, 0, 255, 100),angle=0)
self.plot.addItem(self.trigtext)
self.plot.showGrid(True,False,0.4)
self.scope_type=0
self.plot_area.addWidget(self.plot)
self.CH1_REMAPS.addItems(self.I.allAnalogChannels)
self.showgrid()
self.trigtext.setParentItem(self.arrow)
self.I.configure_trigger(self.trigger_channel,self.triggerChannelName,0)
self.autoRange()
self.timer = QtCore.QTimer()
self.finished=False
self.timer.singleShot(500,self.start_capture)
def updateViews(self):
self.plot2.setGeometry(self.plot.getViewBox().sceneBoundingRect())
self.plot2.linkedViewChanged(self.plot.plotItem.vb, self.plot2.XAxis)
def trigger_text(self,c):
return '<div style="text-align: center"><span style
|
="color: #FFF; font-size: 8pt;">
|
'+c+'</span></div>'
def showgrid(self):
return
def start_capture(self):
if self.finished:
return
if(self.freezeButton.isChecked()):
self.timer.singleShot(200,self.start_capture)
return
temperature=self.I.get_temperature()
self.plot.setTitle('%0.2f fps, %0.1f ^C' % (self.fps,temperature ) )
self.channels_in_buffer=self.active_channels
a = self.CH1_ENABLE.isChecked()
b = self.CH2_ENABLE.isChecked()
c = self.FOURCHAN_ENABLE.isChecked()
if c:
self.active_channels=4
elif b:
self.active_channels=2
elif a:
self.active_channels=1
else:
self.active_channels=0
self.channels_in_buffer=self.active_channels
self.channel_states[0]=a
self.channel_states[1]=b
self.channel_states[2]=c
self.channel_states[3]=c
if self.active_channels:
self.I.configure_trigger(self.trigger_channel,self.triggerChannelName,self.trigger_level,resolution=10)
self.I.capture_traces(self.active_channels,self.samples,self.timebase,self.chan1remap,self.ch123sa)
self.timer.singleShot(self.samples*self.I.timebase*1e-3+10,self.update)
def update(self):
n=0
while(not self.I.oscilloscope_progress()[0]):
time.sleep(0.1)
print self.timebase,'correction required',n
n+=1
if n>10:
self.timer.singleShot(100,self.start_capture)
return
if(self.channels_in_buffer>=1):self.I.__fetch_channel__(1)
if(self.channels_in_buffer>=2):self.I.__fetch_channel__(2)
if(self.channels_in_buffer>=3):self.I.__fetch_channel__(3)
if(self.channels_in_buffer>=4):self.I.__fetch_channel__(4)
self.curve1.clear()
self.curve2.clear()
self.curve3.clear()
self.curve4.clear()
self.curveB.clear()
self.curveF[0].clear()
self.curveF[1].clear()
self.curveFR.clear()
msg='';pos=0
for fitsel in [self.fit_select_box,self.fit_select_box_2]:
if fitsel.currentIndex()<4:
if len(msg)>0:
msg+='\n'
if self.channel_states[fitsel.currentIndex()]:
if fitsel.currentText()=='CH2':
msg+='FIT '+chr(pos+65)+': '+self.fitData(self.I.achans[fitsel.currentIndex()].get_xaxis(),\
self.I.achans[fitsel.currentIndex()].get_yaxis(),self.curveFR)
else:
msg+='FIT '+chr(pos+65)+': '+self.fitData(self.I.achans[fitsel.currentIndex()].get_xaxis(),\
self.I.achans[fitsel.currentIndex()].get_yaxis(),self.curveF[pos])
else:
msg+='FIT '+chr(pos+65)+': Channel Unavailable'
pos+=1
if len(msg):
self.message_label.setText(msg)
pos=0
if self.Liss_show.isChecked():
chans = ['CH1','CH2','CH3','CH4']
lissx = self.Liss_x.currentText()
lissy = self.Liss_y.currentText()
self.liss_x = chans.index(lissx)
self.liss_y = chans.index(lissy)
la=self.I.achans[self.liss_x].get_yaxis()
lb=self.I.achans[self.liss_y].get_yaxis()
if(self.liss_x<self.active_channels and self.liss_y<self.active_channels and len(la)==len(lb)):
self.curve_lis.setData(self.I.achans[self.l
|
elliotthill/django-oscar
|
sites/sandbox/apps/gateway/views.py
|
Python
|
bsd-3-clause
| 1,784 | 0.001121 |
import logging
from django.views import generic
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.mail import send_mail
from django import http
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template import Context
from apps.gateway import forms
from oscar.apps.customer.forms import generate_username
logger = logging.getLogger('gateway')
class GatewayView(generic.FormView):
template_name = 'gateway/form.html'
form_class = forms.GatewayForm
def form_valid(self, form):
real_email = form.cleaned_data['email']
username = generate_username()
password = generate_username
|
()
email = 'dashboard-user-%s@oscarcommerce.com' % username
user = self.create_dashboard_user(username, email, password)
self.send_confirmation_email(real_email, user, password)
logger.info("Created dashboard user #%d for %s",
|
user.id, real_email)
messages.success(
self.request,
"The credentials for a dashboard user have been sent to %s" % real_email)
return http.HttpResponseRedirect(reverse('gateway'))
def create_dashboard_user(self, username, email, password):
user = User.objects.create_user(username, email, password)
user.is_staff = True
user.save()
return user
def send_confirmation_email(self, real_email, user, password):
msg = get_template('gateway/email.txt').render(Context({
'email': user.email,
'password': password
}))
send_mail('Dashboard access to Oscar sandbox',
msg, 'blackhole@sandbox.qa.tangentlabs.co.uk',
[real_email])
|
NeostreamTechnology/Microservices
|
venv/lib/python2.7/site-packages/connexion/cli.py
|
Python
|
mit
| 5,463 | 0.002014 |
import logging
import sys
from os import path
import click
from clickclick import AliasedGroup, fatal_error
import connexion
from connexion.mock import MockResolver
logger = logging.getLogger('connexion.cli')
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def validate_wsgi_server_requirements(ctx, param, value):
if value == 'gevent':
try:
import gevent # NOQA
except:
fatal_error('gevent library is not installed')
elif value == 'tornado':
try:
import tornado # NOQA
except:
fatal_error('tornado library is not installed')
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('Connexion {}'.format(connexion.__version__))
ctx.exit()
@click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS)
@click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help='Print the current version number and exit.')
def main():
pass
@main.command()
@click.argument('spec_file')
@click.argument('base_module_path', required=False)
@click.option('--port', '-p', default=5000, type=int, help='Port to listen.')
@click.option('--host', '-H', type=str, help='Host interface to bind on.')
@click.option('--wsgi-server', '-w', default='flask',
type=click.Choice(['flask', 'gevent', 'tornado']),
callback=validate_wsgi_server_requirements,
help='Which WSGI server container to use.')
@click.option('--stub',
help='Returns status code 501, and `Not Implemented Yet` payload, for '
'the endpoints which handlers are not found.',
is_flag=True, default=False)
@click.option('--mock', metavar='MOCKMODE', type=click.Choice(['all', 'notimplemented']),
help='Returns example data for all endpoints or for which handlers are not found.')
@click.option('--hide-spec',
help='Hides the API spec in JSON format which is by default available at `/swagger.json`.',
is_flag=True, default=False)
@click.option('--hide-console-ui',
help='Hides the the API console UI which is by default available at `/ui`.',
is_flag=True, default=False)
@click.option('--console-ui-url', metavar='URL',
help='Personalize what URL path the API console UI will be mounted.')
@click.option('--console-ui-from', metavar='PATH',
help='Path to a customized API console UI dashboard.')
@click.option('--auth-all-paths',
help='Enable authentication to paths not defined in the spec.',
is_flag=True, default=False)
@click.option('--validate-responses',
help='Enable validation of response values from operation handlers.',
is_flag=True, default=False)
@click.option('--strict-validation',
help='Enable strict validation of request payloads.',
is_flag=True, default=False)
@click.option('--debug', '-d', help='Show debugging information.',
is_flag=True, default=False)
@click.option('--verbose', '-v', help='Show verbose information.', count=True)
@click.option('--base-path', metavar='PATH',
help='Override the basePath in the API spec.')
def run(spec_file,
base_module_path,
port,
host,
wsgi_server,
stub,
mock,
hide_spec,
hide_console_ui,
console_ui_url,
console_ui_from,
auth_all_paths,
validate_responses,
strict_validation,
debug,
verbose,
base_path):
"""
Runs a server compliant with a OpenAPI/Swagger 2.0 Specification file.
Arguments:
- SPEC_FILE: specification file that describes the server endpoints.
- BASE_MODULE_PATH (optional): filesystem path where the API endpoints handlers are going to be imported from.
"""
logging_level = logging.WARN
if verbose > 0:
logging_level = logging.INFO
if debug or verbose > 1:
logging_level = logging.DEBUG
debug = True
logging.basicConfig(level=logging_level)
spec_file_full_path = path.abspath(spec_file)
py_module_path = base_module_path or path.dirname(spec_file_full_path)
sys.path.insert(1, path.abspath(py_module_path))
logger.debug('Added {} to system path.'.format(py_module_path))
resolver_error = None
if stub:
resolver_error = 501
api_extra_args = {}
if mock:
resolver = MockResolver(mock_all=mock == 'all')
api_extra_args['resolver'] = resolver
app = connexion.FlaskApp(__name__,
swagger_json=not hide_spec,
swagger_ui=not hide_console_ui,
|
swagger_path=console_ui_from or None,
|
swagger_url=console_ui_url or None,
auth_all_paths=auth_all_paths,
debug=debug)
app.add_api(spec_file_full_path,
base_path=base_path,
resolver_error=resolver_error,
validate_responses=validate_responses,
strict_validation=strict_validation,
**api_extra_args)
app.run(port=port,
host=host,
server=wsgi_server,
debug=debug)
if __name__ == '__main__': # pragma: no cover
main()
|
kaspermarstal/SuperElastix
|
ContinuousRegistration/Source/make_evaluation.py
|
Python
|
apache-2.0
| 4,367 | 0.007786 |
import os, json, datetime
from ContinuousRegistration.Source.make_registration_scripts import parser
from ContinuousRegistration.Source.util import logging, load_submissions, write_json
from ContinuousRegistration.Source.datasets import load_datasets
def run(parameters):
submissions = load_submissions(parameters)
datasets = load_datasets(parameters)
results = {}
for team_name, blueprint_file_names in submissions.items():
for blueprint_file_name in blueprint_file_names:
if not team_name in results:
results[team_name] = {}
blueprint_name, blueprint_ext = os.path.splitext(os.path.basename(blueprint_file_name))
if not blueprint_name in results[team_name]:
results[team_name][blueprint_name] = {}
logging.info('Loading blueprint %s/%s.' % (team_name, os.path.basename(blueprint_name)))
blueprint = json.load(open(blueprint_file_name))
for dataset_name in blueprint['Datasets']:
if not dataset_name in datasets:
continue
dataset = datasets[dataset_name]
results[team_name][blueprint_name][dataset_name] = []
for file_names in dataset.generator():
output_directory = os.path.join(parameters.output_directory, team_name, blueprint_name)
logging.info('Evaluating registration for blueprint %s and images %s.', blueprint_name, file_names['image_file_names'])
try:
results[team_name][blueprint_name][dataset.name].append(dataset.evaluate(
parameters.superelastix, file_names, output_directory))
if hasattr(parameters, 'make_images') and parameters.make_images:
dataset.make_images(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_labels') and parameters.make_labels:
dataset.make_labels(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_difference_images') and parameters.make_difference_images:
dataset.make_difference_images(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_checkerboards') and parameters.make_checkerboards:
dataset.make_checkerboards(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_image_checkerboards') and parameters.make_image_checkerboards:
dataset.make_image_checkerboards(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_label_checkerboards') and parameters.make_label_checkerboards and dataset.name in ["CUMC12", "IBSR18", "LPBA40", "MGH10"]:
dataset.make_label_checkerboards(parameters.superelastix, file_names, output_directory)
except Exception as e:
logging.error('Error during evaluation of %s\'s blueprint %s and dataset %s: %s'
% (team_name, blueprint_name, dataset.name, str(e)))
write_json(os.path.join(parameters.output_directory,
'results.json'), results)
return results
if __name__ == '__main__':
parser.add_argument('--make-images', '-mi', type=bool, default=False, help="Warp moving images.")
parser.add_argument('--make-labels',
|
'-ml', type=bool, default=False, help="Warp moving labels.")
parser.add_argument('--make-difference-images', '-m
|
di', type=bool, default=False, help="Warp moving images and subtract from fixed images.")
parser.add_argument('--make-checkerboards', '-mc', type=bool, default=False, help="Warp checkerboard pattern.")
parser.add_argument('--make-image-checkerboards', '-mic', type=bool, default=False,
help="Warp moving images and make checkerboard with fixed and warped moving image.")
parser.add_argument('--make-label-checkerboards', '-mlc', type=bool, default=False,
help="Warp moving labels and make checkerboard with fixed and warped moving label.")
run(parser.parse_args())
|
mbdriscoll/asp-old
|
specializers/stencil/stencil_python_front_end.py
|
Python
|
bsd-3-clause
| 6,699 | 0.00627 |
"""Takes a Python AST and converts it to a corresponding StencilModel.
Throws an exception if the input does not represent a valid stencil
kernel program. This is the first stage of processing and is done only
once when a stencil class is initialized.
"""
from stencil_model import *
from assert_utils import *
import ast
from asp.util import *
# class to convert from Python AST to StencilModel
class StencilPyt
|
honFrontEnd(ast.NodeTransformer):
def __init__(self):
super(StencilPythonFrontEnd, self).__init__()
def parse(self, ast):
return self.visit(ast)
def visit_Module(self, node):
body = map(self.visi
|
t, node.body)
assert len(body) == 1
assert_has_type(body[0], StencilModel)
return body[0]
def visit_FunctionDef(self, node):
assert len(node.decorator_list) == 0
arg_ids = self.visit(node.args)
assert arg_ids[0] == 'self'
self.output_arg_id = arg_ids[-1]
self.input_arg_ids = arg_ids[1:-1]
kernels = map(self.visit, node.body)
interior_kernels = map(lambda x: x['kernel'], filter(lambda x: x['kernel_type'] == 'interior_points', kernels))
border_kernels = map(lambda x: x['kernel'], filter(lambda x: x['kernel_type'] == 'border_points', kernels))
assert len(interior_kernels) <= 1, 'Can only have one loop over interior points'
assert len(border_kernels) <= 1, 'Can only have one loop over border points'
return StencilModel(map(lambda x: Identifier(x), self.input_arg_ids),
interior_kernels[0] if len(interior_kernels) > 0 else Kernel([]),
border_kernels[0] if len(border_kernels) > 0 else Kernel([]))
def visit_arguments(self, node):
assert node.vararg == None, 'kernel function may not take variable argument list'
assert node.kwarg == None, 'kernel function may not take variable argument list'
return map (self.visit, node.args)
def visit_Name(self, node):
return node.id
def visit_For(self, node):
# check if this is the right kind of For loop
if (type(node.iter) is ast.Call and
type(node.iter.func) is ast.Attribute):
if (node.iter.func.attr == "interior_points" or
node.iter.func.attr == "border_points"):
assert node.iter.args == [] and node.iter.starargs == None and node.iter.kwargs == None, 'Invalid argument list for %s()' % node.iter.func.attr
grid_id = self.visit(node.iter.func.value)
assert grid_id == self.output_arg_id, 'Can only iterate over %s of output grid "%s" but "%s" was given' % (node.iter.func.attr, self.output_arg_id, grid_id)
self.kernel_target = self.visit(node.target)
body = map(self.visit, node.body)
self.kernel_target = None
return {'kernel_type': node.iter.func.attr, 'kernel': Kernel(body)}
elif node.iter.func.attr == "neighbors":
assert len(node.iter.args) == 2 and node.iter.starargs == None and node.iter.kwargs == None, 'Invalid argument list for neighbors()'
self.neighbor_grid_id = self.visit(node.iter.func.value)
assert self.neighbor_grid_id in self.input_arg_ids, 'Can only iterate over neighbors in an input grid but "%s" was given' % grid_id
neighbors_of_grid_id = self.visit(node.iter.args[0])
assert neighbors_of_grid_id == self.kernel_target, 'Can only iterate over neighbors of an output grid point but "%s" was given' % neighbors_of_grid_id
self.neighbor_target = self.visit(node.target)
body = map(self.visit, node.body)
self.neighbor_target = None
self.neigbor_grid_id = None
neighbors_id = self.visit(node.iter.args[1])
return StencilNeighborIter(Identifier(self.neighbor_grid_id), neighbors_id, body)
else:
assert False, 'Invalid call in For loop argument \'%s\', can only iterate over interior_points, boder_points, or neighbor_points of a grid' % node.iter.func.attr
else:
assert False, 'Unexpected For loop \'%s\', can only iterate over interior_points, boder_points, or neighbor_points of a grid' % node
def visit_AugAssign(self, node):
target = self.visit(node.target)
assert type(target) is OutputElement, 'Only assignments to current output element permitted'
return OutputAssignment(ScalarBinOp(OutputElement(), node.op, self.visit(node.value)))
def visit_Assign(self, node):
targets = map (self.visit, node.targets)
assert len(targets) == 1 and type(targets[0]) is OutputElement, 'Only assignments to current output element permitted'
return OutputAssignment(self.visit(node.value))
def visit_Subscript(self, node):
if type(node.slice) is ast.Index:
grid_id = self.visit(node.value)
target = self.visit(node.slice.value)
if grid_id == self.output_arg_id and target == self.kernel_target:
return OutputElement()
elif target == self.kernel_target:
return InputElementZeroOffset(Identifier(grid_id))
elif grid_id == self.neighbor_grid_id and target == self.neighbor_target:
return Neighbor()
elif isinstance(target, Expr):
return InputElementExprIndex(Identifier(grid_id), target)
else:
assert False, 'Unexpected subscript index \'%s\' on grid \'%s\'' % (target, grid_id)
else:
assert False, 'Unsupported subscript object \'%s\' on grid \'%s\'' % (node.slice, grid_id)
def visit_BinOp(self, node):
return ScalarBinOp(self.visit(node.left), node.op, self.visit(node.right))
def visit_Num(self, node):
return Constant(node.n)
def visit_Call(self, node):
assert isinstance(node.func, ast.Name), 'Cannot call expression'
if node.func.id == 'distance' and len(node.args) == 2:
if ((node.args[0].id == self.neighbor_target and node.args[1].id == self.kernel_target) or \
(node.args[0].id == self.kernel_target and node.args[1].id == self.neighbor_target)):
return NeighborDistance()
elif ((node.args[0].id == self.neighbor_target and node.args[1].id == self.neighbor_target) or \
(node.args[0].id == self.kernel_target and node.args[1].id == self.kernel_target)):
return Constant(0)
else:
return MathFunction(node.func.id, map(self.visit, node.args))
|
voiceofrae/Python
|
antivowel.py
|
Python
|
mit
| 201 | 0.0199 |
import re
def anti_vowel(text):
|
newtext = re.sub('[AEIOUaeiou]', '', text)
print newtext
anti_vowel("Hey Look Words!")
anti_vowel("THE QUICK BROWN FOX SLYLY JUMPED OVER TH
|
E LAZY DOG")
|
SINGROUP/pycp2k
|
pycp2k/classes/_check_spline3.py
|
Python
|
lgpl-3.0
| 674 | 0.002967 |
from pycp2k.inputsecti
|
on import InputSection
from ._each295 import _each295
class _check_spline3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_k
|
ey = None
self.EACH = _each295()
self._name = "CHECK_SPLINE"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
UQ-UQx/PerspectivesX
|
perspectivesx_project/django_auth_lti/tests/test_verification.py
|
Python
|
mit
| 1,706 | 0.005862 |
from unittest import TestCase
from mock import MagicMock
from django_auth_lti.verification import is_allowed
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
class TestVerification(TestCase):
def test_is_allowed_config_failure(self):
request = MagicMock(LTI={})
allowed_roles = ["admin", "student"]
self.assertRaises(ImproperlyConfigured, is_allowed,
request, allowed_roles, False)
def test_is_allowed_success(self):
request = MagicMock(LTI={"roles": ["admin"]})
allowed_roles = ["admin", "student"]
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertTrue(user_is_allowed)
def test_is_allo
|
wed_success_one_role(self):
request = MagicMock(LTI={"roles": ["admin"]})
allowed_roles = "admin"
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertTrue(user_is_allowed)
def test_is_allowed_failure(self):
request = MagicMock(LTI={"roles":[]})
allowed_roles = ["admin", "student"]
u
|
ser_is_allowed = is_allowed(request, allowed_roles, False)
self.assertFalse(user_is_allowed)
def test_is_allowed_failure_one_role(self):
request = MagicMock(LTI={"roles":[]})
allowed_roles = "admin"
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertFalse(user_is_allowed)
def test_is_allowed_exception(self):
request = MagicMock(LTI={"roles":["TF"]})
allowed_roles = ["admin", "student"]
self.assertRaises(PermissionDenied, is_allowed,
request, allowed_roles, True)
|
huahbo/pyamg
|
pyamg/util/utils.py
|
Python
|
mit
| 65,500 | 0.000031 |
"""General utility functions for pyamg"""
__docformat__ = "restructuredtext en"
from warnings import warn
import numpy as np
import scipy as sp
from scipy.sparse import isspmatrix, isspmatrix_csr, isspmatrix_csc, \
isspmatrix_bsr, csr_matrix, csc_matrix, bsr_matrix, coo_matrix, eye
from scipy.sparse.sputils import upcast
from pyamg.util.linalg import norm, cond, pinv_array
from scipy.linalg import eigvals
import pyamg.amg_core
__all__ = ['blocksize', 'diag_sparse', 'profile_solver', 'to_type',
'type_prep', 'get_diagonal', 'UnAmal', 'Coord2RBM',
'hierarchy_spectrum', 'print_table', 'get_block_diag', 'amalgamate',
'symmetric_rescaling', 'symmetric_rescaling_sa',
'relaxation_as_linear_operator', 'filter_operator', 'scale_T',
'get_Cpt_params', 'compute_BtBinv', 'eliminate_diag_dom_nodes',
'levelize_strength_or_aggregation',
'levelize_smooth_or_improve_candidates']
try:
from scipy.sparse._sparsetools import csr_scale_rows, bsr_scale_rows
from scipy.sparse._sparsetools import csr_scale_columns, bsr_scale_columns
except ImportError:
from scipy.sparse.sparsetools import csr_scale_rows, bsr_scale_rows
from scipy.sparse.sparsetools import csr_scale_columns, bsr_scale_columns
def blocksize(A):
# Helper Function: return the blocksize of a matrix
if isspmatrix_bsr(A):
return A.blocksize[0]
else:
return 1
def profile_solver(ml, accel=None, **kwargs):
"""
A quick solver to profile a particular multilevel object
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg)
"""
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)
residuals = []
if accel is None:
x_sol = ml.solve(b, residuals=residuals, **kwargs)
del x_sol
else:
def callback(x):
residuals.append(norm(np.ravel(b) - np.ravel(A*x)))
M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
accel(A, b, M=M, callback=callback, **kwargs)
return np.asarray(residuals)
def diag_sparse(A):
"""
If A is a sparse matrix (e.g. csr_matrix or csc_matrix)
- return the diagonal of A as an array
Otherwise
- return a csr_matrix with A on the diagonal
Parameters
----------
A : sparse matrix or 1d array
General sparse matrix or array of diagonal entries
Returns
-------
B : array or sparse matrix
Diagonal sparse is returned as csr if A is dense otherwise return an
array of the diagonal
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import diag_sparse
>>> d = 2.0*np.ones((3,)).ravel()
>>> print diag_sparse(d).todense()
[[ 2. 0. 0.]
[ 0. 2. 0.]
[ 0. 0. 2.]]
"""
if isspmatrix(A):
return A.diagonal()
else:
if(np.ndim(A) != 1):
raise ValueError('input diagonal array expected to be 1d')
return csr_matrix((np.asarray(A), np.arange(len(A)),
np.arange(len(A)+1)), (len(A), len(A)))
def scale_rows(A, v, copy=True):
"""
Scale the sparse rows of a matrix
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1)))
"""
v = np.ravel(v)
if isspmatrix_csr(A) or isspmatrix_bsr(A):
M, N = A.shape
if M != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data =
|
np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_rows(M, N, A.indptr, A.indices, A.data, v)
else:
|
R, C = A.blocksize
bsr_scale_rows(M/R, N/C, R, C, A.indptr, A.indices,
np.ravel(A.data), v)
return A
elif isspmatrix_csc(A):
return scale_columns(A.T, v)
else:
return scale_rows(csr_matrix(A), v)
def scale_columns(A, v, copy=True):
"""
Scale the sparse columns of a matrix
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent to
scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print scale_columns(A,5*np.ones((A.shape[1],1))).todense()
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]]
"""
v = np.ravel(v)
if isspmatrix_csr(A) or isspmatrix_bsr(A):
M, N = A.shape
if N != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_columns(M, N, A.indptr, A.indices, A.data, v)
else:
R, C = A.blocksize
bsr_scale_columns(M/R, N/C, R, C, A.indptr, A.indices,
np.ravel(A.data), v)
return A
elif isspmatrix_csc(A):
return scale_rows(A.T, v)
else:
return scale_rows(csr_matrix(A), v)
def symmetric_rescaling(A, copy=True):
"""
Scale the matrix symmetrically::
A = D^{-1/2} A D^{-1/2}
where D=diag(A).
The left multiplication is accomplished through scale_rows and the right
multiplication is done through scale columns.
Par
|
sameerparekh/pants
|
src/python/pants/version.py
|
Python
|
apache-2.0
| 320 | 0.003125 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.
|
md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_state
|
ment)
VERSION = '0.0.50'
|
newcontext/rubypython
|
spec/python_helpers/basics.py
|
Python
|
mit
| 409 | 0.036675 |
#!/usr/bin/env python
def iterate_list():
for item in [ 1, 2, 3 ]:
yield item
def identity(object):
return object
|
def simple_callback(callback, value):
return ca
|
llback(value)
def simple_generator(callback):
output = []
for i in callback():
output.append(i)
return output
def named_args(arg1, arg2):
return [arg1, arg2]
def expects_tuple(tvalue):
return isinstance(tvalue, tuple)
|
bradjasper/django-jsonfield
|
jsonfield/__init__.py
|
Python
|
mit
| 53 | 0 |
from .fields i
|
mport JSONField, JSO
|
NCharField # noqa
|
clubcapra/capra_seagoat
|
src/seagoat_ros_client/seagoat_ros_client.py
|
Python
|
gpl-3.0
| 5,086 | 0.004915 |
#! /usr/bin/env python
import rospy
import math
import numpy
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
from std_msgs.msg import Header
from multiprocessing import Pool
from multiprocessing import cpu_count
class Line:
def __init__(self, points):
assert isinstance(points, list)
self.points = points
class SeaGoatRosClient:
def __init__(self):
self.publisher = rospy.Publisher('VisionScan', LaserScan)
self.subscriber = rospy.Subscriber('ImageArray', Image, self.image_callback)
rospy.init_node('SeaGoatRosClient')
self.r = rospy.Rate(15)
self.vision_raw_scan = numpy.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0]])
#Init lines intersect
self.range_max = 0
self.angle_max = math.radians(180.0)
self.angle_increment = math.radians(0.5)
self.number_lines = int(self.angle_max/self.angle_increment)+1
self.init = False
self.max_pixel_dist = 0
self.pool = Pool(cpu_count()/2)
self.resolution = 3.0
self.image_height_centimeter = 300
#self._init_lines()
self.tasks = list()
self.id = 0
print self.vision_raw_scan
def publish_loop(self):
while not rospy.is_shutdown():
vision_scan = self.convert_array_to_laser_scan(self.vision_raw_scan)
if vision_scan is not None:
self.publisher.publish(vision_scan)
self.r.sleep()
def convert_array_to_laser_scan(self, vision_raw_scan):
if vision_raw_scan.size < 100:
return None
header = Header()
header.frame_id = "vision_scan"
#header.stamp = time()
laser_scan = LaserScan()
laser_scan.angle_min = 0.0
laser_scan.angle_max = self.angle_max
laser_scan.angle_increment = self.angle_increment
laser_scan.range_min = 0.0
laser_scan.range_max = self.range_max
#laser_scan.ranges = [0]*360
image_size = vision_raw_scan.shape
if len(image_size) == 3:
vision_raw_scan = cv2.cvtColor(vision_raw_scan, cv2.COLOR_BGR2GRAY)
image_size
|
= vision_raw_scan.shape
if self.init is False:
self._init_lines(image_size)
self.init = True
tasks = list()
for line in range(self.number_lines):
tasks.append((vision_raw_scan, self.lines[line]))
laser_scan.ranges = self.pool.map(_getObstacle, tasks)
|
#pool.close()
laser_scan.header = header
#laser_scan.scan_time = 1.0/5.0
#laser_scan.time_increment = 1.0/5.0
return laser_scan
def image_callback(self, msg):
image = CvBridge().imgmsg_to_cv2(msg)
self.vision_raw_scan = numpy.asanyarray(image)
#cv2.imshow("Image", image)
#cv2.waitKey(10)
def _init_lines(self, image_size):
origin_x = int(image_size[1] / 2)
origin_y = (image_size[0] - 1)-50
current_angle = 0
self.centimeter_by_pixel= float(self.image_height_centimeter)/float(image_size[0])
self.max_points = int(math.sqrt(math.pow(image_size[0], 2) + math.pow(image_size[1], 2)))
self.lines = numpy.ndarray((self.number_lines, self.max_points, 3), dtype=int)
for line_id in range(self.number_lines):
current_x = origin_x
current_y = origin_y
current_pixel_dist = 0
line = self.lines[line_id]
point_id = -1
while current_x < image_size[1] and current_y < image_size[0] and current_x >= 0 and current_y >= 0:
if (current_pixel_dist > 0):
point = line[point_id]
point[0] = current_x
point[1] = current_y
point[2] = int(current_pixel_dist*self.centimeter_by_pixel)
if point[2] > self.range_max:
self.range_max = point[2]
current_pixel_dist += self.resolution
current_x = int(current_pixel_dist * math.cos(current_angle)) + origin_x
current_y = int(current_pixel_dist * math.sin(-1 * current_angle)) + origin_y
point_id += 1
if point_id < self.max_points:
end_point = line[point_id]
end_point[0] = -1
end_point[1] = -1
end_point[2] = -1
#self.lines = self.lines + (line,)
#self.tasks.append(list([self.vision_raw_scan, line]))
current_angle += self.angle_increment
def _getObstacle(args):
#line = lines[i]
image = args[0]
line = args[1]
for point_id in range(len(line)):
point = line[point_id]
if point[0] == -1 and point[1] == -1 and point[2] == -1:
break
if image[point[1]][point[0]] > 0:
return float(point[2])/100.0
return 0.0
if __name__ == '__main__':
sgrc = SeaGoatRosClient()
sgrc.publish_loop()
|
creaktive/aircrack-ng
|
scripts/airgraph-ng/airgraphviz/libOuiParse.py
|
Python
|
gpl-2.0
| 8,152 | 0.008342 |
#!/usr/bin/env python
__author__ = 'Ben "TheX1le" Smith, Marfi'
__email__ = 'thex1le@gmail.com'
__website__= ''
__date__ = '04/26/2011'
__version__ = '2011.4.26'
__file__ = 'ouiParse.py'
__data__ = 'a class for dealing with the oui txt file'
"""
########################################
#
# This program and its support programs are free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
#########################################
"""
import re
import sys
if sys.version_info[0] >= 3:
import requests
else:
import urllib
import os
import pdb
#this lib is crap and needs to be rewritten -Textile
if os.getenv('AIRGRAPH_HOME') is not None and os.path.isdir(os.getenv('AIRGRAPH_HOME')):
path=os.getenv('AIRGRAPH_HOME') + '/support/'
if not os.path.isdir(path):
try:
os.mkdir(path)
except:
raise Exception("Can't create destination directory (%s)!" % path)
elif os.path.isdir('./support/'):
path='./support/'
elif os.path.isdir('/usr/local/share/airgraph-ng/'):
path='/usr/local/share/airgraph-ng/'
elif os.path.isdir('/usr/share/airgraph-ng/'):
path='/usr/share/airgraph-ng/'
else:
raise Exception("Could not determine path, please, check your installation or set AIRGRAPH_HOME environment variable")
class macOUI_lookup:
"""
A class for deaing with OUIs and deterimining device type
"""
def __init__(self, oui=False):
"""
generate the two dictionaries and return them
"""
#a poor fix where if we have no file it trys to download it
self.ouiTxtUrl = "http://standards-oui.ieee.org/oui.txt"
self.ouiTxt = oui
if not oui or not os.path.isfile(self.ouiTxt):
self.ouiUpdate()
self.ouiTxt = path + "oui.txt"
self.last_error = None
self.identDeviceDict(path + 'ouiDevice.txt')
self.identDeviceDictWhacMac(path + 'whatcDB.csv')
self.ouiRaw = self.ouiOpen(self.ouiTxt)
self.oui_company = self.ouiParse() #dict where oui's are the keys to company names
self.company_oui = self.companyParse() #dict where company name is the key to oui's
def compKeyChk(self,name):
"""
check for valid company name key
"""
compMatch = re.compile(name,re.I)
if name in self.company_oui:
return True
for key in list(self.company_oui.keys()):
if compMatch.search(key) is not None:
return True
return False
def ouiKeyChk(self,name):
"""
check for a valid oui prefix
"""
if name in self.oui_company:
return True
else:
return False
def lookup_OUI(self,mac):
"""
Lookup a oui and return the company name
"""
if self.ouiKeyChk(mac) is not False:
return self.oui_company[mac]
else:
return False
def lookup_company(self,companyLst):
"""
look up a company name and return their OUI's
"""
oui = []
if type(companyLst) is list:
for name in companyLst:
compMatch = re.compile(name,re.I)
if name in self.company_oui:
oui.extend(self.company_oui[name])
else:
for key in self.company_oui:
if compMatch.search(key) is not None:
oui.extend(self.company_oui[key])
elif type(companyLst) is str:
if companyLst in self.company_oui:
oui = self.company_oui[companyLst]
else:
compMatch = re.compile(companyLst,re.I)
for key in self.company_oui:
if compMatch.search(key) is not None:
oui.extend(self.company_oui[key]) #return the oui for that key
return oui
def ouiOpen(self,fname,flag='R'):
"""
open the file and read it in
flag denotes use of read or readlines
"""
try:
with open(fname, "r") as fid:
if flag == 'RL':
text = fid.readlines()
elif flag == 'R':
text = fid.read()
return text
except IOError:
return False
def ouiParse(self):
"""
generate a oui to company lookup dict
"""
HexOui= {}
Hex = re.compile('.*(hex).*')
#matches the following example "00-00-00 (hex)\t\tXEROX CORPORATION"
ouiLines = self.ouiRaw.split("\n")
#split each company into a list one company per position
for line in ouiLines:
if Hex.search(line) is not None:
lineList = Hex.search(line).group().replace("\t"," ").split(" ")
#return the matched text and build a list out of it
HexOui[lineList[0].replace("-",":")] = lineList[2].strip()
#build a dict in the format of mac:company name
return HexOui
def companyParse(self):
"""
generate a company to oui lookup dict
"""
company_oui = {}
for oui in self.oui_company:
if self.oui_company[oui] in company_oui:
company_oui[self.oui_company[oui]].append(oui)
else:
company_oui[self.oui_company[oui]] = [oui]
return company_oui
def ouiUpdate(self):
"""
Grab the oui txt file off the ieee.org website
"""
try:
print(("Getting OUI file from %s to %s" %(self.ouiTxtUrl, path)))
if sys.version_info[0] == 2:
urllib.request.urlretrieve(self.ouiTxtUrl, path + "oui.txt")
else:
response = requests.get(self.ouiTxtUrl)
with open(path + "oui.txt", "wb") as file:
bytes_written = file.write(response.content)
print("Completed Successfully")
except Exception as error:
print(("Could not download file:\n %s\n Exiting a
|
irgraph-ng" %(error)))
sys.exit(0)
def identDeviceDict(self,fname):
"""
Create two dicts allowing device type lookup
one f
|
or oui to device and one from device to OUI group
"""
self.ouitodevice = {}
self.devicetooui = {}
data = self.ouiOpen(fname,'RL')
if data == False:
self.last_error = "Unable to open lookup file for parsing"
return False
for line in data:
dat = line.strip().split(',')
self.ouitodevice[dat[1]] = dat[0]
if dat[0] in list(self.devicetooui.keys()):
self.devicetooui[dat[0]].append(dat[1])
else:
self.devicetooui[dat[0]] = [dat[1]]
def identDeviceDictWhacMac(self,fname):
"""
Create two dicts allowing device type lookup from whatmac DB
one for oui to device and one from the device to OUI group
"""
self.ouitodeviceWhatmac3 = {}
self.ouitodeviceWhatmac = {}
self.devicetoouiWhacmac = {}
data = self.ouiOpen(fname,'RL')
if data == False:
self.last_error = "Unble to open lookup file for parsing"
return False
for line in data:
dat = line.strip().split(',')
dat[0] = dat[0].upper()
self.ouitodeviceWhatmac[dat[0]] = dat[1]
self.ouitodeviceWhatmac3[dat[0][0:8]] = dat[1] # a db to support the 3byte lookup from whatmac
if dat[1] in list(self.devicetoouiWhacmac.keys()):
self.devicetoouiWhacmac[dat[1]].append(dat[0])
else:
self.devicetoouiWhacmac[dat[1]] = [dat[0]]
|
guyemerson/sem-func
|
src/preprocess/wikiwoods_extractcore.py
|
Python
|
mit
| 6,001 | 0.002333 |
import sys, os, gzip, pickle
from xml.etree.ElementTree import ParseError
from traceback import print_tb
from multiprocessing import Pool # @UnresolvedImport
from pydmrs.components import RealPred, GPred
from pydmrs.core import ListDmrs as Dmrs
PROC = 50
def is_verb(pred):
# Ignore GPreds
if not isinstance(pred, RealPred):
return False
if pred.pos == 'v':
# For verbs in the lexicon, ignore modals
if pred.sense == 'modal':
return False
else:
return True
if pred.pos == 'u':
# For unknown words, use the PoS-tag
tag = pred.lemma.rsplit('/', 1)[-1]
if tag[0] == 'v':
return True
return False
def is_noun(pred):
# Assumes not a GPred
if pred.pos == 'n':
return True
if pred.pos == 'u':
# For unknown words, use the PoS-tag
tag = pred.lemma.rsplit('/', 1)[-1]
if tag[0] == 'n':
return True
return False
def find_sit(dmrs, node):
"""
Find if a node representations a situation
:param dmrs: a Dmrs object
:param node: a Node object
:return: (verb, agent, patient), realpred_only
or if not found: None, None
"""
# Only consider verbal nodes
if not is_verb(node.pred):
return None, None
# Output of the form (verb, agent, patient)
output = [node.pred, None, None]
# Record if arguments are RealPreds
noun_only = True
# Look for both ARG1 and ARG2
for i in (1,2):
try: # See if the argument is there
arglink = dmrs.get_out(node.nodeid, 'ARG'+str(i)).pop()
except KeyError:
continue
# Get argument's pred
end = dmrs[arglink.end]
pred = end.pred
# Deal with different pred types
if type(pred) == RealPred:
# Ignore coordinations
if pred.pos == 'c':
continue
# Record the pred
output[i] = pred
# Record if it's not a noun
if not is_noun(pred):
noun_only = False
else:
# Note that this pred is not a RealPred
noun_only = False
# Ignore coordinations
if pred == GPred('implicit_conj'):
continue
# Record information about
|
pronouns
elif pred == GPred('pron'):
pronstring = end.sorti
|
nfo['pers']
try:
pronstring += end.sortinfo['num']
except TypeError: # info is None
pass
try:
pronstring += end.sortinfo['gend']
except TypeError: # info is None
pass
output[i] = pronstring
elif pred == GPred('named'):
output[i] = end.carg
else:
output[i] = pred
# Check if an argument was found
if output[1] or output[2]:
return output, noun_only
else:
return None, None
def extract(xmlstring, sits, extra_sits, filename):
"""
Extract situations from a DMRS in XML form
:param xmlstring: the input XML
:param sits: the list of situations to append to
:param extra_sits: the list of extra situations (including GPreds) to append to
:param filename: the filename to log errors to
"""
try:
dmrs = Dmrs.loads_xml(xmlstring)
except ParseError as e: # badly formed XML
print("ParseError!")
with open('wikiwoods_extractcore.log', 'a') as f:
f.write(filename + ':\n' + xmlstring.decode() + '\n' + str(e) + '\n\n')
return None
# Look for situations
for n in dmrs.iter_nodes():
situation, realpred_only = find_sit(dmrs, n)
if situation:
if realpred_only:
sits.append(situation)
else:
extra_sits.append(situation)
# Directory of DMRSs, and directory to save triples
SOURCE = '/usr/groups/corpora/wikiwoods-1212-tmp/dmrs/'
TARGET = '/anfs/bigdisc/gete2/wikiwoods/core'
EXTRA = '/anfs/bigdisc/gete2/wikiwoods/core-extra'
if not os.path.exists(TARGET):
os.mkdir(TARGET)
if not os.path.exists(EXTRA):
os.mkdir(EXTRA)
def extract_file(filename):
"Extract all situations from a file"
newname = os.path.splitext(filename)[0]+'.pkl'
if os.path.exists(os.path.join(TARGET, newname)):
print('skipping '+filename)
return
try:
with gzip.open(os.path.join(SOURCE, filename),'rb') as f:
print(filename)
# List of situation triples
situations = []
extra_sits = []
# Each xml will span multiple lines,
# separated by an empty line
f.readline() # The first line is blank, for some reason
xml = b''
for line in f:
# Keep adding new lines until we get a blank line
if line != b'\n':
xml += line
else: # Once we've found a blank line, extract the DMRS
extract(xml, situations, extra_sits, filename)
# Reset the xml string
xml = b''
# If the file does not end with a blank line:
if xml != b'':
extract(xml, situations, extra_sits, filename)
# Save the triples in TARGET
with open(os.path.join(TARGET, newname), 'wb') as f:
pickle.dump(situations, f)
with open(os.path.join(EXTRA, newname), 'wb') as f:
pickle.dump(extra_sits, f)
except:
print("Error!")
with open('wikiwoods_extractcore.log', 'a') as f:
f.write(filename+'\n')
_, error, trace = sys.exc_info()
f.write(str(error)+'\n')
print_tb(trace, file=f)
f.write('\n\n')
# Process each file in SOURCE
all_files = sorted(os.listdir(SOURCE))
with Pool(PROC) as p:
p.map(extract_file, all_files)
|
rohitwaghchaure/frappe
|
frappe/utils/boilerplate.py
|
Python
|
mit
| 9,035 | 0.014721 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, re
from frappe.utils import touch_file, encode, cstr
def make_boilerplate(dest, app_name):
if not os.path.exists(dest):
print "Destination directory does not exist"
return
# app_name should be in snake_case
app_name = frappe.scrub(app_name)
hooks = frappe._dict()
hooks.app_name = app_name
app_title = hooks.app_name.replace("_", " ").title()
for key in ("App Title (default: {0})".format(app_title),
"App Description", "App Publisher", "App Email",
"App Icon (default 'octicon octicon-file-directory')",
"App Color (default 'grey')",
"App License (default 'MIT')"):
hook_key = key.split(" (")[0].lower().replace(" ", "_")
hook_val = None
while not hook_val:
hook_val = cstr(raw_input(key + ": "))
if not hook_val:
defaults = {
"app_title": app_title,
"app_icon": "octicon octicon-file-directory",
"app_color": "grey",
"app_license": "MIT"
}
if hook_key in defaults:
hook_val = defaults[hook_key]
if hook_key=="app_name" and hook_val.lower().replace(" ", "_") != hook_val:
print "App Name must be all lowercase and without spaces"
hook_val = ""
elif hook_key=="app_title" and not re.match("^(?![\W])[^\d_\s][\w -]+$", hook_val, re.UNICODE):
print "App Title should start with a letter and it can only consist of letters, numbers, spaces and underscores"
hook_val = ""
hooks[hook_key] = hook_val
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)),
with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "www"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"pages"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"includes"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "config"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"css"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"js"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "__init__.py"), "w") as f:
f.write(encode(init_template))
with open(os.path.join(dest, hooks.app_name, "MANIFEST.in"), "w") as f:
f.write(encode(manifest_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, ".gitignore"), "w") as f:
f.write(encode(gitignore_template.format(app_name = hooks.app_name)))
with open(os.path.join(dest, hooks.app_name, "setup.py"), "w") as f:
f.write(encode(setup_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, "requirements.txt"), "w") as f:
f.write("frappe")
with open(os.path.join(dest, hooks.app_name, "README.md"), "w") as f:
f.write(encode("## {0}\n\n{1}\n\n#### License\n\n{2}".format(hooks.app_title,
hooks.app_description, hooks.app_license)))
with open(os.path.join(dest, hooks.app_name, "license.txt"), "w") as f:
f.write(encode("License: " + hooks.app_license))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "modules.txt"), "w") as f:
f.write(encode(hooks.app_title))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "hooks.py"), "w") as f:
f.write(encode(hooks_template.format(**hooks)))
touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, "patches.txt"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "desktop.py"), "w") as f:
f.write(encode(desktop_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "docs.py"), "w") as f:
f.write(encode(docs_template.format(**hooks)))
print "'{app}' created at {path}".format(app=app_name, path=os.path.join(dest, app_name))
manifest_template = """include MANIFEST.in
include requirements.txt
include *.json
include *.md
include *.py
include *.txt
recursive-include {app_name} *.css
recursive-include {app_name} *.csv
recursive-include {app_name} *.html
recursive-include {app_name} *.ico
recursive-include {app_name} *.js
recursive-include {app_name} *.json
recursive-include {app_name} *.md
recursive-include {app_name} *.png
recursive-include {app_name} *.py
recursive-include {app_name} *.svg
recursive-include {app_name} *.txt
recursive-exclude {app_name} *.pyc"""
init_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.0.1'
"""
hooks_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "{app_name}"
app_title = "{app_title}"
app_publisher = "{app_publisher}"
app_description = "{app_description}"
app_icon = "{app_icon}"
app_color = "{app_color}"
app_email = "{app_email}"
app_license = "{app_license}"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/{app_name}/css/{app_name}.css"
# app_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js, css files in header of web template
# web_include_css = "/assets/{app_name}/css/{app_name}.css"
# web_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js in page
# page_js = {{"page" : "public/js/file.js"}}
# include js in doctype views
# doctype_js = {{"doctype" : "public/js/doctype.js"}}
# doctype_list_js = {{"doctype" : "public/js/doctype_list.js"}}
# doctype_tree_js = {{"doctype" : "public/js/doctype_tree.js"}}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {{
# "Role": "home_page"
# }}
# Website user home page (by function)
# get_website_user_home_page = "{app_name}.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "{app_name}.install.before_install"
# after_install = "{app_name}.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "{app_name}.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {{
# "Event": "frappe.desk.doctyp
|
e.event.event.get_permission_query_conditions",
# }}
#
# has_permission = {{
# "Event": "frappe.desk.doctype.event.ev
|
ent.has_permission",
# }}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {{
# "*": {{
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }}
# }}
# Scheduled Tasks
# ---------------
# scheduler_events = {{
# "all": [
# "{app_name}.tasks.all"
# ],
# "daily": [
# "{app_name}.tasks.daily"
# ],
# "hourly": [
# "{app_name}.tasks.hourly"
# ],
# "weekly": [
# "{app_name}.tasks.weekly"
# ]
# "monthly": [
# "{app_name}.tasks.monthly"
# ]
# }}
# Testing
# -------
# before_tests = "{app_name}.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {{
# "frappe.desk.doctype.event.event.get_events": "{app_name}.event.get_events"
# }}
"""
desktop_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{{
"module_name": "{app_title}",
"color": "{app_color}",
"icon": "{app_icon}",
"type": "module",
"label": _("{app_title}")
}}
]
"""
setup_template = """# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in {app_name}/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('{app_name}/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8'))
|
mikeshardmind/SinbadCogs
|
scheduler/converters.py
|
Python
|
mit
| 4,607 | 0.001954 |
from __future__ import annotations
import argparse
import dataclasses
from datetime import datetime, timedelta, timezone
from typing import NamedTuple, Optional, Tuple
from redbot.core.commands import BadArgument, Context
from .time_utils import parse_time, parse_timedelta
class NonNumeric(NamedTuple):
parsed: str
@classmethod
async def convert(cls, context: Context, argument: str):
if argument.isdigit():
raise BadArgument("Event names must contain at least 1 non-numeric value")
return cls(argument)
class NoExitParser(argparse.ArgumentParser):
def error(self, message):
raise BadArgument()
@dataclasses.dataclass()
class Schedule:
start: datetime
command: str
recur: Optional[timedelta] = None
quiet: bool = False
def to_tuple(self) -> Tuple[str, datetime, Optional[timedelta]]:
return self.command, self.start, self.recur
@classmethod
async def convert(cls, ctx: Context, argument: str):
start: datetime
command: Optional[str] = None
recur: Optional[timedelta] = None
command, *arguments = argument.split(" -- ")
if arguments:
argument = " -- ".join(arguments)
else:
command = None
parser = NoExitParser(description="Scheduler event parsing", add_help=False)
parser.add_argument(
"-q", "--quiet", action="store_true", dest="quiet", default=False
)
parser.add_argument("--every", nargs="*", dest="every", default=[])
if not command:
parser.add_argument("command", nargs="*")
at_or_in = parser.add_mutually_exclusive_group()
at_or_in.add_argument("--start-at", nargs="*", dest="at", default=[])
at_or_in.add_argument("--start-in", nargs="*", dest="in", default=[])
try:
vals = vars(parser.parse_args(argument.split(" ")))
except Exception as exc:
raise BadArgument() from exc
if not (vals["at"] or vals["in"]):
raise BadArgument("You must provide one of `--start-in` or `--start-at`")
        if not command and not vals["command"]:
raise BadArgument("You have to provide a command to run")
command = command or " ".join(vals["command"])
for delta in ("in", "every"):
if vals[delta]:
parsed = parse_timedelta(" ".join(vals[delta]))
if not parsed:
raise BadArgument("I couldn't understand that time interval")
if delta == "in":
start = datetime.now(timezone.utc) + parsed
else:
recur = parsed
if recur.total_seconds() < 60:
raise BadArgument(
"You can't schedule something to happen that frequently, "
"I'll get ratelimited."
)
if vals["at"]:
try:
start = parse_time(" ".join(vals["at"]))
except Exception:
raise BadArgument("I couldn't understand that starting time.") from None
return cls(command=command, start=start, recur=recur, quiet=vals["quiet"])
class TempMute(NamedTuple):
reason: Optional[str]
start: datetime
@classmethod
async def convert(cls, ctx: Context, argument: str):
start: datetime
reason: str
parser = NoExitParser(description="Scheduler event parsing", add_help=False)
parser.add_argument("reason", nargs="*")
at_or_in = parser.add_mutually_exclusive_group()
at_or_in.add_argument("--until", nargs="*", dest="until", default=[])
at_or_in.add_argument("--for", nargs="*", dest="for", default=[])
try:
vals = vars(parser.parse_args(argument.split()))
except Exception as exc:
raise BadArgument() from exc
if not (vals["until"] or vals["for"]):
raise BadArgument("You must provide one of `--until` or `--for`")
reason = " ".join(vals["reason"])
if vals["for"]:
parsed = parse_timedelta(" ".join(vals["for"]))
if not parsed:
raise BadArgument("I couldn't understand that time interval")
start = datetime.now(timezone.utc) + parsed
if vals["until"]:
try:
                start = parse_time(" ".join(vals["until"]))
except Exception:
raise BadArgument("I couldn't understand that unmute time.") from None
return cls(reason, start)
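if __name__ == "__main__":
    # Illustrative sketch (not part of the original cog; the command string
    # "ping" is hypothetical): the Schedule that a parse of roughly
    # "<command> -- --start-in 2 minutes --every 1 hour" is meant to produce.
    example = Schedule(
        start=datetime.now(timezone.utc) + timedelta(minutes=2),
        command="ping",
        recur=timedelta(hours=1),
    )
    print(example.to_tuple())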
|
RagtagOpen/bidwire
|
bidwire/tests/test_knox_co_agenda_scraper.py
|
Python
|
mit
| 2,136 | 0.000468 |
import pytest
import responses
from document import Document
from scrapers.knox_tn_agendas_scraper import KnoxCoTNAgendaScraper
from . import common
from . import utils
class TestKnoxAgendaScraper(object):
session = None
page_str = ""
def test_get_docs_from_page(self):
scraper = KnoxCoTNAgendaScraper()
docs = scraper._get_docs_from_schedule(self.page_str)
assert len(docs) == 4
for doc in docs:
# All URLs should be absolute.
assert doc.url.startswith('https://')
actual_titles = [doc.title for doc in docs]
expected_titles = [
'June 28, 2017: BZA Agenda',
'June 26, 2017: Beer Board',
'June 19, 2017: Work Session',
'June 7, 2017: AGENDA COMMITTEE MEETING',
]
assert expected_titles == actual_titles
@responses.activate
def test_full_scraper(self):
self.session.query(Document).delete()
count = self.session.query(Document).count()
assert count == 0
responses.add(
responses.GET,
            KnoxCoTNAgendaScraper.MEETING_SCHEDULE_URL,
body=self.page_str,
status=200,
match_querystring=True
)
scraper = KnoxCoTNAgendaScraper()
scraper.scrape(self.session)
docs = self.session.query(Document).all()
assert len(docs) == 4
expected_titles = {
'June 28, 2017: BZA Agenda',
'June 26, 2017: Beer Board',
            'June 19, 2017: Work Session',
'June 7, 2017: AGENDA COMMITTEE MEETING',
}
for doc in docs:
assert doc.title in expected_titles
@classmethod
def setup_class(cls):
cls.session = common.Session()
with open(utils.get_abs_filename('knox-co-results-page.html'), 'r') as page:
cls.page_str = page.read()
@classmethod
def teardown_class(cls):
common.Session.remove()
def setup_method(self, test_method):
self.session.begin_nested()
def teardown_method(self, test_method):
self.session.rollback()
|
redbear/Espruino
|
scripts/common.py
|
Python
|
mpl-2.0
| 16,565 | 0.020344 |
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Reads board information from boards/BOARDNAME.py - used by build_board_docs,
# build_pininfo, and build_platform_config
# ----------------------------------------------------------------------------------------
import subprocess;
import re;
import json;
import sys;
import os;
import importlib;
silent = os.getenv("SILENT");
if silent:
class Discarder(object):
def write(self, text):
pass # do nothing
# now discard everything coming out of stdout
sys.stdout = Discarder()
# http://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
if "check_output" not in dir( subprocess ):
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
# Scans files for comments of the form /*JSON......*/
#
# Comments look like:
#
#/*JSON{ "type":"staticmethod|staticproperty|constructor|method|property|function|variable|class|library|idle|init|kill",
# // class = built-in class that does not require instantiation
# // library = built-in class that needs require('classname')
# // idle = function to run on idle regardless
# // init = function to run on initialisation
# // kill = function to run on deinitialisation
# "class" : "Double", "name" : "doubleToIntBits",
# "needs_parentName":true, // optional - if for a method, this makes the first 2 args parent+parentName (not just parent)
# "generate_full|generate|wrap" : "*(JsVarInt*)&x",
# "description" : " Convert the floating point value given into an integer representing the bits contained in it",
# "params" : [ [ "x" , "float|int|int32|bool|pin|JsVar|JsVarName|JsVarArray", "A floating point number"] ],
# // float - parses into a JsVarFloat which is passed to the function
# // int - parses into a JsVarInt which is passed to the function
# // int32 - parses into a 32 bit int
# // bool - parses into a boolean
# // pin - parses into a pin
# // JsVar - passes a JsVar* to the function (after skipping names)
# // JsVarArray - parses this AND ANY SUBSEQUENT ARGUMENTS into a JsVar of type JSV_ARRAY. THIS IS ALWAYS DEFINED, EVEN IF ZERO LENGTH. Currently it must be the only parameter
# "return" : ["int|float|JsVar", "The integer representation of x"],
# "return_object" : "ObjectName", // optional - used for tern's code analysis - so for example we can do hints for openFile(...).yyy
# "no_create_links":1 // optional - if this is set then hyperlinks are not created when this name is mentioned (good example = bit() )
# "not_real_object" : "anything", // optional - for classes, this means we shouldn't treat this as a built-in object, as internally it isn't stored in a JSV_OBJECT
#   "prototype" : "Object", // optional - for classes, this is what their prototype is. It's particularly helpful if not_real_object, because there is no prototype var in that case
# "check" : "jsvIsFoo(var)", // for classes - this is code that returns true if 'var' is of the given type
# "ifndef" : "SAVE_ON_FLASH", // if the given preprocessor macro is defined, don't implement this
# "ifdef" : "USE_LCD_FOO", // if the given preprocessor macro isn't defined, don't implement this
# "#if" : "A>2", // add a #if statement in the generated C file (ONLY if type==object)
#}*/
#
# description can be an array of strings as well as a simple string (in which case each element is separated by a newline),
# and adding ```sometext``` in the description surrounds it with HTML code tags
#
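# Illustrative wrapper comment (hypothetical names, shown only as a sketch of
# the format the scanner below looks for): everything up to the first "}" that
# starts a line is parsed as JSON, and the free text after it becomes the
# "description" field.
#
# /*JSON{ "type" : "function", "name" : "doubler",
#   "generate" : "jswrap_doubler",
#   "params" : [ [ "x", "int", "value to double" ] ],
#   "return" : [ "int", "x times two" ]
# }
# Doubles the integer passed to it. */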
def get_jsondata(is_for_document, parseArgs = True, board = False):
scriptdir = os.path.dirname (os.path.realpath(__file__))
print("Script location "+scriptdir)
os.chdir(scriptdir+"/..")
jswraps = []
defines = []
  if board and ("build" in board.info) and ("defines" in board.info["build"]):
for i in board.info["build"]["defines"]:
print("Got define from board: " + i);
defines.append(i)
if parseArgs and len(sys.argv)>1:
print("Using files from command line")
for i in range(1,len(sys.argv)):
arg = sys.argv[i]
      if arg[0]=="-":
if arg[1]=="D":
defines.append(arg[2:])
elif arg[1]=="B":
board = importlib.import_module(arg[2:])
if "usart" in board.chip: defines.append("USART_COUNT="+str(board.chip["usart"]));
if "spi" in board.chip: defines.append("SPI_COUNT="+str(board.chip["spi"]));
if "i2c" in board.chip: defines.append("I2C_COUNT="+str(board.chip["i2c"]));
if "USB" in board.devices: defines.append("defined(USB)=True");
else: defines.append("defined(USB)=False");
elif arg[1]=="F":
"" # -Fxxx.yy in args is filename xxx.yy, which is mandatory for build_jswrapper.py
else:
print("Unknown command-line option")
exit(1)
else:
jswraps.append(arg)
else:
print("Scanning for jswrap.c files")
jswraps = subprocess.check_output(["find", ".", "-name", "jswrap*.c"]).strip().split("\n")
if len(defines)>1:
print("Got #DEFINES:")
for d in defines: print(" "+d)
jsondatas = []
for jswrap in jswraps:
# ignore anything from archives
if jswrap.startswith("./archives/"): continue
# now scan
print("Scanning "+jswrap)
code = open(jswrap, "r").read()
if is_for_document and "DO_NOT_INCLUDE_IN_DOCS" in code:
print("FOUND 'DO_NOT_INCLUDE_IN_DOCS' IN FILE "+jswrap)
continue
for comment in re.findall(r"/\*JSON.*?\*/", code, re.VERBOSE | re.MULTILINE | re.DOTALL):
charnumber = code.find(comment)
linenumber = 1+code.count("\n", 0, charnumber)
# Strip off /*JSON .. */ bit
comment = comment[6:-2]
endOfJson = comment.find("\n}")+2;
jsonstring = comment[0:endOfJson];
description = comment[endOfJson:].strip();
# print("Parsing "+jsonstring)
try:
jsondata = json.loads(jsonstring)
if len(description): jsondata["description"] = description;
jsondata["filename"] = jswrap
jsondata["include"] = jswrap[:-2]+".h"
jsondata["githublink"] = "https://github.com/espruino/Espruino/blob/master/"+jswrap+"#L"+str(linenumber)
dropped_prefix = "Dropped "
if "name" in jsondata: dropped_prefix += jsondata["name"]+" "
elif "class" in jsondata: dropped_prefix += jsondata["class"]+" "
drop = False
if not is_for_document:
if ("ifndef" in jsondata) and (jsondata["i
|
polyaxon/polyaxon
|
core/polyaxon/polyflow/events/__init__.py
|
Python
|
apache-2.0
| 6,731 | 0.001931 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields, validate
import polyaxon_sdk
from polyaxon.contexts import refs as contexts_refs
from polyaxon.lifecycle import V1Statuses
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class V1EventKind(polyaxon_sdk.V1EventKind):
events_statuses_mapping = {
polyaxon_sdk.V1EventKind.RUN_STATUS_CREATED: V1Statuses.CREATED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RESUMING: V1Statuses.RESUMING,
polyaxon_sdk.V1EventKind.RUN_STATUS_ON_SCHEDULE: V1Statuses.ON_SCHEDULE,
polyaxon_sdk.V1EventKind.RUN_STATUS_COMPILED: V1Statuses.COMPILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_QUEUED: V1Statuses.QUEUED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SCHEDULED: V1Statuses.SCHEDULED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STARTING: V1Statuses.STARTING,
polyaxon_sdk.V1EventKind.RUN_STATUS_RUNNING: V1Statuses.RUNNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_PROCESSING: V1Statuses.PROCESSING,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPING: V1Statuses.STOPPING,
polyaxon_sdk.V1EventKind.RUN_STATUS_FAILED: V1Statuses.FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPED: V1Statuses.STOPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SUCCEEDED: V1Statuses.SUCCEEDED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SKIPPED: V1Statuses.SKIPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_WARNING: V1Statuses.WARNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNSCHEDULABLE: V1Statuses.UNSCHEDULABLE,
polyaxon_sdk.V1EventKind.RUN_STATUS_UPSTREAM_FAILED: V1Statuses.UPSTREAM_FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RETRYING: V1Statuses.RETRYING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNKNOWN: V1Statuses.UNKNOWN,
polyaxon_sdk.V1EventKind.RUN_STATUS_DONE: V1Statuses.DONE,
}
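    # Illustrative lookup (a sketch, not additional API): the mapping above
    # translates a status-type event kind into the run status it reports, e.g.
    #   V1EventKind.events_statuses_mapping[V1EventKind.RUN_STATUS_DONE]
    #   -> V1Statuses.DONE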
class EventTriggerSchema(BaseCamelSchema):
kinds = fields.List(
fields.Str(validate=validate.OneOf(V1EventKind.allowable_values)),
required=True,
)
ref = fields.Str(required=True)
@staticmethod
def schema_config():
return V1EventTrigger
class V1EventTrigger(BaseConfig, contexts_refs.RefMixin, polyaxon_sdk.V1EventTrigger):
"""Events are an advanced triggering logic that users can take advantage of in addition to:
* Manual triggers via API/CLI/UI.
* Time-based triggers with schedules and crons.
* Upstream triggers with upstream runs or upstream ops in DAGs.
Events can be attached to an operation in the context of a DAG
to extend the simple trigger process,
this is generally important when the user defines a dependency between two operations
and needs a run to start as soon as
    the upstream run generates an event instead of waiting until it
    reaches a final state.
    For instance, a usual use-case is to start a tensorboard as soon as training starts.
In that case the downstream operation will watch for the `running` status.
Events can be attached as well to a single operation
to wait for an internal alert or external events,
for instance if a user integrates Polyaxon with Github,
they can trigger training as soon as Polyaxon is notified that a new git commit was created.
Polyaxon provides several internal and external events that users
can leverage to fully automate their usage of the platform:
* "run_status_created"
* "run_status_resuming"
* "run_status_compiled"
* "run_status_queued"
* "run_status_scheduled"
* "run_status_starting"
* "run_status_initializing"
* "run_status_running"
* "run_status_processing"
* "run_status_stopping"
* "run_status_failed"
* "run_status_stopped"
* "run_status_succeeded"
* "run_status_skipped"
* "run_status_warning"
* "run_status_unschedulable"
* "run_status_upstream_failed"
* "run_status_retrying"
* "run_status_unknown"
* "run_status_done"
* "run_approved_actor"
* "run_invalidated_actor"
* "run_new_artifacts"
* "connection_git_commit"
* "connection_dataset_version"
* "connection_registry_image"
* "alert_info"
* "alert_warning"
* "alert_critical"
* "model_version_new_metric"
* "project_custom_event"
* "org_custom_event"
Args:
kinds: List[str]
ref: str
> **Important**: Currently only events with prefix `run_status_*` are supported.
## YAML usage
```yaml
>>> events:
>>> ref: {{ ops.upstream-operation }}
>>> kinds: [run_status_running]
```
```yaml
>>> event:
>>> ref: {{ connections.git-repo-connection-name }}
>>> kinds: [connection_git_commit]
```
## Python usage
```python
>>> from polyaxon.polyflow import V1EventKind, V1EventTrigger
>>> event1 = V1EventTrigger(
>>> ref="{{ ops.upstream-operation }}",
>>> kinds=[V1EventTrigger.RUN_STATUS_RUNNING],
>>> )
>>> event2 = V1EventTrigger(
>>> ref="{{ connections.git-repo-connection-name }}",
>>> kinds=[V1EventTrigger.CONNECTION_GIT_COMMIT],
>>> )
```
## Fields
### kinds
The trigger event kinds to watch, if any event is detected the operation defining the `events`
section will be initiated.
```yaml
>>> event:
>>> kinds: [run_status_running, run_status_done]
```
> **Note**: Similar to trigger in DAGs, after an operation is initiated,
> it will still have to validate the rest of the Polyaxonfile,
> i.e. conditions, contexts, connections, ...
### ref
A valid reference that Polyaxon can resolve the objects that will send the events to watch for.
All supported events are prefixed with the object reference that can send such events.
The `run_*` events can be referenced both by `runs.UUID` or
`ops.OPERATION_NAME` if defined in the context of a DAG.
```yaml
>>> event:
>>> ref: ops.upstream_operation_name
```
"""
IDENTIFIER = "event_trigger"
SCHEMA = EventTriggerSchema
REDUCED_ATTRIBUTES = [
"ref",
]
|
JoostvanPinxten/ConstraintPuzzler
|
constraints/__init__.py
|
Python
|
mit
| 296 | 0.006757 |
__all__ = ['Constraint', 'ConstraintGroup', 'TotalSumValueConstraint', 'UniqueValueConstraint']
from .constraint import Constraint
from .constraintgroup import ConstraintGroup
from .totalsumvalueconstraint import TotalSumValueConstraint
from .uniquevalueconstraint import UniqueValueConstraint
|
LeZhang2016/openthread
|
tests/toranj/test-603-channel-manager-announce-recovery.py
|
Python
|
bsd-3-clause
| 4,783 | 0.005854 |
#!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from wpan import verify
import wpan
import time
#-----------------------------------------------------------------------------------------------------------------------
# Test description: Orphaned node attach through MLE Announcement
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print '-' * 120
print 'Starting \'{}\''.format(test_name)
def verify_channel(nodes, new_channel, wait_time=20):
"""
This function checks the channel on a given list of `nodes` and verifies that all nodes
switch to a given `new_channel` (as int) within certain `wait_time` (int and in seconds)
"""
start_time = time.time()
while not all([ (new_channel == int(node.get(wpan.WPAN_CHANNEL), 0)) for node in nodes ]):
if time.time() - start_time > wait_time:
print 'Took too long to switch to channel {} ({}>{} sec)'.format(new_channel, time.time() - start_time,
wait_time)
exit(1)
time.sleep(0.1)
#-----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
router = wpan.Node()
c1 = wpan.Node()
c2 = wpan.Node()
all_nodes = [router, c1, c2]
#-----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
#-----------------------------------------------------------------------------------------------------------------------
# Build network topology
router.form('announce-tst', channel=11)
c1.join_node(router, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
c2.join_node(router, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
c1.set(wpan.WPAN_POLL_INTERVAL, '500')
c2.set(wpan.WPAN_POLL_INTERVAL, '500')
c1.set(wpan.WPAN_THREAD_DEVICE_MODE,'5')
c2.set(wpan.WPAN_THREAD_DEVICE_MODE,'5')
#-----------------------------------------------------------------------------------------------------------------------
# Test implementation
# Reset c2 and keep it in detached state
c2.set('Daemon:AutoAssociateAfterReset', 'false')
c2.reset();
# Switch the rest of network to channel 26
router.set(wpan.WPAN_CHANNEL_MANAGER_NEW_CHANNEL, '26')
verify_channel([router, c1], 26)
# Now re-enable c2 and verify that it does attach to router and is on channel 26
# c2 would go through the ML Announce recovery.
c2.set('Daemon:AutoAssociateAfterReset', 'true')
c2.reset();
verify(int(c2.get(wpan.WPAN_CHANNEL), 0) == 11)
# wait for 20s for c2 to be attached/associated
start_time = time.time()
wait_time = 20
while not c2.is_associated():
if time.time() - start_time > wait_time:
print 'Took too long to recover through ML Announce ({}>{} sec)'.format(time.time() - start_time, wait_time)
exit(1)
time.sleep(0.1)
# Check that c2 did attach and is on channel 26.
verify(int(c2.get(wpan.WPAN_CHANNEL), 0) == 26)
#-----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print '\'{}\' passed.'.format(test_name)
|
alirizakeles/zato
|
code/zato-scheduler/src/zato/scheduler/__init__.py
|
Python
|
gpl-3.0
| 238 | 0.004202 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
|
NeCTAR-RC/ceilometer
|
ceilometer/api/acl.py
|
Python
|
apache-2.0
| 2,172 | 0 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Access Control Lists (ACL's) control access the API server."""
from ceilometer.openstack.common import policy
from keystoneclient.middleware import auth_token
from oslo.config import cfg
_ENFORCER = None
OPT_GROUP_NAME = 'keystone_authtoken'
def register_opts(conf):
"""Register keystoneclient middleware options
"""
conf.register_opts(auth_token.opts,
group=OPT_GROUP_NAME)
auth_token.CONF = conf
register_opts(cfg.CONF)
def install(app, conf):
"""Install ACL check on application."""
return auth_token.AuthProtocol(app,
conf=dict(conf.get(OPT_GROUP_NAME)))
def get_limited_to(headers):
"""Return the user and project the request should be limited to.
:param headers: HTTP headers dictionary
    :return: A tuple of (user, project), set to None if there's no limit on
one of these.
"""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer()
if not _ENFORCER.enforce('context_is_admin',
{},
{'roles': headers.get('X-Roles', "").split(",")}):
        return headers.get('X-User-Id'), headers.get('X-Project-Id')
return None, None
def get_limited_to_project(headers):
"""Return the project the request should be limited to.
:param headers: HTTP headers dictionary
:return: A project, or None if there's no limit on it.
"""
return get_limited_to(headers)[1]
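# Illustrative behaviour (header values are hypothetical): for a request with
# X-Roles: "member", X-User-Id: "u1" and X-Project-Id: "p1", get_limited_to()
# returns ('u1', 'p1'); if X-Roles satisfies the "context_is_admin" policy
# rule, it returns (None, None) instead.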
|
paradiseOffice/Bash_and_Cplus-plus
|
CPP/full_examples/pyqt/chap08/addeditmoviedlg_ans.py
|
Python
|
gpl-2.0
| 3,155 | 0.001268 |
#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from PyQt4.QtCore import (QDate, QString, Qt, SIGNAL, pyqtSignature)
from PyQt4.QtGui import (QApplication, QDialog, QDialogButtonBox)
import moviedata_ans as moviedata
import ui_addeditmoviedlg_ans as ui_addeditmoviedlg
class AddEditMovieDlg(QDialog,
ui_addeditmoviedlg.Ui_AddEditMovieDlg):
def __init__(self, movies, movie=None, parent=None):
super(AddEditMovieDlg, self).__init__(parent)
self.setupUi(self)
self.movies = movies
self.movie = movie
self.acquiredDateEdit.setDisplayFormat(moviedata.DATEFORMAT)
if movie is not None:
self.titleLineEdit.setText(movie.title)
self.yearSpinBox.setValue(movie.year)
self.minutesSpinBox.setValue(movie.minutes)
self.acquiredDateEdit.setDate(movie.acquired)
self.acquiredDateEdit.setEnabled(False)
self.locationLineEdit.setText(movie.location)
self.notesTextEdit.setPlainText(movie.notes)
self.notesTextEdit.setFocus()
self.buttonBox.button(QDialogButtonBox.Ok).setText(
"&Accept")
self.setWindowTitle("My Movies - Edit Movie")
else:
today = QDate.currentDate()
self.acquiredDateEdit.setDateRange(today.addDays(-5),
today)
self.acquiredDateEdit.setDate(today)
self.titleLineEdit.setFocus()
self.on_titleLineEdit_textEdited(QString())
@pyqtSignature("QString")
def on_titleLineEdit_textEdited(self, text):
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(
not self.titleLineEdit.text().isEmpty())
def accept(self):
title = self.titleLineEdit.text()
year = self.yearSpinBox.value()
minutes = self.minutesSpinBox.value()
location = self.locationLineEdit.text()
notes = self.notesTextEdit.toPlainText()
if self.movie is None:
acquired = self.acquiredDateEdit.date()
self.movie = moviedata.Movie(title, year, minutes,
acquired, location, notes)
self.movies.add(self.movie)
else:
self.movies.updateMovie(self.movie, title, year,
minutes, location, notes)
QDialog.accept(self)
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
form = AddEditMovieDlg(0)
form.show()
app.exec_()
|
OpenEngeneeringTaskForce/OETFDev
|
pycore/submit_util.py
|
Python
|
mit
| 1,401 | 0.004996 |
'''
Created on Aug 21, 2014
@author: Dean4Devil
'''
import mysql.connector
from pycore.sql_util import MySQLHelper
class SubmitTree():
'A tree of all submits to that standard. I.e. OpenDriver is a tree, OpenDriver 0.2 is a submit.'
def __init__(self, identifier):
'Create a new Tree in memory.'
        self.sql_helper = MySQLHelper("oetf_submits")
if self.sql_helper.check_exists(identifier):
self.tree = self.sql_helper.query_data(identifier, "*", delimiter="", order="id", row_num=0)
else:
# First submit in that tree. Table does not exist yet.
            # Assemble the CREATE TABLE statement as one SQL string
            # (adjacent string literals concatenate).
            table = (
                "CREATE TABLE IF NOT EXISTS `{}` (".format(identifier) +
                "`id` int(11) NOT NULL AUTO_INCREMENT,"
                "`version` varchar(32) COLLATE utf8mb4_bin NOT NULL,"
                "`comment` text COLLATE utf8mb4_bin NOT NULL,"
                "`content` text COLLATE utf8mb4_bin NOT NULL,"
                "`published_date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,"
                "PRIMARY KEY (`id`)"
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1 ;")
con = self.sql_helper.return_con()
cur = con.cursor()
cur.execute(table)
self.tree = []
cur.close()
con.close()
class Submit():
'Submit element'
|
rbernand/transfert
|
tests/unit/test_copy.py
|
Python
|
mit
| 1,619 | 0 |
import filecmp
from transfert import Resource
from transfert.actions import copy
def estimate_nb_cycles(len_data, chunk_size):
return (len_data // chunk_size) + [0, 1][(len_data % chunk_size) > 0]
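# For the 9-byte payload written in the tests below this gives, e.g.,
# estimate_nb_cycles(9, 1) == 9 and estimate_nb_cycles(9, 2) == 5
# (four full chunks plus one trailing byte).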
def test_simple_local_copy(tmpdir):
src = tmpdir.join('alpha')
dst = tmpdir.join('beta')
    src.write('some data')
assert src.check()
assert not dst.check()
copy(Resource('file://' + src.strpath),
Resource('file://' + dst.strpath))
assert src.check()
assert dst.check()
assert filecmp.cmp(src.strpath, dst.strpath)
def test_simple_local_copy_with_callback(tmpdir):
def wrapper(size):
nonlocal count
count += 1
count = 0
src = tmpdir.join('alpha')
dst = tmpdir.join('beta')
data = b'some data'
src.write(data)
chunk_size = 1
assert src.check()
assert not dst.check()
copy(Resource('file://' + src.strpath),
Resource('file://' + dst.strpath,),
size=chunk_size,
callback_freq=1,
callback=wrapper)
assert src.check()
assert dst.check()
assert filecmp.cmp(src.strpath, dst.strpath)
assert count == estimate_nb_cycles(len(data), chunk_size)
dst.remove()
count = 0
chunk_size = 2
assert src.check()
assert not dst.check()
copy(Resource('file://' + src.strpath),
Resource('file://' + dst.strpath,),
size=chunk_size,
callback_freq=1,
callback=wrapper)
assert src.check()
assert dst.check()
assert filecmp.cmp(src.strpath, dst.strpath)
assert count == estimate_nb_cycles(len(data), chunk_size)
|
mecwerks/fofix
|
src/audio/Microphone.py
|
Python
|
gpl-2.0
| 8,551 | 0.003158 |
#####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X (FoFiX) #
# Copyright (C) 2009 Team FoFiX #
# 2009 John Stumpo #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from util import Log
from core import Audio
import math
import numpy as np
try:
import pyaudio
from audio import pypitch
supported = True
except ImportError:
Log.warn('Missing pyaudio or pypitch - microphone support will not be possible')
supported = False
from core.Task import Task
from core.Language import _
if supported:
pa = pyaudio.PyAudio()
# Precompute these in the interest of saving CPU time in the note analysis loop
LN_2 = math.log(2.0)
LN_440 = math.log(440.0)
#stump: return dictionary mapping indices to device names
# -1 is magic for the default device and will be replaced by None when actually opening the mic.
def getAvailableMics():
        result = {-1: _('[Default Microphone]')}
for devnum in range(pa.get_device_count()):
devinfo = pa.get_device_info_by_index(devnum)
if devinfo['maxInputChannels'] > 0:
result[devnum] = devinfo['name']
return result
class Microphone(Task):
        def __init__(self, engine, controlnum, samprate=44100):
Task.__init__(self)
self.engine = engine
self.controlnum = controlnum
devnum = self.engine.input.controls.micDevice[controlnum]
if devnum == -1:
devnum = None
self.devname = pa.get_default_input_device_info()['name']
else:
self.devname = pa.get_device_info_by_index(devnum)['name']
self.mic = pa.open(samprate, 1, pyaudio.paFloat32, input=True, input_device_index=devnum, start=False)
self.analyzer = pypitch.Analyzer(samprate)
self.mic_started = False
self.lastPeak = 0
self.detectTaps = True
self.tapStatus = False
self.tapThreshold = -self.engine.input.controls.micTapSensitivity[controlnum]
self.passthroughQueue = []
passthroughVolume = self.engine.input.controls.micPassthroughVolume[controlnum]
if passthroughVolume > 0.0:
Log.debug('Microphone: creating passthrough stream at %d%% volume' % round(passthroughVolume * 100))
self.passthroughStream = Audio.MicrophonePassthroughStream(engine, self)
self.passthroughStream.setVolume(passthroughVolume)
else:
Log.debug('Microphone: not creating passthrough stream')
self.passthroughStream = None
def __del__(self):
self.stop()
self.mic.close()
def start(self):
if not self.mic_started:
self.mic_started = True
self.mic.start_stream()
self.engine.addTask(self, synchronized=False)
Log.debug('Microphone: started %s' % self.devname)
if self.passthroughStream is not None:
Log.debug('Microphone: starting passthrough stream')
self.passthroughStream.play()
def stop(self):
if self.mic_started:
if self.passthroughStream is not None:
Log.debug('Microphone: stopping passthrough stream')
self.passthroughStream.stop()
self.engine.removeTask(self)
self.mic.stop_stream()
self.mic_started = False
Log.debug('Microphone: stopped %s' % self.devname)
# Called by the Task machinery: pump the mic and shove the data through the analyzer.
def run(self, ticks):
while self.mic.get_read_available() > 1024:
try:
chunk = self.mic.read(1024)
except IOError, e:
if e.args[1] == pyaudio.paInputOverflowed:
Log.notice('Microphone: ignoring input buffer overflow')
chunk = '\x00' * 4096
else:
raise
if self.passthroughStream is not None:
self.passthroughQueue.append(chunk)
self.analyzer.input(np.frombuffer(chunk, dtype=np.float32))
self.analyzer.process()
pk = self.analyzer.getPeak()
if self.detectTaps:
if pk > self.tapThreshold and pk > self.lastPeak + 5.0:
self.tapStatus = True
self.lastPeak = pk
# Get the amplitude (in dB) of the peak of the most recent input window.
def getPeak(self):
return self.analyzer.getPeak()
# Get the microphone tap status.
# When a tap occurs, it is remembered until this function is called.
def getTap(self):
retval = self.tapStatus
self.tapStatus = False
return retval
def getFormants(self):
return self.analyzer.getFormants()
# Get the note currently being sung.
# Returns None if there isn't one or a pypitch.Tone object if there is.
def getTone(self):
return self.analyzer.findTone()
# Get the note currently being sung, as an integer number of semitones above A.
# The frequency is rounded to the nearest semitone, then shifted by octaves until
# the result is between 0 and 11 (inclusive). Returns None is no note is being sung.
def getSemitones(self):
tone = self.analyzer.findTone()
if tone is None:
return tone
return int(round((math.log(tone.freq) - LN_440) * 12.0 / LN_2) % 12)
# Work out how accurately the note (passed in as a MIDI note number) is being
# sung. Return a float in the range [-6.0, 6.0] representing the number of
# semitones difference there is from the nearest occurrence of the note. The
# octave doesn't matter. Or return None if there's no note being sung.
def getDeviation(self, midiNote):
tone = self.analyzer.findTone()
if tone is None:
return tone
# Convert to semitones from A-440.
semitonesFromA440 = (math.log(tone.freq) - LN_440) * 12.0 / LN_2
# midiNote % 12 = semitones above C, which is 3 semitones above A
semitoneDifference = (semitonesFromA440 - 3.0) - float(midiNote % 12)
# Adjust to the proper range.
acc = math.fmod(semitoneDifference, 12.0)
if acc > 6.0:
acc -= 12.0
elif acc < -6.0:
acc += 12.0
return acc
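        # Worked example (illustrative values): a sung tone of 440 Hz checked
        # against any MIDI "A" (57, 69, ...) yields 0.0; a tone of ~466.16 Hz,
        # one semitone sharp, yields +1.0 regardless of the note's octave.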
else:
def getAvailableMics():
return {-1: _('[Microph
|
sgraham/nope
|
tools/gyp/test/rules-dirname/gyptest-dirname.py
|
Python
|
bsd-3-clause
| 1,475 | 0.00339 |
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
import os
import sys
test = TestGyp.TestGyp(formats=['make', 'ninja', 'android', 'xcode', 'msvs'])
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
no dir here
hi c
hello baz
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('gencc_int_output', chdir=chdir, stdout=expect)
if test.format == 'msvs':
test.run_built_executable('gencc_int_output_external', chdir=chdir,
stdout=expect)
test.must_match('relocate/src/subdir/foo/bar/baz.dirname',
os.path.join('foo', 'bar'))
test.must_match('relocate/src/subdir/a/b/c.dirname',
os.path.join('a', 'b'))
# FIXME the xcode and make generators incorrectly convert RULE_INPUT_PATH
# to an absolute path, making the tests below fail!
if test.format != 'xcode' and test.format != 'make':
test.must_match('relocate/src/subdir/foo/bar/baz.path',
os.path.join('foo', 'bar', 'baz.printvars'))
test.must_match('relocate/src/subdir/a/b/c.path',
os.path.join('a', 'b', 'c.printvars'))
test.pass_test()
|
oferb/OpenTrains
|
webserver/opentrain/opentrain/print_settings.py
|
Python
|
bsd-3-clause
| 230 | 0.021739 |
import sys
old_stdout = sys.stdout
sys.stdout = open('/dev/null','w')
import settings
sys.stdout = old_stdout
if __name__ == '__main__':
    if sys.argv[1] == '-env':
print 'export DATA_DIR="%s"' % (settings.DATA_DIR)
|
andydrop/ludicode
|
GameServers/__init__.py
|
Python
|
gpl-3.0
| 60 | 0 |
from .gameserver import Game
from .example import TicTacToe
|
marduk191/plugin.video.movie25
|
resources/libs/resolvers.py
|
Python
|
gpl-3.0
| 60,306 | 0.014012 |
# -*- coding: cp1252 -*-
import urllib,urllib2,re,cookielib,string,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from t0mm0.common.net import Net as net
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
datapath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))
elogo = xbmc.translatePath('special://home/addons/plugin.video.movie25/resources/art/bigx.png')
class ResolverError(Exception):
    def __init__(self, value, value2):
        self.value = value
        self.value2 = value2
    def __str__(self):
        return repr((self.value, self.value2))
def resolve_url(url, filename = False):
stream_url = False
if(url):
try:
url = url.split('"')[0]
match = re.search('xoxv(.+?)xoxe(.+?)xoxc',url)
print "host "+url
if(match):
import urlresolver
                source = urlresolver.HostedMediaFile(host=match.group(1), media_id=match.group(2))
if source:
stream_url = source.resolve()
elif re.search('billionuploads',url,re.I):
stream_url=resolve_billionuploads(url, filename)
elif re.search('180upload',url,re.I):
stream_url=resolve_180upload(url)
elif re.search('veehd',url,re.I):
stream_url=resolve_veehd(url)
elif re.search('vidto',url,re.I):
stream_url=resolve_vidto(url)
elif re.search('epicshare',url,re.I):
stream_url=resolve_epicshare(url)
elif re.search('lemuploads',url,re.I):
stream_url=resolve_lemupload(url)
elif re.search('mightyupload',url,re.I):
stream_url=resolve_mightyupload(url)
elif re.search('hugefiles',url,re.I):
stream_url=resolve_hugefiles(url)
elif re.search('megarelease',url,re.I):
stream_url=resolve_megarelease(url)
elif re.search('movreel',url,re.I):
stream_url=resolve_movreel(url)
elif re.search('bayfiles',url,re.I):
stream_url=resolve_bayfiles(url)
elif re.search('nowvideo',url,re.I):
stream_url=resolve_nowvideo(url)
elif re.search('novamov',url,re.I):
stream_url=resolve_novamov(url)
elif re.search('vidspot',url,re.I):
stream_url=resolve_vidspot(url)
elif re.search('videomega',url,re.I):
stream_url=resolve_videomega(url)
elif re.search('youwatch',url,re.I):
stream_url=resolve_youwatch(url)
elif re.search('vk.com',url,re.I):
stream_url=resolve_VK(url)
elif re.search('(?i)(firedrive|putlocker)',url):
stream_url=resolve_firedrive(url)
elif re.search('project-free-upload',url,re.I):
stream_url=resolve_projectfreeupload(url)
elif re.search('yify.tv',url,re.I):
stream_url=resolve_yify(url)
elif re.search('mail.ru',url,re.I):
stream_url=resolve_mailru(url)
elif re.search('youtube',url,re.I):
try:url=url.split('watch?v=')[1]
except:
try:url=url.split('com/v/')[1]
except:url=url.split('com/embed/')[1]
stream_url='plugin://plugin.video.youtube/?action=play_video&videoid=' +url
else:
import urlresolver
print "host "+url
source = urlresolver.HostedMediaFile(url)
if source:
stream_url = source.resolve()
if isinstance(stream_url,urlresolver.UrlResolver.unresolvable):
showUrlResoverError(stream_url)
stream_url = False
else:
stream_url=url
try:
stream_url=stream_url.split('referer')[0]
stream_url=stream_url.replace('|','')
except:
pass
except ResolverError as e:
#logerror(str(e))
#showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR] ' + e.value2,'[B][COLOR red]'+e.value+'[/COLOR][/B]',5000, elogo)
try:
import urlresolver
source = urlresolver.HostedMediaFile(url)
if source:
stream_url = source.resolve()
if isinstance(stream_url,urlresolver.UrlResolver.unresolvable):
showUrlResoverError(stream_url)
stream_url = False
except Exception as e:
logerror(str(e))
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]'+str(e)+'[/COLOR][/B]',5000, elogo)
except Exception as e:
logerror(str(e))
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]'+str(e)+'[/COLOR][/B]',5000, elogo)
else:
logerror("video url not valid")
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]video url not valid[/COLOR][/B]',5000, elogo)
if stream_url and re.search('\.(zip|rar|7zip)$',stream_url,re.I):
logerror("video url found is an archive")
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]video url found is an archive[/COLOR][/B]',5000, elogo)
return False
return stream_url
def showUrlResoverError(unresolvable):
logerror(str(unresolvable.msg))
showpopup('[B]UrlResolver Error[/B]','[COLOR red]'+str(unresolvable.msg)+'[/COLOR]',10000, elogo)
def logerror(log):
xbmc.log(log, xbmc.LOGERROR)
def showpopup(title='', msg='', delay=5000, image=''):
xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")' % (title, msg, delay, image))
def grab_cloudflare(url):
class NoRedirection(urllib2.HTTPErrorProcessor):
# Stop Urllib2 from bypassing the 503 page.
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
return response
https_response = http_response
cj = cookielib.CookieJar()
opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36')]
response = opener.open(url).read()
jschl=re.compile('name="jschl_vc" value="(.+?)"/>').findall(response)
if jschl:
import time
jschl = jschl[0]
maths=re.compile('value = (.+?);').findall(response)[0].replace('(','').replace(')','')
domain_url = re.compile('(https?://.+?/)').findall(url)[0]
domain = re.compile('https?://(.+?)/').findall(domain_url)[0]
time.sleep(5)
normal = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
normal.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36')]
final= normal.open(domain_url+'cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s'%(jschl,eval(maths)+len(domain))).read()
response = normal.open(url).read()
return response
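# Worked example of the arithmetic above (page contents and domain are
# hypothetical): if the challenge page contains "value = 7+3*2;" and the
# protected domain is "example.com" (11 characters), the answer submitted as
# jschl_answer is eval("7+3*2") + 11 == 24, alongside the scraped jschl_vc token.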
def millis():
import time as time_
return int(round(time_.time() * 1000))
def load_json(data):
def to_utf8(dct):
rdct = {}
for k, v in dct.items() :
if isinstance(v, (str, unicode)) :
rdct[k] = v.encode('utf8', 'ignore')
else :
rdct[k] = v
return rdct
try :
from lib import simplejson
json_data = simplejson.loads(data, object_hook=to_utf8)
return json_data
except:
try:
import json
json_data = json.loads(data, object_hook=to_utf8)
return json_data
except:
|
tisimst/pyDOE
|
pyDOE/doe_factorial.py
|
Python
|
bsd-3-clause
| 7,200 | 0.007917 |
"""
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
import re
import numpy as np
__all__ = ['np', 'fullfact', 'ff2n', 'fracfact']
def fullfact(levels):
"""
Create a general full-factorial design
Parameters
----------
levels : array-like
An array of integers that indicate the number of levels of each input
design factor.
Returns
-------
mat : 2d-array
The design matrix with coded levels 0 to k-1 for a k-level factor
Example
-------
::
>>> fullfact([2, 4, 3])
array([[ 0., 0., 0.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 1., 1., 0.],
[ 0., 2., 0.],
[ 1., 2., 0.],
[ 0., 3., 0.],
[ 1., 3., 0.],
[ 0., 0., 1.],
[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 1., 1.],
[ 0., 2., 1.],
[ 1., 2., 1.],
[ 0., 3., 1.],
[ 1., 3., 1.],
[ 0., 0., 2.],
[ 1., 0., 2.],
[ 0., 1., 2.],
[ 1., 1., 2.],
[ 0., 2., 2.],
[ 1., 2., 2.],
[ 0., 3., 2.],
[ 1., 3., 2.]])
"""
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H = np.zeros((nb_lines, n))
level_repeat = 1
range_repeat = np.prod(levels)
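    # For factor i, each level value is repeated `level_repeat` times in a row
    # (the product of the level counts of all earlier factors), and that block
    # is tiled `range_repeat` times (the product of the level counts of all
    # later factors), so every combination appears exactly once.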
for i in range(n):
range_repeat //= levels[i]
lvl = []
for j in range(levels[i]):
lvl += [j]*level_repeat
rng = lvl*range_repeat
level_repeat *= levels[i]
H[:, i] = rng
return H
################################################################################
def ff2n(n):
"""
Create a 2-Level full-factorial design
Parameters
----------
n : int
The number of factors in the design.
Returns
-------
mat : 2d-array
The design matrix with coded levels -1 and 1
Example
-------
::
>>> ff2n(3)
array([[-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])
"""
return 2*fullfact([2]*n) - 1
################################################################################
def fracfact(gen):
"""
Create a 2-level fractional-factorial design with a generator string.
Parameters
----------
gen : str
A string, consisting of lowercase, uppercase letters or operators "-"
and "+", indicating the factors of the experiment
Returns
-------
H : 2d-array
A m-by-n matrix, the fractional factorial design. m is 2^k, where k
is the number of letters in ``gen``, and n is the total number of
entries in ``gen``.
Notes
-----
In ``gen`` we define the main factors of the experiment and the factors
whose levels are the products of the main factors. For example, if
gen = "a b ab"
then "a" and "b" are the main factors, while the 3rd factor is the product
of the first two. If we input uppercase letters in ``gen``, we get the same
result. We can also use the operators "+" and "-" in ``gen``.
For example, if
gen = "a b -ab"
then the 3rd factor is the opposite of the product of "a" and "b".
The output matrix includes the two level full factorial design, built by
the main factors of ``gen``, and the products of the main factors. The
columns of ``H`` follow the sequence of ``gen``.
For example, if
gen = "a b ab c"
then columns H[:, 0], H[:, 1], and H[:, 3] include the two level full
factorial design and H[:, 2] includes the products of the main factors.
Examples
--------
::
>>> fracfact("a b ab")
array([[-1., -1., 1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., 1.]])
>>> fracfact("A B AB")
array([[-1., -1., 1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., 1.]])
>>> fracfact("a b -ab c +abc")
array([[-1., -1., -1., -1., -1.],
[ 1., -1., 1., -1., 1.],
[-1., 1., 1., -1., 1.],
[ 1., 1., -1., -1., -1.],
[-1., -1., -1., 1., 1.],
[ 1., -1., 1., 1., -1.],
[-1., 1., 1., 1., -1.],
[ 1., 1., -1., 1., 1.]])
"""
# Recognize letters and combinations
A = [item for item in re.split('\-?\s?\+?', gen) if item] # remove empty strings
C = [len(item) for item in A]
# Indices of single letters (main factors)
I = [i for i, item in enumerate(C) if item==1]
# Indices of letter combinations (we need them to fill out H2 properly).
J = [i for i, item in enumerate(C) if item!=1]
# Check if there are "-" or "+" operators in gen
U = [item for item in gen.split(' ') if item] # remove empty strings
# If R1 is either None or not, the result is not changed, since it is a
# multiplication of 1.
R1 = _grep(U, '+')
R2 = _grep(U, '-')
# Fill in design with two level factorial design
H1 = ff2n(len(I))
H = np.zeros((H1.shape[0], len(C)))
H[:, I] = H1
# Recognize combinations and fill in the rest of matrix H2 with the proper
# products
for k in J:
# For lowercase letters
xx = np.array([ord(c) for c in A[k]]) - 97
# For uppercase letters
if np.any(xx<0):
xx = np.array([ord(c) for c in A[k]]) - 65
H[:, k] = np.prod(H1[:, xx], axis=1)
# Update design if gen includes "-" operator
if R2:
H[:, R2] *= -1
# Return the fractional factorial design
return H
def _grep(haystack, needle):
try:
haystack[0]
except (TypeError, AttributeError):
return [0] if needle in haystack else []
else:
locs = []
for idx, item in enumerate(haystack):
if needle in item:
locs += [idx]
return locs
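if __name__ == '__main__':
    # Small illustrative run (a sketch, not part of the library proper): a
    # 2^(4-1) fractional factorial whose fourth factor is aliased with the
    # three-way interaction "abc".
    design = fracfact('a b c abc')
    print(design.shape)  # -> (8, 4)
    print(design)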
|
Jokeren/neon
|
examples/conv_autoencoder.py
|
Python
|
apache-2.0
| 3,219 | 0 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolutional autoencoder example network on MNIST data set.
Usage:
python examples/conv_autoencoder.py
"""
import numpy as np
from neon import logger as neon_logger
from neon.data import ArrayIterator, MNIST
from neon.initializers import Uniform
from neon.layers import Conv, Pooling, GeneralizedCost, Deconv
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# Load dataset
dataset = MNIST(path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
# Set input and target to X_train
train = ArrayIterator(X_train, lshape=(1, 28, 28))
# Initialize the weights and the learning rule
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9)
# Strided conv autoencoder
bn = False
layers = [Conv((4, 4, 8), init=init_uni, activation=Rectlin(), batch_norm=bn),
Pooling(2),
Conv((4, 4, 32), init=init_uni, activation=Rectlin(), batch_norm=bn),
Pooling(2),
Deconv(fshape=(4, 4, 8), init=init_uni,
activation=Rectlin(), batch_norm=bn),
Deconv(fshape=(3, 3, 8), init=init_uni,
activation=Rectlin(), strides=2, batch_norm=bn),
Deconv(fshape=(2, 2, 1), init=init_uni, strides=2, padding=1)]
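# The two Conv+Pooling stages above form the encoder; the three Deconv layers
# (the last two with stride 2) upsample back to the 28x28 input resolution so
# the SumSquared cost below can compare reconstruction and input pixel-wise.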
# Define the cost
cost = GeneralizedCost(costfunc=SumSquared())
model = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
# Fit the model
model.fit(train, optimizer=opt_gdm, num_epochs=args.epochs,
cost=cost, callbacks=callbacks)
# Plot the reconstructed digits
try:
from matplotlib import pyplot, cm
fi = 0
nrows = 10
ncols = 12
test = np.zeros((28 * nrows, 28 * ncols))
idxs = [(row, col) for row in range(nrows) for col in range(ncols)]
for row, col in idxs:
im = model.layers.layers[-1].outputs.get()[:, fi].reshape((28, 28))
test[28 * row:28 * (row + 1):, 28 * col:28 * (col + 1)] = im
fi = fi + 1
pyplot.matshow(test, cmap=cm.gray)
pyplot.savefig('Reconstructed.png')
except ImportError:
neon_logger.display(
'matplotlib needs to be manually installed to generate plots')
|
fcrepo4-archive/RDFDatabank
|
rdfdatabank/config/users.py
|
Python
|
mit
| 1,238 | 0.002423 |
#-*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, --INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
_USERS = {
'admin': {'owner': '*', 'first_name': 'Databank', 'last_name': 'Admin', 'role': 'admin', 'description': 'Admin for all silos'}
}
|
rlr/django_jinja_ref
|
django_jinja_ref/superadvanced/urls.py
|
Python
|
mpl-2.0
| 195 | 0 |
from django.conf.urls import url
from django_jinja_ref.superadvanced import views
urlpatterns = [
url(r'^$', views.django, name='django'),
url(r'^jinja$', views.jinja, name='jinja'),
]
|
cglewis/bowl
|
bowl/cli_opts/snapshots.py
|
Python
|
apache-2.0
| 850 | 0.003529 |
"""
This module is the snapshots command of bowl.
Created on 17 July 2014
@author: Charlie Lewis
"""
import ast
import os
class snapshots(object):
"""
This class is responsible for the snapshots command of the cli.
"""
@classmethod
def main(self, args):
# !! TODO needs to implement login if using that
snapshots = []
try:
directory = args.metadata_path
directory = os.path.expanduser(directory)
with open(os.path.join(directory, "snapshots"), 'r') as f:
                for line in f:
snapshot = ast.literal_eval(line.rstrip("\n"))
snapshots.append(snapshot['snapshot_id'])
except:
pass
if not args.z:
for snapshot in snapshots:
print snapshot
return snapshots
|
jiminliang/msda-denoising
|
spearmint_variable_noise/output2csv.py
|
Python
|
apache-2.0
| 1,028 | 0.066148 |
import os, re, csv
# regular expressions for capturing the interesting quantities
noise_pattern = 'noise: \[(.+)\]'
res_pattern = '^([0-9.]+$)'
search_dir = "output"
results_file = '../results.csv'
os.chdir( search_dir )
files = filter( os.path.isfile, os.listdir( '.' ))
#files = [ os.path.join( search_dir, f ) for f in files ] # add path to each file
files.sort( key=lambda x: os.path.getmtime( x ))
results = []
for file in files:
f = open( file )
contents = f.read()
# noise
matches = re.search( noise_pattern, contents, re.DOTALL )
try:
noise = matches.group( 1 )
noise = noise.strip()
noise = noise.split()
except AttributeError:
print "noise error 1: %s" % ( contents )
continue
# rmse
matches = re.search( res_pattern, contents, re.M )
try:
res = matches.group( 1 )
except AttributeError:
print "matches error 2: %s" % ( contents )
continue
results.append( [ res ] + noise )
writer = csv.writer( open( results_file, 'wb' ))
for result in results:
writer.writerow( result )
|
cloudbase/neutron-virtualbox
|
neutron/plugins/nec/router_drivers.py
|
Python
|
apache-2.0
| 9,085 | 0.00011 |
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import httplib
from oslo_utils import excutils
import six
from neutron.common import log as call_log
from neutron.common import utils
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import constants as nconst
from neutron.plugins.nec.common import exceptions as nexc
LOG = logging.getLogger(__name__)
PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW
@six.add_metaclass(abc.ABCMeta)
class RouterDriverBase(object):
def __init__(self, plugin, ofc_manager):
self.plugin = plugin
self.ofc = ofc_manager
def floating_ip_support(self):
return True
@abc.abstractmethod
def create_router(self, context, tenant_id, router):
pass
@abc.abstractmethod
def update_router(self, context, router_id, old_router, new_router):
pass
@abc.abstractmethod
def delete_router(self, context, router_id, router):
pass
@abc.abstractmethod
def add_interface(self, context, router_id, port):
pass
@abc.abstractmethod
def delete_interface(self, context, router_id, port):
pass
class RouterL3AgentDriver(RouterDriverBase):
need_gw_info = False
@call_log.log
def create_router(self, context, tenant_id, router):
return router
@call_log.log
def update_router(self, context, router_id, old_router, new_router):
return new_router
@call_log.log
def delete_router(self, context, router_id, router):
pass
@call_log.log
def add_interface(self, context, router_id, port):
return self.plugin.activate_port_if_ready(context, port)
@call_log.log
def delete_interface(self, context, router_id, port):
return self.plugin.deactivate_port(context, port)
class RouterOpenFlowDriver(RouterDriverBase):
need_gw_info = True
def floating_ip_support(self):
return self.ofc.driver.router_nat_supported
def _process_gw_port(self, gw_info, routes):
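        # When an external gateway is configured, add a default route (0.0.0.0/0) through it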
if gw_info and gw_info['gateway_ip']:
routes.append({'destination': '0.0.0.0/0',
'nexthop': gw_info['gateway_ip']})
@call_log.log
def create_router(self, context, tenant_id, router):
try:
router_id = router['id']
added_routes = []
self.ofc.ensure_ofc_tenant(context, tenant_id)
self.ofc.create_ofc_router(context, tenant_id, router_id,
router['name'])
self._process_gw_port(router['gw_port'], added_routes)
if added_routes:
self.ofc.update_ofc_router_route(context, router_id,
added_routes, [])
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(context, "router",
router['id'],
new_status)
router['status'] = new_status
return router
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
if (isinstance(exc, nexc.OFCException) and
exc.status == httplib.CONFLICT):
raise nexc.RouterOverLimit(provider=PROVIDER_OPENFLOW)
LOG.error(_LE("create_router() failed due to %s"), exc)
new_status = nconst.ROUTER_STATUS_ERROR
self._update_resource_status(context, "router",
router['id'],
new_status)
@call_log.log
def update_router(self, context, router_id, old_router, new_router):
old_routes = old_router['routes'][:]
new_routes = new_router['routes'][:]
self._process_gw_port(old_router['gw_port'], old_routes)
self._process_gw_port(new_router['gw_port'], new_routes)
added, removed = utils.diff_list_of_dict(old_routes, new_routes)
if added or removed:
try:
# NOTE(amotoki): PFC supports one-by-one route update at now.
# It means there may be a case where some route is updated but
# some not. To allow the next call of failures to sync routes
# with Neutron side, we pass the whole new routes here.
# PFC should support atomic route update in the future.
self.ofc.update_ofc_router_route(context, router_id,
new_routes)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(
context, "router", router_id, new_status)
new_router['status'] = new_status
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("_update_ofc_routes() failed due to %s"),
exc)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(
context, "router", router_id, new_status)
return new_router
@call_log.log
def delete_router(self, context, router_id, router):
if not self.ofc.exists_ofc_router(context, router_id):
return
try:
self.ofc.delete_ofc_router(context, router_id, router)
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("delete_router() failed due to %s"), exc)
self.plugin._update_resource_status(
context, "router", router_id, nconst.ROUTER_STATUS_ERROR)
@call_log.log
def add_interface(self, context, router_id, port):
port_id = port['id']
# port['fixed_ips'] may be empty if ext_net has no subnet.
# Such port is invalid for a router port and we don't create a port
# on OFC. The port is removed in l3_db._create_router_gw_port.
if not port['fixed_ips']:
LOG.warning(_LW('RouterOpenFlowDriver.add_interface(): the '
'requested port '
'has no subnet. add_interface() is skipped. '
'router_id=%(id)s, port=%(port)s)'),
{'id': router_id, 'port': port})
return port
fixed_ip = port['fixed_ips'][0]
subnet = self.plugin._get_subnet(context, fixed_ip['subnet_id'])
port_info = {'network_id': port['network_id'],
'ip_address': fixed_ip['ip_address'],
'cidr': subnet['cidr'],
'mac_address': port['mac_address']}
try:
            self.ofc.add_ofc_router_interface(context, router_id,
                                               port_id, port_info)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(
context, "port", port_id, new_status)
return port
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("add_router_interface() failed due to %s"), exc)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(
context, "p
|
william-richard/moto
|
moto/secretsmanager/models.py
|
Python
|
apache-2.0
| 20,426 | 0.001175 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import json
import uuid
import datetime
from boto3 import Session
from moto.core import BaseBackend, BaseModel
from .exceptions import (
SecretNotFoundException,
SecretHasNoValueException,
InvalidParameterException,
ResourceExistsException,
ResourceNotFoundException,
InvalidRequestException,
ClientError,
)
from .utils import random_password, secret_arn, get_secret_name_from_arn
from .list_secrets.filters import all, tag_key, tag_value, description, name
_filter_functions = {
"all": all,
"name": name,
"description": description,
"tag-key": tag_key,
"tag-value": tag_value,
}
def filter_keys():
return list(_filter_functions.keys())
def _matches(secret, filters):
is_match = True
for f in filters:
# Filter names are pre-validated in the resource layer
filter_function = _filter_functions.get(f["Key"])
is_match = is_match and filter_function(secret, f["Values"])
return is_match
class SecretsManager(BaseModel):
def __init__(self, region_name, **kwargs):
self.region = region_name
class FakeSecret:
def __init__(
self,
region_name,
secret_id,
secret_string=None,
secret_binary=None,
description=None,
tags=[],
version_id=None,
version_stages=None,
):
self.secret_id = secret_id
self.name = secret_id
self.arn = secret_arn(region_name, secret_id)
self.secret_string = secret_string
self.secret_binary = secret_binary
self.description = description
self.tags = tags
self.version_id = version_id
self.version_stages = version_stages
self.rotation_enabled = False
self.rotation_lambda_arn = ""
self.auto_rotate_after_days = 0
self.deleted_date = None
def update(self, description=None, tags=[]):
self.description = description
self.tags = tags
def set_versions(self, versions):
self.versions = versions
def set_default_version_id(self, version_id):
self.default_version_id = version_id
def reset_default_version(self, secret_version, version_id):
# remove all old AWSPREVIOUS stages
for old_version in self.versions.values():
if "AWSPREVIOUS" in old_version["version_stages"]:
old_version["version_stages"].remove("AWSPREVIOUS")
# set old AWSCURRENT secret to AWSPREVIOUS
previous_current_version_id = self.default_version_id
self.versions[previous_current_version_id]["version_stages"] = ["AWSPREVIOUS"]
self.versions[version_id] = secret_version
self.default_version_id = version_id
def delete(self, deleted_date):
self.deleted_date = deleted_date
def restore(self):
self.deleted_date = None
def is_deleted(self):
return self.deleted_date is not None
def to_short_dict(self, include_version_stages=False):
dct = {
"ARN": self.arn,
"Name": self.name,
"VersionId": self.default_version_id,
}
if include_version_stages:
dct["VersionStages"] = self.version_stages
return json.dumps(dct)
def to_dict(self):
version_id_to_stages = self._form_version_ids_to_stages()
return {
"ARN": self.arn,
"Name": self.name,
"Description": self.description or "",
"KmsKeyId": "",
"RotationEnabled": self.rotation_enabled,
"RotationLambdaARN": self.rotation_lambda_arn,
"RotationRules": {"AutomaticallyAfterDays": self.auto_rotate_after_days},
"LastRotatedDate": None,
"LastChangedDate": None,
"LastAccessedDate": None,
"DeletedDate": self.deleted_date,
"Tags": self.tags,
"VersionIdsToStages": version_id_to_stages,
"SecretVersionsToStages": version_id_to_stages,
}
def _form_version_ids_to_stages(self):
version_id_to_stages = {}
for key, value in self.versions.items():
version_id_to_stages[key] = value["version_stages"]
return version_id_to_stages
class SecretsStore(dict):
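    # Keys may be secret names or full ARNs; normalize to the plain name before delegating to dict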
def __setitem__(self, key, value):
new_key = get_secret_name_from_arn(key)
super(SecretsStore, self).__setitem__(new_key, value)
def __getitem__(self, key):
new_key = get_secret_name_from_arn(key)
return super(SecretsStore, self).__getitem__(new_key)
def __contains__(self, key):
new_key = get_secret_name_from_arn(key)
return dict.__contains__(self, new_key)
def pop(self, key, *args, **kwargs):
new_key = get_secret_name_from_arn(key)
return super(SecretsStore, self).pop(new_key, *args, **kwargs)
class SecretsManagerBackend(BaseBackend):
def __init__(self, region_name=None, **kwargs):
super(SecretsManagerBackend, self).__init__()
self.region = region_name
self.secrets = SecretsStore()
def reset(self):
region_name = self.region
self.__dict__ = {}
self.__init__(region_name)
def _is_valid_identifier(self, identifier):
return identifier in self.secrets
def _unix_time_secs(self, dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return (dt - epoch).total_seconds()
def get_secret_value(self, secret_id, version_id, version_stage):
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
if not version_id and version_stage:
# set version_id to match version_stage
versions_dict = self.secrets[secret_id].versions
for ver_id, ver_val in versions_dict.items():
if version_stage in ver_val["version_stages"]:
version_id = ver_id
break
if not version_id:
raise SecretNotFoundException()
# TODO check this part
if self.secrets[secret_id].is_deleted():
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
secret = self.secrets[secret_id]
version_id = version_id or secret.default_version_id
secret_version = secret.versions.get(version_id)
if not secret_version:
raise ResourceNotFoundException(
"An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets "
"Manager can't find the specified secret value for VersionId: {}".format(
version_id
)
)
response_data = {
"ARN": secret.arn,
"Name": secret.name,
"VersionId": secret_version["version_id"],
"VersionStages": secret_version["version_stages"],
"CreatedDate": secret_version["createdate"],
}
if "secret_string" in secret_version:
response_data["SecretString"] = secret_version["secret_string"]
if "secret_binary" in secret_version:
response_data["SecretBinary"] = secret_version["secret_binary"]
if (
"secret_string" not in secret_version
and "secret_binary" not in secret_version
):
raise SecretHasNoValueException(version_stage or "AWSCURRENT")
response = json.dumps(response_data)
return response
def update_secret(
self
|
, secret_id, secret_string=None, secret_binary=None, **kwargs
):
# error if secret does not exist
if secret_id not in self.secrets.keys():
raise SecretNotFoundException()
if self.secrets[secret_id].is_deleted():
raise InvalidRequestException(
"An error occurred
|
(InvalidRequestException) when calling the UpdateSecret operation: "
"Y
|
poffuomo/spark
|
python/pyspark/sql/dataframe.py
|
Python
|
apache-2.0
| 72,001 | 0.002736 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
import warnings
from pyspark import copy_func, since
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SQLContext`::
people = sqlContext.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SQLContext
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this RDD as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SQLContext`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` a
|
dashea/pykickstart
|
tests/commands/reboot.py
|
Python
|
gpl-2.0
| 2,617 | 0.002293 |
#
# Brian C. Lane <bcl@redhat.com>
#
# Copyright 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.constants import KS_REBOOT, KS_SHUTDOWN, KS_WAIT
class FC3_TestCase(CommandTest):
command = "reboot"
def runTest(self):
# pass
cmd = self.assert_parse("reboot")
self.assertEqual(cmd.action, KS_REBOOT)
self.assertEqual(str(cmd), "# Reboot after installation\nreboot\n")
cmd = self.assert_parse("shutdown")
self.assertEqual(cmd.action, KS_SHUTDOWN)
self.assertEqual(str(cmd), "# Shutdown after installation\n
|
shutdown\n")
cmd = self.assert_parse("halt")
# halt changed in F18
if self.__class__.__name__ in ("FC3_TestCase", "FC6_TestCase"):
self.assertEqual(cmd.action, KS_SHUTDOWN)
cmd = self.assert_parse("poweroff")
self.assertEqual(cmd.action, KS_SHUTDOWN)
class FC6_TestCase(FC3_TestCase):
def runTest(self):
FC3_TestCase.runTest(self)
# pass
cmd = self.assert_parse("reboot --eject")
self.assertEqual(cmd.action, KS_REBOOT)
self.assertEqual(cmd.eject, True)
self.assertEqual(str(cmd), "# Reboot after installation\nreboot --eject\n")
class F18_TestCase(FC6_TestCase):
def runTest(self):
FC6_TestCase.runTest(self)
# pass
cmd = self.assert_parse("halt")
self.assertEqual(cmd.action, KS_WAIT)
self.assertEqual(str(cmd), "# Halt after installation\nhalt\n")
cmd = self.assert_parse("halt --eject")
self.assertEqual(cmd.eject, True)
self.assertEqual(str(cmd), "# Halt after installation\nhalt --eject\n")
if __name__ == "__main__":
unittest.main()
|
TrevorLowing/PyGames
|
pysollib/winsystems/__init__.py
|
Python
|
gpl-2.0
| 1,307 | 0.011477 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
from pysollib.settings import WIN_SYSTEM
if WIN_SYSTEM == 'win32':
import win32 as gui
elif WIN_SYSTEM == 'aqua':
import aqua as gui
else: # 'x11'
import x11 as gui
init_root_window = gui.init_root_window
TkSettings = gui.TkSettings
|
acdha/django-modeltranslation
|
modeltranslation/settings.py
|
Python
|
bsd-3-clause
| 2,611 | 0.00383 |
# -*- coding: utf-8 -*-
from warnings import warn
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
TRANSLATION_FILES = tuple(getattr(settings, 'MODELTRANSLATION_TRANSLATION_FILES', ()))
TRANSLATION_REGISTRY = getattr(settings, 'MODELTRANSLATION_TRANSLATION_REGISTRY', None)
if TRANSLATION_REGISTRY:
    TRANSLATION_FILES += (TRANSLATION_REGISTRY,)
warn('The setting MODELTRANSLATION_TRANSLATION_REGISTRY is deprecated, '
'use MODELTRANSLATION_TRANSLATION_FILES instead.', DeprecationWarning)
AVAILABLE_LANGUAGES = [l[0] for l in settings.LANGUAGES]
DEFAULT_LANGUAGE = getattr(settings, 'MODELTRANSLATION_DEFAULT_LANGUAGE', None)
if DEFAULT_LANGUAGE and DEFAULT_LANGUAGE not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured('MODELTRANSLATION_DEFAULT_LANGUAGE not in LANGUAGES setting.')
elif not DEFAULT_LANGUAGE:
DEFAULT_LANGUAGE = AVAILABLE_LANGUAGES[0]
# Load allowed CUSTOM_FIELDS from django settings
CUSTOM_FIELDS = getattr(settings, 'MODELTRANSLATION_CUSTOM_FIELDS', ())
# Don't change this setting unless you really know what you are doing
ENABLE_REGISTRATIONS = getattr(settings, 'MODELTRANSLATION_ENABLE_REGISTRATIONS', settings.USE_I18N)
# Modeltranslation specific debug setting
DEBUG = getattr(settings, 'MODELTRANSLATION_DEBUG', settings.DEBUG)
AUTO_POPULATE = getattr(settings, 'MODELTRANSLATION_AUTO_POPULATE', False)
# FALLBACK_LANGUAGES should be in either format:
# MODELTRANSLATION_FALLBACK_LANGUAGES = ('en', 'de')
# MODELTRANSLATION_FALLBACK_LANGUAGES = {'default': ('en', 'de'), 'fr': ('de',)}
# By default we fallback to the default language
FALLBACK_LANGUAGES = getattr(settings, 'MODELTRANSLATION_FALLBACK_LANGUAGES', (DEFAULT_LANGUAGE,))
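# Normalize the simple tuple/list form into the dict form keyed by 'default'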
if isinstance(FALLBACK_LANGUAGES, (tuple, list)):
FALLBACK_LANGUAGES = {'default': FALLBACK_LANGUAGES}
if 'default' not in FALLBACK_LANGUAGES:
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES does not contain "default" key.')
for key, value in FALLBACK_LANGUAGES.iteritems():
if key != 'default' and key not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES: "%s" not in LANGUAGES setting.' % key)
if not isinstance(value, (tuple, list)):
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES: value for key "%s" is not list nor tuple.' % key)
for lang in value:
if lang not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES: "%s" not in LANGUAGES setting.' % lang)
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/py-binwalk/package.py
|
Python
|
lgpl-2.1
| 1,674 | 0.000597 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBinwalk(PythonPackage):
"
|
""Binwalk is a fast, easy to use tool for analyzing, reverse engineering,
and extracting firmware images."""
homepage = "https://github.com/devttys0/binwalk"
url = "https://pypi.io/packages/source/b/binwalk/binwalk-2.1.0.tar.gz"
version('2.1.0', '054867d9abe6a05f43200cf2591051e6')
depends_on('python')
depends_on('py-setuptools', type='build')
|
csecutsc/GmailBot
|
quickstart.py
|
Python
|
mit
| 2,344 | 0.00384 |
# Copyright (C) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://mail.google.com/'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
get_credentials()
if __name__ == '__main__':
main()
|
MichaelKohler/bedrock
|
tests/pages/firefox/welcome/page5.py
|
Python
|
mpl-2.0
| 1,575 | 0.00127 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
from pages.regions.modal import ModalProtocol
from pages.regions.send_to_device import SendToDevice
class FirefoxWelcomePage5(BasePage):
_URL_TEMPLATE = '/{locale}/firefox/welcome/5/'
_modal_primary_button_locator = (By.CSS_SELECTOR, '.primary-cta .js-modal-link')
_modal_secondary_button_locator = (By.CSS_SELECTOR, '.secondary-cta .js-modal-link')
_lockwise_qr_code_locator = (By.ID, 'lockwise-qr')
@property
def send_to_device(self):
return SendToDevice(self)
@property
def is_lockwise_qr_code_displayed(self):
        return self.is_element_displayed(*self._lockwise_qr_code_locator)
@property
def is_primary_modal_button_displayed(self):
        return self.is_element_displayed(*self._modal_primary_button_locator)
@property
def is_secondary_modal_button_displayed(self):
return self.is_element_displayed(*self._modal_secondary_button_locator)
def open_modal(self, locator):
modal = ModalProtocol(self)
self.find_element(*locator).click()
self.wait.until(lambda s: modal.is_displayed)
return modal
def click_primary_modal_button(self):
self.scroll_element_into_view(*self._modal_primary_button_locator)
return self.open_modal(self._modal_primary_button_locator)
|
oyang/testFalsk
|
app.py
|
Python
|
mit
| 109 | 0 |
from flask import Flask
app = Flask(__name__)
app.config.from_object("con
|
figs.appconfig.DevelopmentConfig")
|
|
bongo-project/bongo
|
src/libs/python/bongo/external/email/mime/multipart.py
|
Python
|
gpl-2.0
| 1,377 | 0.000726 |
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base class for MIME multipart/* type messages."""
__all__ = ['MIMEMultipart']
from bongo.external.email.mime.base import MIMEBase
class MIMEMultipart(MIMEBase):
"""Base class for MIME multipart/* type messages."""
def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
**_params):
"""Creates a multipart/* type message.
By default, creates a multipart/mixed message, with proper
Content-Type and MIME-Version headers.
        _subtype is the subtype of the multipart content type, defaulting to
`mixed'.
boundary is the multipart boundary string. By default it is
calculated as needed.
_subparts is a sequence of initial subparts for the payload. It
must be an iterable object, such as a list. You can always
attach new subparts to the message by using the attach() method.
        Additional parameters for the Content-Type header are taken from the
keyword arguments (or passed into the _params argument).
"""
MIMEBase.__init__(self, 'multipart', _subtype, **_params)
if _subparts:
for p in _subparts:
self.attach(p)
if boundary:
self.set_boundary(boundary)
|
sunqm/pyscf
|
examples/local_orb/nlocal.py
|
Python
|
apache-2.0
| 6,720 | 0.058333 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Localization based on UNO from UHF/UKS check files
#
from functools import reduce
import numpy
import scipy.linalg
import h5py
from pyscf import tools,gto,scf,dft
from pyscf.tools import molden
import pmloc
import ulocal
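# Helpers: sqrtm computes the matrix square root S^(1/2); lowdin computes S^(-1/2) (Lowdin orthogonalization)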
def sqrtm(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v*numpy.sqrt(e), v.T.conj())
def lowdin(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v/numpy.sqrt(e), v.T.conj())
def dumpLUNO(fname,thresh=0.01):
chkfile = fname+'.chk'
outfile = fname+'_cmo.molden'
tools.molden.from_chkfile(outfile, chkfile)
#=============================
# Natural orbitals
# Lowdin basis X=S{-1/2}
# psi = chi * C
# = chi' * C'
# = chi*X*(X{-1}C')
#=============================
mol,mf = scf.chkfile.load_scf(chkfile)
mo_coeff = mf["mo_coeff"]
ova=mol.intor_symmetric("cint1e_ovlp_sph")
nb = mo_coeff.shape[1]
# Check overlap
diff = reduce(numpy.dot,(mo_coeff[0].T,ova,mo_coeff[0])) - numpy.identity(nb)
print numpy.linalg.norm(diff)
diff = reduce(numpy.dot,(mo_coeff[1].T,ova,mo_coeff[1])) - numpy.identity(nb)
print numpy.linalg.norm(diff)
# UHF-alpha/beta
ma = mo_coeff[0]
mb = mo_coeff[1]
nalpha = (mol.nelectron+mol.spin)/2
nbeta = (mol.nelectron-mol.spin)/2
# Spin-averaged DM
pTa = numpy.dot(ma[:,:nalpha],ma[:,:nalpha].T)
pTb = numpy.dot(mb[:,:nbeta],mb[:,:nbeta].T)
pT = 0.5*(pTa+pTb)
# Lowdin basis
s12 = sqrtm(ova)
s12inv = lowdin(ova)
pTOAO = reduce(numpy.dot,(s12,pT,s12))
eig,coeff = scipy.linalg.eigh(-pTOAO)
eig = -2.0*eig
eig[eig<0.0]=0.0
eig[abs(eig)<1.e-14]=0.0
ifplot = False #True
if ifplot:
import matplotlib.pyplot as plt
plt.plot(range(nb),eig,'ro')
plt.show()
# Back to AO basis
coeff = numpy.dot(s12inv,coeff)
diff = reduce(numpy.dot,(coeff.T,ova,coeff)) - numpy.identity(nb)
print 'CtSC-I',numpy.linalg.norm(diff)
#
# Averaged Fock
#
enorb = mf["mo_energy"]
fa = reduce(numpy.dot,(ma,numpy.diag(enorb[0]),ma.T))
fb = reduce(numpy.dot,(mb,numpy.diag(enorb[1]),mb.T))
# Non-orthogonal cases: FC=SCE
# Fao = SC*e*C{-1} = S*C*e*Ct*S
fav = 0.5*(fa+fb)
# Expectation value of natural orbitals <i|F|i>
fexpt = reduce(numpy.dot,(coeff.T,ova,fav,ova,coeff))
enorb = numpy.diag(fexpt)
nocc = eig.copy()
#
# Reordering and define active space according to thresh
#
idx = 0
active=[]
for i in range(nb):
if nocc[i]<=2.0-thresh and nocc[i]>=thresh:
active.append(True)
else:
active.append(False)
print '\nNatural orbitals:'
for i in range(nb):
print 'orb:',i,active[i],nocc[i],enorb[i]
active = numpy.array(active)
actIndices = list(numpy.argwhere(active==True).flatten())
cOrbs = coeff[:,:actIndices[0]]
aOrbs = coeff[:,actIndices]
vOrbs = coeff[:,actIndices[-1]+1:]
nb = cOrbs.shape[0]
nc = cOrbs.shape[1]
na = aOrbs.shape[1]
nv = vOrbs.shape[1]
print 'core orbs:',cOrbs.shape
print 'act orbs:',aOrbs.shape
print 'vir orbs:',vOrbs.shape
assert nc+na+nv == nb
# dump UNO
with open(fname+'_uno.molden','w') as thefile:
molden.header(mol,thefile)
molden.orbital_coeff(mol,thefile,coeff)
#=====================
# Population analysis
#=====================
from pyscf import lo
aux = lo.orth_ao(mol,method='meta_lowdin')
#clmo = ulocal.scdm(cOrbs,ova,aux)
#almo = ulocal.scdm(aOrbs,ova,aux)
clmo = cOrbs
almo = aOrbs
ierr,uc = pmloc.loc(mol,clmo)
ierr,ua = pmloc.loc(mol,almo)
clmo = clmo.dot(uc)
almo = almo.dot(ua)
vlmo = ulocal.scdm(vOrbs,ova,aux)
# P-SORT
mo_c,n_c,e_c = ulocal.psort(ova,fav,pT,clmo)
mo_o,n_o,e_o = ulocal.psort(ova,fav,pT,almo)
mo_v,n_v,e_v = ulocal.psort(ova,fav,pT,vlmo)
lmo = numpy.hstack((mo_c,mo_o,mo_v)).copy()
enorb = numpy.hstack([e_c,e_o,e_v])
occ = numpy.hstack([n_c,n_o,n_v])
# CHECK
diff = reduce(numpy.dot,(lmo.T,ova,lmo)) - numpy.identity(nb)
print 'diff=',numpy.linalg.norm(diff)
ulocal.lowdinPop(mol,lmo,ova,enorb,occ)
ulocal.dumpLMO(mol,fname,lmo)
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
return mol,ova,fav,pT,nb,nalpha,nbeta,nc,na,nv,lmo,enorb,occ
def dumpAct(fname,info,actlst,base=1):
actlst2 = [i-base for i in actlst]
mol,ova,fav,pT,nb,nalpha,nbeta,nc,na,nv,lmo,enorb,occ = info
corb = set(range(nc))
aorb = set(range(nc,nc+na))
vorb = set(range(nc+na,nc+na+nv))
print '[dumpAct]'
print ' corb=',corb
print ' aorb=',aorb
print ' vorb=',vorb
sorb = set(actlst2)
rcorb = corb.difference(corb.intersection(sorb))
#assuming act in actlst
#raorb = aorb.difference(aorb.intersection(sorb))
rvorb = vorb.difference(vorb.intersection(sorb))
corb = list(rcorb)
aorb = list(sorb)
vorb = list(rvorb)
print ' corb=',corb
print ' aorb=',aorb
print ' vorb=',vorb
clmo = lmo[:,corb].copy()
almo = lmo[:,aorb].copy()
vlmo = lmo[:,vorb].copy()
ierr,ua = pmloc.loc(mol,almo)
almo = almo.dot(ua)
#>>> DUMP <<<#
# P-SORT
mo_c = clmo
mo_v = vlmo
e_c = enorb[corb].copy()
e_v = enorb[vorb].copy()
n_c = occ[corb].copy()
n_v = occ[vorb].copy()
mo_o,n_o,e_o = ulocal.psort(ova,fav,pT,almo)
lmo2 = numpy.hstack((mo_c,mo_o,mo_v))
enorb = numpy.hstack([e_c,e_o,e_v])
occ = numpy.hstack([n_c,n_o,n_v])
assert len(enorb)==nb
assert len(occ)==nb
# CHECK
diff = reduce(numpy.dot,(lmo2.T,ova,lmo2)) - numpy.identity(nb)
   print 'diff=',numpy.linalg.norm(diff)
ulocal.lowdinPop(mol,lmo,ova,enorb,occ)
ulocal.dumpLMO(mol,fname+'_new',lmo2)
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
print 'diff(LMO2-LMO)=',numpy.linalg.norm(lmo2-lmo)
nc = len(e_c)
na = len(e_o)
nv = len(e_v)
assert na == len(actlst)
assert nc+na+nv == nb
print 'nc,na,nv,nb=',nc,na,nv,nb
return lmo2,nc,na,nv
if __name__ == '__main__':
fname = 'hs_bp86'
info = dumpLUNO(fname)
actlst = [117,118,119,120,125,126]+range(127,137)
dumpAct(fname,info,actlst,base=1)
|
shuttl-io/shuttl
|
shuttl/tests/test_models/test_user.py
|
Python
|
mit
| 1,929 | 0.00311 |
from shuttl.tests import testbase
from shuttl.Models.User import User, UserDataTakenException, NoOrganizationException, ToManyOrganizations
from shuttl.Models.organization import Organization
from shuttl.Models.Reseller import Reseller
class UserTestCase(testbase.BaseTest):
def _setUp(self):
self.reseller = Reseller(name ="test4", url="test2.com")
self.reseller.save()
pass
def test_create(self):
organization = Organization(name="Test", reseller=self.reseller)
organization.save()
organization = Organization.Get(name="Test", vendor=self.reseller)
data = dict(organization=organization, username="Tester", email="Test@tesi.com", password="Things")
|
user = User.Create(**data)
self.assertRaises(UserData
|
TakenException, User.Create, **data)
user2 = User.query.get(user.id)
self.assertEqual(user2.username, user.username)
self.assertEqual(user2, user)
self.assertEqual(user2.password, user.password)
self.assertNotEqual(user2.password, "Things")
self.assertFalse(user.isAdmin)
self.assertFalse(user.isFree)
self.assertFalse(user.isActive)
self.assertFalse(user.is_active)
self.assertFalse(user.is_active)
self.assertIsNotNone(user2.organization)
user.organization = None
self.assertRaises(NoOrganizationException, user.save)
pass
def test_password(self):
org = Organization.Create(name="Test", reseller=self.reseller)
usr = User.Create(organization=org, username="Tester", email="blah@blah.com", password="Bullshit")
oldPW = usr.password
self.assertNotEqual(usr.password, "Bullshit")
self.assertTrue(usr.checkPassword("Bullshit"))
usr.setPassword("Things")
self.assertNotEqual(usr.password, oldPW)
self.assertTrue(usr.checkPassword("Things"))
pass
|
endlessm/chromium-browser
|
tools/swarming_client/third_party/cachetools/lru.py
|
Python
|
bsd-3-clause
| 1,483 | 0 |
from __future__ import absolute_import
import collections
from .cache import Cache
class LRUCache(Cache):
"""Least Recently Used (LRU) cache implementation."""
def __init__(self, maxsize, missing=None, getsizeof=None):
Cache.__init__(self, maxsize, missing, getsizeof)
self.__order = collections.OrderedDict()
def __getitem__(self, key, cache_getitem=Cache.__getitem__):
value = cache_getitem(self, key)
self.__update(key)
return value
def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
cache_setitem(self, key, value)
self.__update(key)
def __delitem__(self, key, cache_delitem=Cache.__delitem__):
cache_delitem(self, key)
del self.__order[key]
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used."""
try:
key = next(iter(self.__order))
except StopIteration:
raise KeyError('%s is empty' % self.__class__.__name__)
else:
return (key, self.pop(key))
if hasattr(collections.OrderedDict, 'move_to_end'):
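        # OrderedDict.move_to_end (Python >= 3.2) marks the key as most recently used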
def __update(self, key):
try:
self.__order.move_to_end(key)
except KeyError:
self.__order[key] = None
else:
def __update(self, key):
try:
                self.__order[key] = self.__order.pop(key)
except KeyError:
self.__order[key] = None
|
skywalka/splunk-for-nagios
|
bin/mklivestatus.py
|
Python
|
gpl-3.0
| 87 | 0.034483 |
# Configs for mk-livestatus lookup scripts
HOST = [ 'nagios', 'nagios1' ]
PORT = 6557
| |
vianasw/spot_launcher
|
spot_launcher/spot_launcher.py
|
Python
|
apache-2.0
| 4,508 | 0.004215 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType
from boto.ec2.blockdevicemapping import BlockDeviceMapping
import time
import copy
import argparse
import sys
import pprint
import os
import yaml
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(BASE_PATH, '../configs')
def launch_from_config(conn, instance_config_name, config_file_name):
spot_requests_config = get_config(config_file_name)
config = spot_requests_config[instance_config_name]
mapping = create_mapping(config)
print 'Launching %s instances'%(instance_config_name)
print 'Instance parameters:'
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
spot_req = conn.request_spot_instances(
config['price'],
config['ami_id'],
count=config['count'],
type=config['type'],
key_name=config['key_name'],
instance_type=config['instance_type'],
placement_group=config['placement_group'],
security_group_ids=config['security_groups'],
subnet_id=config['subnet_id'],
instance_profile_name=config['instance_profile_name'],
block_device_map=mapping
)
request_ids = [req.id for req in spot_req]
print 'Waiting for fulfillment'
instance_ids = wait_for_fulfillment(conn, request_ids,
copy.deepcopy(request_ids))
if 'tags' in config:
tag_instances(conn, instance_ids, config['tags'])
return instance_ids
def get_config(config_file_name):
config_file = open(os.path.join(CONFIG_PATH, config_file_name))
config_dict = yaml.load(config_file.read())
return config_dict
def create_mapping(config):
if 'mapping' not in config:
return None
mapping = BlockDeviceMapping()
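    # Map each ephemeral drive name (e.g. ephemeral0) to the device path where it should be attached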
for ephemeral_name, device_path in config['mapping'].iteritems():
ephemeral = BlockDeviceType()
ephemeral.ephemeral_name = ephemeral_name
mapping[device_path] = ephemeral
return mapping
def wait_for_fulfillment(conn, request_ids, pending_request_ids):
"""Loop through all pending request ids waiting for them to be fulfilled.
If a request is fulfilled, remove it from pending_request_ids.
If there are still pending requests, sleep and check again in 10 seconds.
Only return when all spot requests have been fulfilled."""
instance_ids = []
failed_ids = []
time.sleep(10)
pending_statuses = set(['pending-evaluation', 'pending-fulfillment'])
while len(pending_request_ids) > 0:
results = conn.get_all_spot_instance_requests(
request_ids=pending_request_ids)
for result in results:
if result.status.code == 'fulfilled':
pending_request_ids.pop(pending_request_ids.index(result.id))
print '\nspot request %s fulfilled!'%result.id
instance_ids.append(result.instance_id)
elif result.status.code not in pending_statuses:
pending_request_ids.pop(pending_request_ids.index(result.id))
print '\nspot request %s could not be fulfilled. ' \
'Status code: %s'%(result.id, result.status.code)
failed_ids.append(result.id)
if len(pending_request_ids) > 0:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(10)
if len(failed_ids) > 0:
print 'The following spot requests ' \
'have failed: %s'%(', '.join(failed_ids))
else:
print 'All spot requests fulfilled!'
return instance_ids
def tag_instances(conn, instance_ids, tags):
instances = conn.get_only_instances(instance_ids=instance_ids)
for instance in instances:
for key, value in tags.iteritems():
instance.add_tag(key=key, value=value)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('instance', type=str,
help='Instance config name to launch')
parser.add_argument('-r', '--region', type=str, default='us-east-1',
help='EC2 region name')
parser.add_argument('-c', '--config-file', type=str, default='spot_requests.yml',
help='Spot requests config file name')
args = parser.parse_args()
conn = boto.ec2.connect_to_region(args.region)
config_file_name = args.config_file
instance_config_name = args.instance
launch_from_config(conn, instance_config_name, config_file_name)
if __name__ == '__main__':
main()
|
BioroboticsLab/diktya
|
tests/test_blocks.py
|
Python
|
apache-2.0
| 1,576 | 0 |
# Copyright 2016 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from diktya.blocks import conv2d_block, resnet
from diktya.func_api_helpers import sequential
from keras.layers import Input
from keras.engine.training import Model
def test_conv2d_block():
x = Input(shape=(1, 8, 8))
y = sequential(
conv2d_block(4)
)(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 8, 8)
x = Input(shape=(1, 8, 8))
y = sequential(
conv2d_block(4, pooling='avg')
)(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 4, 4)
x = Input(shape=(1, 8, 8))
y = sequential(
conv2d_block(4, up=True)
)(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 16, 16)
def test_resnet():
n = 4
x = Input(shape=(1, 8, 8))
y = sequential([
conv2d_block(n),
resnet(n)
])(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, n, 8, 8)
|
PressLabs/gitfs
|
tests/repository/base.py
|
Python
|
apache-2.0
| 759 | 0 |
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RepositoryBaseTest(object):
def setup(self):
self.remote_url = "https://example.com"
self.branch = "master"
self.repo_path = "/home/gigirepo"
|
zuck/prometeo-erp
|
products/models.py
|
Python
|
lgpl-3.0
| 7,390 | 0.005954 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This file is part of the prometeo project.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.5'
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from prometeo.core.models import Commentable
class Product(Commentable):
"""Product model.
"""
name = models.CharField(max_length=255, verbose_name=_('name'))
code = models.CharField(max_length=255, verbose_name=_('code'))
ean13 = models.CharField(max_length=13, blank=True, verbose_name=_('EAN13'))
description = models.TextField(blank=True, verbose_name=_('description'))
uom = models.CharField(max_length=20, choices=settings.PRODUCT_UOM_CHOICES, default=settings.PRODUCT_DEFAULT_UOM, verbose_name=_('UOM'))
uos = models.CharField(max_length=20, choices=settings.PRODUCT_UOM_CHOICES, default=settings.PRODUCT_DEFAULT_UOM, verbose_name=_('UOS'))
uom_to_uos = models.FloatField(default=1.0, help_text=_('Conversion rate between UOM and UOS'), verbose_name=_('UOM to UOS'))
weight = models.FloatField(default=1.0, verbose_name=_('unit weight (Kg)'))
is_consumable = models.BooleanField(default=False, verbose_name=_('consumable?'))
is_service = models.BooleanField(default=False, verbose_name=_('service?'))
sales_price = models.FloatField(default=0.0, verbose_name=_('sales price'))
sales_currency = models.CharField(max_length=3, choices=settings.CURRENCIES, default=settings.DEFAULT_CURRENCY, verbose_name=_('sales currency'))
max_sales_discount = models.FloatField(default=0.0, verbose_name=_('max sales discount (%)'))
sales_tax = models.FloatField(default=0.0, verbose_name=_('sales tax (%)'))
suppliers = models.ManyToManyField('partners.Partner', through='products.Supply', null=True, blank=True, verbose_name=_('suppliers'))
categories = models.ManyToManyField('taxonomy.Category', null=True, blank=True, verbose_name=_('categories'))
tags = models.ManyToManyField('taxonomy.Tag', null=True, blank=True, verbose_name=_('tags'))
dashboard = models.OneToOneField('widgets.Region', null=True, verbose_name=_("dashboard"))
stream = models.OneToOneField('notifications.Stream', null=True, verbose_name=_('stream'))
class Meta:
ordering = ('code',)
verbose_name = _('product')
verbose_name_plural = _('products')
def __unicode__(self):
return '#%s: %s' % (self.code, self.name)
@models.permalink
def get_absolute_url(self):
return ('product_detail', (), {"id": self.pk})
@models.permalink
def get_edit_url(self):
return ('product_edit', (), {"id": self.pk})
@models.permalink
def get_delete_url(self):
return ('product_delete', (), {"id": self.pk})
class ProductEntry(models.Model):
"""A set of instances of the same product.
"""
product = models.ForeignKey(Product, verbose_name=_('product'))
quantity = models.FloatField(default=1.0, verbose_name=_('quantity'))
unit_price = models.FloatField(null=True, blank=True, help_text=_("Keep it blank to use the product's default one"), verbose_name=_('unit price'))
tax = models.FloatField(null=True, blank=True, help_text=_("Keep it blank to use the product's default one"), verbose_name=_('tax (%)'))
discount = models.FloatField(null=True, blank=True, help_text=_("Keep it blank to use the product's default one"), verbose_name=_('discount (%)'))
notes = models.TextField(null=True, blank=True, verbose_name=_('notes'))
class Meta:
verbose_name = _('product entry')
verbose_name_plural = _('product entries')
def __unicode__(self):
return '%s (%d %s)' % (self.product, self.quantity, self.product.uos)
def get_absolute_url(self):
return self.product.get_absolute_url()
def clean(self):
product = self.product
if not self.unit_price and product:
self.unit_price = product.sales_price
if not self.tax and product:
self.tax = product.sales_tax
class Supply(models.Model):
"""Relation between a product and one of its supplier.
"""
product = models.ForeignKey(Product, verbose_name=_('product'))
supplier = models.ForeignKey('partners.Partner', limit_choices_to = {'is_supplier': True}, verbose_name=_('supplier'))
supply_method = models.CharField(max_length=10, choices=settings.PRODUCT_SUPPLY_METHODS, default=settings.PRODUCT_DEFAULT_SUPPLY_METHOD, verbose_name=_('supply method'))
name = models.CharField(max_length=255, null=True, blank=True, help_text=_("Product name used by the supplier"), verbose_name=_('ref. name'))
code = models.CharField(max_length=255, null=True, blank=True, help_text=_("Product code used by the supplier"), verbose_name=_('ref. code'))
purchase_price = models.FloatField(default=0.0, verbose_name=_('purchase price'))
purchase_currency = models.CharField(max_length=3, choices=settings.CURRENCIES, default=settings.DEFAULT_CURRENCY, verbose_name=_('purchase currency'))
    max_purchase_discount = models.FloatField(default=0.0, verbose_name=_('max purchase discount (%)'))
    purchase_tax = models.FloatField(default=0.0, verbose_name=_('purchase tax (%)'))
lead_time = models.PositiveIntegerField(default=1, verbose_name=_('lead time (days)'))
minimal_quantity = models.FloatField(default=1.0, verbose_name=_('minimal quantity'))
warranty_period = models.PositiveIntegerField(default=settings.PRODUCT_DEFAULT_WARRANTY_PERIOD, verbose_name=_('warranty period (days)'))
end_of_life = models.DateField(null=True, blank=True, verbose_name=_('end of life'))
class Meta:
ordering = ('product', 'supplier')
verbose_name = _('supply')
verbose_name_plural = _('supplies')
unique_together = (('product', 'supplier'),)
def __unicode__(self):
code = self.code or self.product.code
name = self.name or self.product.name
return '%s (%s)' % (self.product, self.supplier)
@models.permalink
def get_absolute_url(self):
return ('product_supply_detail', (), {"product_id": self.product.pk, "id": self.pk})
@models.permalink
def get_edit_url(self):
return ('product_edit_supply', (), {"product_id": self.product.pk, "id": self.pk})
@models.permalink
def get_delete_url(self):
return ('product_delete_supply', (), {"product_id": self.product.pk, "id": self.pk})
def _stream(self):
return [self.product.stream, self.supplier.stream]
stream = property(_stream)
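# Usage sketch for the Product <-> Partner link above (object names are
# illustrative; the Partner model lives in the partners app referenced above):
#
#   widget = Product.objects.get(code='P-001')
#   acme = Partner.objects.get(is_supplier=True, name='ACME')
#   Supply.objects.create(product=widget, supplier=acme,
#                         purchase_price=9.50, minimal_quantity=10)
#   widget.suppliers.all()   # now includes acme via the Supply through-model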
|
sanguinariojoe/FreeCAD
|
src/Mod/Fem/femtaskpanels/task_element_fluid1D.py
|
Python
|
lgpl-2.1
| 22,417 | 0.001695 |
# ***************************************************************************
# * Copyright (c) 2016 Ofentse Kgoa <kgoaot@eskom.co.za> *
# * Copyright (c) 2018 Bernd Hahnebach <bernd@bimstatik.org> *
# * Based on the FemElementGeometry1D by Bernd Hahnebach *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# *   as published by the Free Software Foundation; either version 2 of   *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# *   but WITHOUT ANY WARRANTY; without even the implied warranty of      *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM element fluid 1D task panel for the document object"
__author__ = "Ofentse Kgoa, Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package task_element_fluid1D
# \ingroup FEM
# \brief task panel for element fluid 1D object
from PySide import QtCore
from PySide import QtGui
import FreeCAD
import FreeCADGui
from FreeCAD import Units
from femguiutils import selection_widgets
from femobjects import element_fluid1D
class _TaskPanel:
"""
The TaskPanel for editing References property of ElementFluid1D objects
"""
def __init__(self, obj):
self.obj = obj
# parameter widget
self.parameterWidget = FreeCADGui.PySideUic.loadUi(
FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/ElementFluid1D.ui"
)
QtCore.QObject.connect(
self.parameterWidget.cb_section_type,
QtCore.SIGNAL("activated(int)"),
self.sectiontype_changed
)
QtCore.QObject.connect(
self.parameterWidget.cb_liquid_section_type,
QtCore.SIGNAL("activated(int)"),
self.liquidsectiontype_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_manning_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.manning_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_manning_radius,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.manning_radius_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_manning_coefficient,
QtCore.SIGNAL("valueChanged(double)"),
self.manning_coefficient_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_enlarge_area1,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.enlarge_area1_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_enlarge_area2,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.enlarge_area2_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_contract_area1,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.contract_area1_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_contract_area2,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.contract_area2_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_inletpressure,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.inlet_pressure_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_outletpressure,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.outlet_pressure_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_inletflowrate,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.inlet_flowrate_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_outletflowrate,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.outlet_flowrate_changed
)
QtCore.QObject.connect(
self.parameterWidget.gb_inletpressure,
QtCore.SIGNAL("clicked(bool)"),
self.inlet_pressure_active
)
QtCore.QObject.connect(
self.parameterWidget.gb_outletpressure,
QtCore.SIGNAL("clicked(bool)"),
self.outlet_pressure_active
)
QtCore.QObject.connect(
self.parameterWidget.gb_inletflowrate,
QtCore.SIGNAL("clicked(bool)"),
self.inlet_flowrate_active
)
QtCore.QObject.connect(
self.parameterWidget.gb_outletflowrate,
QtCore.SIGNAL("clicked(bool)"),
self.outlet_flowrate_active
)
QtCore.QObject.connect(
self.parameterWidget.if_entrance_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.entrance_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_entrance_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.entrance_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_diaphragm_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.diaphragm_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_diaphragm_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.diaphragm_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_bend_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.bend_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_bradius_pdiameter,
QtCore.SIGNAL("valueChanged(double)"),
self.bradius_pdiameter_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_bend_angle,
QtCore.SIGNAL("valueChanged(double)"),
self.bend_angle_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_bend_loss_coefficient,
QtCore.SIGNAL("valueChanged(double)"),
self.bend_loss_coefficient_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_gatevalve_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.gatevalve_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_gatevalve_closing_coeff,
QtCore.SIGNAL("valueChanged(double)"),
self.gatevalve_closing_coeff_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_colebrooke_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.colebrooke_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_colebrooke_radius,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.col
|
Wolfst0rm/Wolf-Cogs
|
quote/quote.py
|
Python
|
mit
| 2,264 | 0.025177 |
import discord
from discord.ext import commands
from random import choice as randomchoice
from .utils.dataIO import fileIO
from .utils import checks
import os
defaultQuotes = [
"Thats why I love switch hitting, I like to be in control ~ Jan, from the Hypermine Dragon Fight - 21st May 2016",
"Thank you for wwaking within our server today- That sounds wrong. That does not sound PG at all -Jandoncom 24/5/16",
"EVERYONE RUN! GECKOR IS DRIVING A TRUCK AGAIN /o\ ~ N7DeltaForce 03/06/16",
"Everyone wants a piece of this -Jandoncom 7/6/2016",
"I Want Khip Kho's Heart! ~ Jandoncom 7/6/2016"]
class Quote:
"""Quote System for Red-DiscordBot
Based on the MIRC Quote Script by Zsadist (Hawkee Link: http://hawkee.com/snippet/8378/ )"""
def __init__(self, bot):
self.bot = bot
self.quotes = fileIO("data/quote/quotes.json", "load")
def save_quotes(self):
fileIO("data/quote/quotes.json", 'save', self.quotes)
@commands.group(pass_context=True, invoke_without_command=True)
async def quote(self, ctx):
"""Random Quote to be Drawn"""
await self.bot.say("Quote: " + randomchoice(self.quotes) + " ")
@quote.command()
async def add(self, quote):
"""Adds a Quote to the List"""
if quote in self.quotes:
await self.bot.say("That quote is already in the database!")
else:
self.quotes.append(quote)
self.save_quotes()
await self.bot.say("Quote: " + quote + " has been saved to the database!")
@quote.command()
    @checks.mod_or_permissions(administrator=True)
async def remove(self, quote):
"""Removes a Quote from the list"""
        if quote not in self.quotes:
            await self.bot.say("That quote is not in the database!")
else:
self.quotes.remove(quote)
self.save_quotes()
await self.bot.say("Quote: " + quote + " has been removed from the database!")
def check_folder():
    if not os.path.exists("data/quote"):
print("Creating data/quote")
os.makedirs("data/quote")
def check_files():
fileName = "data/quote/quotes.json"
if not fileIO(fileName, "check"):
print("Creating Empty Quote.json File")
print("Creation Complete! Enjoy your new Quote System ~ Wolfstorm")
fileIO(fileName, "save", defaultQuotes)
def setup(bot):
check_folder()
check_files()
QuoteSystem = Quote(bot)
bot.add_cog(QuoteSystem)
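# Typical interaction once the cog is loaded into a Red-DiscordBot instance
# (prefix shown as [p]; the actual prefix depends on the bot configuration):
#
#   [p]load quote            loads this cog and creates data/quote/quotes.json
#   [p]quote                 replies with a random stored quote
#   [p]quote add <text>      appends <text> to the stored quotes
#   [p]quote remove <text>   removes <text> (mod/administrator only)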
|
minorua/QGIS
|
python/plugins/processing/algs/grass7/ext/v_in_geonames.py
|
Python
|
gpl-2.0
| 1,246 | 0 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_in_geonames.py
----------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
*                                                                         *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def processCommand(alg, parameters, context, feedback):
# v.in.geonames needs to use WGS84 projection
alg.commands.append('g.proj -c epsg=4326')
# Launch the algorithm
alg.processCommand(parameters, context, feedback)
|
|
tsl143/zamboni
|
mkt/webapps/tests/test_models.py
|
Python
|
bsd-3-clause
| 97,405 | 0 |
# -*- coding: utf-8 -*-
import functools
import hashlib
import json
import os
import tempfile
import unittest
import uuid
import zipfile
from contextlib import nested
from datetime import datetime, timedelta
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core import mail
from django.core.urlresolvers import reverse
from django.db.models.signals import post_delete, post_save
from django.test.utils import override_settings
from django.utils import translation
import elasticsearch
import mock
from mock import patch
from nose.tools import eq_, ok_, raises
import mkt
from lib.utils import static_url
from mkt.constants import apps, MANIFEST_CONTENT_TYPE
from mkt.constants.applications import DEVICE_TYPES
from mkt.constants.iarc_mappings import (DESCS, INTERACTIVES, REVERSE_DESCS,
REVERSE_INTERACTIVES)
from mkt.constants.payments import PROVIDER_BANGO, PROVIDER_REFERENCE
from mkt.constants.regions import RESTOFWORLD
from mkt.developers.models import (AddonPaymentAccount, PaymentAccount,
SolitudeSeller)
from mkt.developers.providers import ALL_PROVIDERS
from mkt.files.models import File
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.files.utils import WebAppParser
from mkt.prices.models import AddonPremium, Price, PriceCurrency
from mkt.reviewers.models import EscalationQueue, QUEUE_TARAKO, RereviewQueue
from mkt.site.fixtures import fixture
from mkt.site.helpers import absolutify
from mkt.site.storage_utils import (public_storage, private_storage,
storage_is_remote)
from mkt.site.tests import (DynamicBoolFieldsTestMixin, ESTestCase,
TestCase, WebappTestCase, user_factory)
from mkt.site.utils import app_factory, version_factory
from mkt.submit.tests.test_views import BasePackagedAppTest, BaseWebAppTest
from mkt.translations.models import Translation
from mkt.users.models import UserProfile
from mkt.versions.models import update_status, Version
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import (AddonDeviceType, AddonExcludedRegion,
AddonUpsell, AppFeatures, AppManifest,
BlockedSlug, ContentRating, Geodata,
get_excluded_in, IARCInfo, Installed, Preview,
RatingDescriptors, RatingInteractives,
version_changed, Webapp)
from mkt.webapps.signals import version_changed as version_changed_signal
class TestWebapp(WebappTestCase):
def add_payment_account(self, app, provider_id, user=None):
if not user:
user = user_factory()
payment = PaymentAccount.objects.create(
solitude_seller=SolitudeSeller.objects.create(user=user,
uuid=uuid.uuid4()),
provider=provider_id,
user=user,
seller_uri=uuid.uuid4(),
uri=uuid.uuid4())
return AddonPaymentAccount.objects.create(
addon=app, payment_account=payment, product_uri=uuid.uuid4())
def test_get_icon_url(self):
app = self.get_app()
if storage_is_remote():
path = '%s/%s-%s.png' % (app.get_icon_dir(), app.pk, 32)
expected = '%s?modified=never' % public_storage.url(path)
else:
expected = (static_url('ADDON_ICON_URL')
% (str(app.id)[0:3], app.id, 32, 'never'))
assert app.get_icon_url(32).endswith(expected), (
'Expected %s, got %s' % (expected, app.icon_url))
app.icon_hash = 'abcdef'
assert app.get_icon_url(32).endswith('?modified=abcdef')
app.icon_type = None
assert app.get_icon_url(32).endswith('hub/default-32.png')
def test_get_promo_img_url(self):
app = self.get_app()
eq_(app.get_promo_img_url('640'), '')
eq_(app.get_promo_img_url('1050'), '')
app.promo_img_hash = 'chicken'
ok_('webapp_promo_imgs/337/337141-640.png?modified=chicken' in
app.get_promo_img_url('640'))
ok_('webapp_promo_imgs/337/337141-1050.png?modified=chicken' in
app.get_promo_img_url('1050'))
def test_has_payment_account(self):
app = self.get_app()
assert not app.has_payment_account()
self.add_payment_account(app, PROVIDER_BANGO)
assert app.has_payment_account()
def test_has_multiple_payment_accounts(self):
app = self.get_app()
assert not app.has_multiple_payment_accounts(), 'no accounts'
account = self.add_payment_account(app, PROVIDER_BANGO)
assert not app.has_multiple_payment_accounts(), 'one account'
self.add_payment_account(app, PROVIDER_REFERENCE, user=account.user)
ok_(app.has_multiple_payment_accounts(), 'two accounts')
def test_no_payment_account(self):
app = self.get_app()
assert not app.has_payment_account()
with self.assertRaises(app.PayAccountDoesNotExist):
app.payment_account(PROVIDER_BANGO)
def test_get_payment_account(self):
app = self.get_app()
acct = self.add_payment_account(app, PROVIDER_BANGO)
fetched_acct = app.payment_account(PROVIDER_BANGO)
eq_(acct, fetched_acct)
def test_delete_reason(self):
"""Test deleting with a reason gives the reason in the mail."""
app = self.get_app()
reason = u'trêason'
eq_(len(mail.outbox), 0)
app.delete(msg='bye', reason=reason)
eq_(len(mail.outbox), 1)
assert reason in mail.outbox[0].body
def test_soft_deleted(self):
app = self.get_app()
eq_(len(Webapp.objects.all()), 1)
eq_(len(Webapp.with_deleted.all()), 1)
app.delete('boom shakalakalaka')
eq_(len(Webapp.objects.all()), 0)
eq_(len(Webapp.with_deleted.all()), 1)
# When an app is deleted its slugs and domain should get relinquished.
post_mortem = Webapp.with_deleted.filter(id=app.id)
eq_(post_mortem.count(), 1)
eq_(getattr(post_mortem[0], 'app_domain'), None)
eq_(getattr(post_mortem[0], 'app_slug'), '337141')
def test_soft_deleted_valid(self):
app = self.get_app()
Webapp.objects.create(status=mkt.STATUS_DELETED)
eq_(list(Webapp.objects.valid()), [app])
eq_(list(Webapp.with_deleted.valid()), [app])
def test_delete_incomplete_with_deleted_version(self):
"""Test deleting incomplete add-ons with no public version attached."""
app = self.get_app()
app.current_version.delete()
eq_(Version.objects.count(), 0)
eq_(Version.with_deleted.count(), 1)
app.update(status=0, highest_status=0)
# We want to be in the worst possible situation, no direct foreign key
# to the deleted versions, do we call update_version() now that we have
# an incomplete app.
app.update_version()
eq_(app.latest_version, None)
eq_(app.current_version, None)
app.delete()
# The app should have been soft-deleted.
eq_(len(mail.outbox), 1)
eq_(Webapp.objects.count(), 0)
eq_(Webapp.with_deleted.count(), 1)
def test_get_price(self):
app = self.get_app()
self.make_premium(app)
eq_(app.get_price(region=mkt.regions.USA.id), 1)
def test_get_price_tier(self):
app = self.get_app()
self.make_premium(app)
eq_(str(app.get_tier().price), '1.00')
ok_(app.get_tier_name())
def test_get_price_tier_no_charge(self):
app = self.get_app()
self.make_premium(app, 0)
eq_(str(app.get_tier().price), '0')
ok_(app.get_tier_name())
@mock.patch('mkt.versions.models.Version.is_privileged', True)
def test_app_type_privileged(self):
app = self.get_app()
app.update(is_packaged=True)
eq_(app.app_type, 'privileged')
def test_excluded_in(sel
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamlink.base/resources/lib/streamlink/plugins/mitele.py
|
Python
|
gpl-2.0
| 2,844 | 0.001758 |
import logging
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils import parse_json
log = logging.getLogger(__name__)
class Mitele(Plugin):
_url_re = re.compile(r"https?://(?:www\.)?mitele\.es/directo/(?P<channel>[\w-]+)")
|
pdata_url = "https://indalo.mediaset.es/mmc-player/api/mmc/v1/{channel}/live/html5.json"
gate_url = "https://gatekeeper.mediaset.es"
    error_schema = validate.Schema({
"code": validate.any(validate.text, int),
"message": validate.text,
})
pdata_schema = validate.Schema(validate.transform(parse_json), validate.any(
validate.all(
{
"locations": [{
"gcp": validate.text,
"ogn": validate.any(None, validate.text),
}],
},
validate.get("locations"),
validate.get(0),
),
error_schema,
))
gate_schema = validate.Schema(
validate.transform(parse_json),
validate.any(
{
"mimeType": validate.text,
"stream": validate.url(),
},
error_schema,
)
)
def __init__(self, url):
super(Mitele, self).__init__(url)
self.session.http.headers.update({
"User-Agent": useragents.FIREFOX,
"Referer": self.url
})
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
channel = self._url_re.match(self.url).group("channel")
pdata = self.session.http.get(self.pdata_url.format(channel=channel),
acceptable_status=(200, 403, 404),
schema=self.pdata_schema)
log.trace("{0!r}".format(pdata))
if pdata.get("code"):
log.error("{0} - {1}".format(pdata["code"], pdata["message"]))
return
gdata = self.session.http.post(self.gate_url,
acceptable_status=(200, 403, 404),
data=pdata,
schema=self.gate_schema)
log.trace("{0!r}".format(gdata))
if gdata.get("code"):
log.error("{0} - {1}".format(gdata["code"], gdata["message"]))
return
log.debug("Stream: {0} ({1})".format(gdata["stream"], gdata.get("mimeType", "n/a")))
for s in HLSStream.parse_variant_playlist(self.session,
gdata["stream"],
name_fmt="{pixels}_{bitrate}").items():
yield s
__plugin__ = Mitele
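# URL matching sketch for the plugin above; only /directo/<channel> pages are
# claimed by this plugin:
#
#   Mitele.can_handle_url("https://www.mitele.es/directo/telecinco")   # True
#   Mitele.can_handle_url("https://www.mitele.es/programas/algo")      # False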
|
TeamSPoon/logicmoo_workspace
|
packs_web/butterfly/lib/python3.7/site-packages/flake8/utils.py
|
Python
|
mit
| 15,155 | 0 |
"""Utility methods for flake8."""
import collections
import fnmatch as _fnmatch
import inspect
import io
import logging
import os
import platform
import re
import sys
import tokenize
from typing import Callable, Dict, Generator, List, Optional, Pattern
from typing import Sequence, Set, Tuple, Union
from flake8 import exceptions
from flake8._compat import lru_cache
if False: # `typing.TYPE_CHECKING` was introduced in 3.5.2
from flake8.plugins.manager import Plugin
DIFF_HUNK_REGEXP = re.compile(r"^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$")
COMMA_SEPARATED_LIST_RE = re.compile(r"[,\s]")
LOCAL_PLUGIN_LIST_RE = re.compile(r"[,\t\n\r\f\v]")
string_types = (str, type(u""))
def parse_comma_separated_list(value, regexp=COMMA_SEPARATED_LIST_RE):
# type: (str, Pattern[str]) -> List[str]
"""Parse a comma-separated list.
:param value:
String to be parsed and normalized.
:param regexp:
Compiled regular expression used to split the value when it is a
string.
:type regexp:
_sre.SRE_Pattern
:returns:
List of values with whitespace stripped.
:rtype:
list
"""
assert isinstance(value, string_types), value
separated = regexp.split(value)
item_gen = (item.strip() for item in separated)
return [item for item in item_gen if item]
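# Example of the parsing behaviour above (values are illustrative):
#
#   parse_comma_separated_list("E121,E123, W503")
#   # -> ['E121', 'E123', 'W503']
#   parse_comma_separated_list("E1\nE2,,E3")
#   # -> ['E1', 'E2', 'E3']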
_Token = collections.namedtuple("Token", ("tp", "src"))
_CODE, _FILE, _COLON, _COMMA, _WS = "code", "file", "colon", "comma", "ws"
_EOF = "eof"
_FILE_LIST_TOKEN_TYPES = [
(re.compile(r"[A-Z]+[0-9]*(?=$|\s|,)"), _CODE),
(re.compile(r"[^\s:,]+"), _FILE),
(re.compile(r"\s*:\s*"), _COLON),
(re.compile(r"\s*,\s*"), _COMMA),
(re.compile(r"\s+"), _WS),
]
def _tokenize_files_to_codes_mapping(value):
# type: (str) -> List[_Token]
tokens = []
i = 0
while i < len(value):
for token_re, token_name in _FILE_LIST_TOKEN_TYPES:
match = token_re.match(value, i)
if match:
tokens.append(_Token(token_name, match.group().strip()))
i = match.end()
break
else:
raise AssertionError("unreachable", value, i)
tokens.append(_Token(_EOF, ""))
return tokens
def parse_files_to_codes_mapping(value_): # noqa: C901
# type: (Union[Sequence[str], str]) -> List[Tuple[str, List[str]]]
"""Parse a files-to-codes mapping.
A files-to-codes mapping a sequence of values specified as
`filenames list:codes list ...`. Each of the lists may be separated by
either comma or whitespace tokens.
:param value: String to be parsed and normalized.
:type value: str
"""
if not isinstance(value_, string_types):
value = "\n".join(value_)
else:
value = value_
ret = [] # type: List[Tuple[str, List[str]]]
if not value.strip():
return ret
class State:
seen_sep = True
seen_colon = False
filenames = [] # type: List[str]
codes = [] # type: List[str]
def _reset(): # type: () -> None
if State.codes:
for filename in State.filenames:
ret.append((filename, State.codes))
State.seen_sep = True
State.seen_colon = False
State.filenames = []
State.codes = []
def _unexpected_token(): # type: () -> exceptions.ExecutionError
def _indent(s): # type: (str) -> str
return " " + s.strip().replace("\n", "\n ")
return exceptions.ExecutionError(
"Expected `per-file-ignores` to be a mapping from file exclude "
"patterns to ignore codes.\n\n"
"Configured `per-file-ignores` setting:\n\n{}".format(
_indent(value)
)
)
for token in _tokenize_files_to_codes_mapping(value):
# legal in any state: separator sets the sep bit
if token.tp in {_COMMA, _WS}:
State.seen_sep = True
# looking for filenames
elif not State.seen_colon:
if token.tp == _COLON:
State.seen_colon = True
State.seen_sep = True
elif State.seen_sep and token.tp == _FILE:
State.filenames.append(token.src)
State.seen_sep = False
else:
raise _unexpected_token()
# looking for codes
else:
if token.tp == _EOF:
_reset()
elif State.seen_sep and token.tp == _CODE:
State.codes.append(token.src)
State.seen_sep = False
elif State.seen_sep and token.tp == _FILE:
_reset()
State.filenames.append(token.src)
State.seen_sep = False
else:
raise _unexpected_token()
return ret
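# Example of the mapping format accepted above (as used for the
# per-file-ignores option; values are illustrative):
#
#   parse_files_to_codes_mapping("setup.py:E121,E123 docs/*.py:E501")
#   # -> [('setup.py', ['E121', 'E123']), ('docs/*.py', ['E501'])]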
def normalize_paths(paths, parent=os.curdir):
# type: (Sequence[str], str) -> List[str]
"""Normalize a list of paths relative to a parent directory.
:returns:
The normalized paths.
:rtype:
[str]
"""
assert isinstance(paths, list), paths
return [normalize_path(p, parent) for p in paths]
def normalize_path(path, parent=os.curdir):
# type: (str, str) -> str
"""Normalize a single-path.
:returns:
The normalized path.
:rtype:
str
"""
# NOTE(sigmavirus24): Using os.path.sep and os.path.altsep allow for
# Windows compatibility with both Windows-style paths (c:\\foo\bar) and
# Unix style paths (/foo/bar).
separator = os.path.sep
# NOTE(sigmavirus24): os.path.altsep may be None
alternate_separator = os.path.altsep or ""
if separator in path or (
alternate_separator and alternate_separator in path
):
path = os.path.abspath(os.path.join(parent, path))
return path.rstrip(separator + alternate_separator)
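# Quick examples of the normalisation above (POSIX-style paths, illustrative):
#
#   normalize_path("setup.py")                      # -> 'setup.py' (no separator)
#   normalize_path("src/pkg/", parent="/project")   # -> '/project/src/pkg'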
def _stdin_get_value_py3():  # type: () -> str
stdin_value = sys.stdin.buffer.read()
fd = io.BytesIO(stdin_value)
try:
coding, _ = tokenize.detect_encoding(fd.readline)
return stdin_value.decode(coding)
except (LookupError, SyntaxError, UnicodeError):
return stdin_value.decode("utf-8")
@lru_cache(maxsize=1)
def stdin_get_value(): # type: () -> str
|
"""Get and cache it so plugins can use it."""
if sys.version_info < (3,):
return sys.stdin.read()
else:
return _stdin_get_value_py3()
def stdin_get_lines(): # type: () -> List[str]
"""Return lines of stdin split according to file splitting."""
if sys.version_info < (3,):
return list(io.BytesIO(stdin_get_value()))
else:
return list(io.StringIO(stdin_get_value()))
def parse_unified_diff(diff=None):
# type: (Optional[str]) -> Dict[str, Set[int]]
"""Parse the unified diff passed on stdin.
:returns:
dictionary mapping file names to sets of line numbers
:rtype:
dict
"""
# Allow us to not have to patch out stdin_get_value
if diff is None:
diff = stdin_get_value()
number_of_rows = None
current_path = None
parsed_paths = collections.defaultdict(set) # type: Dict[str, Set[int]]
for line in diff.splitlines():
if number_of_rows:
# NOTE(sigmavirus24): Below we use a slice because stdin may be
# bytes instead of text on Python 3.
if line[:1] != "-":
number_of_rows -= 1
# We're in the part of the diff that has lines starting with +, -,
# and ' ' to show context and the changes made. We skip these
# because the information we care about is the filename and the
# range within it.
# When number_of_rows reaches 0, we will once again start
# searching for filenames and ranges.
continue
# NOTE(sigmavirus24): Diffs that we support look roughly like:
# diff a/file.py b/file.py
# ...
# --- a/file.py
# +++ b/file.py
# Below we're looking for that last line. Every diff tool that
# gives us this output may have additional information after
# ``b/file.py`` which it will separate with
|
dbbhattacharya/kitsune
|
vendor/packages/pylint/test/input/func_use_for_or_listcomp_var.py
|
Python
|
bsd-3-clause
| 560 | 0.014286 |
"""test a warning is triggered when using for a lists comprehension variable"""
__revision__ = 'yo'
TEST_LC = [C for C in __revision__ if C.isalpha()]
print C # WARN
C = 4
print C # this one shouldn't trigger any warning
B = [B for B in __revision__ if B.isalpha()]
print B # nor this one
for var1, var2 in TEST_LC:
var1 = var2 + 4
print var1 # WARN
for note in __revision__:
note.something()
for line in __revision__:
for note in line:
A = note.anotherthing()
for x in []:
pass
for x in range(3):
    print (lambda : x)() # OK
|
maxBombrun/lipidDroplets
|
settings.py
|
Python
|
bsd-3-clause
| 1,178 | 0.034805 |
# settings.py
#######################################################
#
# Definition of the different paths:
# - CellProfiler (Software, input, output)
# - Input
# - Output
#
#######################################################
import os
def init():
global pathList
CPPath= "D:/Logiciel/CellProfiler2.2/CellProfiler.exe"
inputDataPath= "C:/Data/Granulometry/Data/"
if inputDataPath[-1] != '/' :
inputDataPath=inputDataPath+'/';
resultPath= "./../Results/"
colorDisplayPath =resultPath +"colorDisplay/"
outputDetPath = resultPath + "outputResults/"
inputCellProfilerPath =resultPath +"inputCP/"
outputCellProfilerPath =resultPath +"CPResults/"
if not os.path.isdir(resultPath):
os.mkdir(resultPath)
if not os.path.isdir(colorDisplayPath):
os.mkdir(colorDisplayPath)
    if not os.path.isdir(outputDetPath):
        os.mkdir(outputDetPath)
    if not os.path.isdir(inputCellProfilerPath):
        os.mkdir(inputCellProfilerPath)
    if not os.path.isdir(outputCellProfilerPath):
        os.mkdir(outputCellProfilerPath)
pathList = [CPPath]+ [inputDataPath] + [resultPath] + [outputDetPath] + [inputCellProfilerPath] + [outputCellProfilerPath]
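# Sketch of how a driver script is expected to consume this module (the
# calling scripts are not shown here; index order follows pathList above):
#
#   import settings
#   settings.init()
#   cp_exe, input_dir = settings.pathList[0], settings.pathList[1]
#   cp_results_dir = settings.pathList[5]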
|
CruiseDevice/coala
|
tests/coalaFormatTest.py
|
Python
|
agpl-3.0
| 1,124 | 0 |
import os
import re
import sys
import unittest
from coalib import coala_format
from coalib.misc.ContextManagers import prepare_file
from tests.TestUtilities import bear_test_module, execute_coala
class coalaFormatTest(unittest.TestCase):
def setUp(self):
self.old_argv = sys.argv
def tearDown(self):
sys.argv = self.old_argv
def test_line_count(self):
with bear_test_module(), \
prepare_file(["#fixme"], None) as (lines, filename):
retval, output = execute_coala(coala_format.main, "coala-format",
"-c", os.devnull,
"-f", re.escape(filename),
"-b", "LineCountTestBear")
self.assertRegex(output, r'msg:This file has [0-9]+ lines.',
"coala-format output for line count should "
"not be empty")
self.assertEqual(retval, 1,
"coala-format must return exitcode 1 when it "
"yields results")
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_10_01/operations/_operations.py
|
Python
|
mit
| 5,377 | 0.004092 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
**kwargs: Any
) -> HttpRequest:
api_version = "2019-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Resources/operations')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.OperationListResult"]:
"""Lists all of the available Microsoft.Resources REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_10_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
|
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Resources/operations'} # type: ignore
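# Consumer-side sketch; the client class and credential wiring below follow the
# usual azure-mgmt-resource pattern and are not defined in this file:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.resource import ResourceManagementClient
#
#   client = ResourceManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for op in client.operations.list():   # ItemPaged from the method above
#       print(op.name)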
|
1flow/1flow
|
oneflow/core/migrations/0036_auto__add_userfeeds__add_usersubscriptions.py
|
Python
|
agpl-3.0
| 37,603 | 0.007872 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserFeeds'
db.create_table(u'core_userfeeds', (
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='feeds', unique=True, primary_key=True, to=orm['base.User'])),
            ('imported_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='imported_items', unique=True, null=True, to=orm['core.BaseFeed'])),
            ('sent_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='sent_items', unique=True, null=True, to=orm['core.BaseFeed'])),
('received_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='received_items', unique=True, null=True, to=orm['core.BaseFeed'])),
('written_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='written_items', unique=True, null=True, to=orm['core.BaseFeed'])),
))
db.send_create_signal('core', ['UserFeeds'])
# Adding M2M table for field blogs on 'UserFeeds'
m2m_table_name = db.shorten_name(u'core_userfeeds_blogs')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('userfeeds', models.ForeignKey(orm['core.userfeeds'], null=False)),
('basefeed', models.ForeignKey(orm['core.basefeed'], null=False))
))
db.create_unique(m2m_table_name, ['userfeeds_id', 'basefeed_id'])
# Adding model 'UserSubscriptions'
db.create_table(u'core_usersubscriptions', (
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='subscriptions', unique=True, primary_key=True, to=orm['base.User'])),
('imported_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='imported_items', unique=True, null=True, to=orm['core.Subscription'])),
('sent_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='sent_items', unique=True, null=True, to=orm['core.Subscription'])),
('received_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='received_items', unique=True, null=True, to=orm['core.Subscription'])),
('written_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='written_items', unique=True, null=True, to=orm['core.Subscription'])),
))
db.send_create_signal('core', ['UserSubscriptions'])
# Adding M2M table for field blogs on 'UserSubscriptions'
m2m_table_name = db.shorten_name(u'core_usersubscriptions_blogs')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('usersubscriptions', models.ForeignKey(orm['core.usersubscriptions'], null=False)),
('subscription', models.ForeignKey(orm['core.subscription'], null=False))
))
db.create_unique(m2m_table_name, ['usersubscriptions_id', 'subscription_id'])
def backwards(self, orm):
# Deleting model 'UserFeeds'
db.delete_table(u'core_userfeeds')
# Removing M2M table for field blogs on 'UserFeeds'
db.delete_table(db.shorten_name(u'core_userfeeds_blogs'))
# Deleting model 'UserSubscriptions'
db.delete_table(u'core_usersubscriptions')
# Removing M2M table for field blogs on 'UserSubscriptions'
db.delete_table(db.shorten_name(u'core_usersubscriptions_blogs'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': 'a2a11d045c484d9cb16448cca4075f1d'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'n
|
sid88in/incubator-airflow
|
tests/sensors/test_sql_sensor.py
|
Python
|
apache-2.0
| 2,780 | 0.001799 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import unittest
from airflow import DAG
from airflow import configuration
from airflow.sensors.sql_sensor import SqlSensor
from airflow.utils.timezone import datetime
configuration.load_test_config()
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_sql_dag'
class SqlSensorTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=args)
@unittest.skipUnless(
        'mysql' in configuration.conf.get('core', 'sql_alchemy_conn'), "this is a mysql test")
def test_sql_sensor_mysql(self):
t = SqlSensor(
task_id='sql_sensor_check',
conn_id='mysql_default',
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag
)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@unittest.skipUnless(
'postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'), "this is a postgres test")
def test_sql_sensor_postgres(self):
t = SqlSensor(
task_id='sql_sensor_check',
conn_id='postgres_default',
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag
)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('airflow.sensors.sql_sensor.BaseHook')
def test_sql_sensor_postgres_poke(self, mock_hook):
t = SqlSensor(
task_id='sql_sensor_check',
conn_id='postgres_default',
sql="SELECT 1",
)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
self.assertFalse(t.poke(None))
mock_get_records.return_value = [['1']]
self.assertTrue(t.poke(None))
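# Minimal DAG usage mirroring the arguments exercised above (sketch; the
# surrounding DAG object is assumed):
#
#   wait_for_rows = SqlSensor(
#       task_id='wait_for_rows',
#       conn_id='postgres_default',
#       sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
#       poke_interval=60,
#       dag=dag,
#   )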
|
divio/django-shop
|
shop/migrations/0009_delete_email.py
|
Python
|
bsd-3-clause
| 361 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-25 19:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0008_notification_recipient'),
]
operations = [
migrations.DeleteModel(
            name='Email',
),
]
|
fireduck64/electrum
|
lib/dnssec.py
|
Python
|
mit
| 10,409 | 0.00221 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import traceback
import sys
import time
import struct
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from dns.exception import DNSException
"""
Pure-Python version of dns.dnssec._validate_rsig
"""
import ecdsa
import rsakey
def python_validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
from dns.dnssec import ValidationFailure, ECDSAP256SHA256, ECDSAP384SHA384
from dns.dnssec import _find_candidate_keys, _make_hash, _is_ecdsa, _is_rsa, _to_rdata, _make_algorithm_id
if isinstance(origin, (str, unicode)):
origin = dns.name.from_text(origin, dns.name.root)
for candidate_key in _find_candidate_keys(keys, rrsig):
if not candidate_key:
raise ValidationFailure, 'unknown key'
# For convenience, allow the rrset to be specified as a (name, rdataset)
# tuple as well as a proper rrset
if isinstance(rrset, tuple):
rrname = rrset[0]
rdataset = rrset[1]
else:
rrname = rrset.name
rdataset = rrset
if now is None:
now = time.time()
if rrsig.expiration < now:
raise ValidationFailure, 'expired'
if rrsig.inception > now:
raise ValidationFailure, 'not yet valid'
hash = _make_hash(rrsig.algorithm)
if _is_rsa(rrsig.algorithm):
keyptr = candidate_key.key
(bytes,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
if bytes == 0:
(bytes,) = struct.unpack('!H', keyptr[0:2])
keyptr = keyptr[2:]
rsa_e = keyptr[0:bytes]
rsa_n = keyptr[bytes:]
n = ecdsa.util.string_to_number(rsa_n)
e = ecdsa.util.string_to_number(rsa_e)
pubkey = rsakey.RSAKey(n, e)
sig = rrsig.signature
elif _is_ecdsa(rrsig.algorithm):
if rrsig.algorithm == ECDSAP256SHA256:
curve = ecdsa.curves.NIST256p
key_len = 32
digest_len = 32
elif rrsig.algorithm == ECDSAP384SHA384:
curve = ecdsa.curves.NIST384p
key_len = 48
digest_len = 48
else:
# shouldn't happen
raise ValidationFailure, 'unknown ECDSA curve'
keyptr = candidate_key.key
x = ecdsa.util.string_to_number(keyptr[0:key_len])
y = ecdsa.util.string_to_number(keyptr[key_len:key_len * 2])
assert ecdsa.ecdsa.point_is_valid(curve.generator, x, y)
point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point, curve)
r = rrsig.signature[:key_len]
s = rrsig.signature[key_len:]
sig = ecdsa.ecdsa.Signature(ecdsa.util.string_to_number(r),
ecdsa.util.string_to_number(s))
else:
raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
hash.update(_to_rdata(rrsig, origin)[:18])
hash.update(rrsig.signer.to_digestable(origin))
if rrsig.labels < len(rrname) - 1:
suffix = rrname.split(rrsig.labels + 1)[1]
rrname = dns.name.from_text('*', suffix)
rrnamebuf = rrname.to_digestable(origin)
rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
rrsig.original_ttl)
rrlist = sorted(rdataset);
for rr in rrlist:
hash.update(rrnamebuf)
hash.update(rrfixed)
rrdata = rr.to_digestable(origin)
rrlen = struct.pack('!H', len(rrdata))
hash.update(rrlen)
hash.update(rrdata)
digest = hash.digest()
if _is_rsa(rrsig.algorithm):
digest = _make_algorithm_id(rrsig.algorithm) + digest
if pubkey.verify(bytearray(sig), bytearray(digest)):
return
elif _is_ecdsa(rrsig.algorithm):
diglong = ecdsa.util.string_to_number(digest)
if verifying_key.pubkey.verifies(diglong, sig):
return
else:
raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
raise ValidationFailure, 'verify failure'
# replace validate_rrsig
dns.dnssec._validate_rrsig = python_validate_rrsig
dns.dnssec.validate_rrsig = python_validate_rrsig
dns.dnssec.validate = dns.dnssec._validate
from util import print_error
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
    dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY',
'257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise BaseException('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
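# Usage sketch (Python 2, matching this module): fetch the root DNSKEY RRset
# and validate it against the RRSIG returned alongside it:
#
#   rrset = check_query('8.8.8.8', '.', dns.rdatatype.DNSKEY, None)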
def get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
|
open-synergy/runbot-addons
|
runbot_build_instructions/runbot_build.py
|
Python
|
agpl-3.0
| 6,892 | 0.000145 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import sys
import shutil
import openerp
from openerp import api
from openerp.osv import orm, fields
from openerp.addons.runbot.runbot import mkdirs
_logger = logging.getLogger(__name__)
MAGIC_PID_RUN_NEXT_JOB = -2
def custom_build(func):
"""Decorator for functions which should be overwritten only if
is_custom_build is enabled in repo.
"""
def custom_func(self, cr, uid, ids, context=None):
args = [
('id', 'in', ids),
('branch_id.repo_id.is_custom_build', '=', True)
]
custom_ids = self.search(cr, uid, args, context=context)
regular_ids = list(set(ids) - set(custom_ids))
ret = None
if regular_ids:
regular_func = getattr(super(runbot_build, self), func.func_name)
ret = regular_func(cr, uid, regular_ids, context=context)
if custom_ids:
assert ret is None
ret = func(self, cr, uid, custom_ids, context=context)
return ret
return custom_func
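# Application sketch for the decorator above (mirrors how checkout()/cmd() use
# it further down): builds from repos with is_custom_build=True run the
# decorated body, all others fall through to the parent implementation.
#
#   @custom_build
#   def checkout(self, cr, uid, ids, context=None):
#       ...   # custom-build-only behaviour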
class runbot_build(orm.Model):
_inherit = "runbot.build"
_columns = {
'prebuilt': fields.boolean("Prebuilt"),
}
def job_00_init(self, cr, uid, build, lock_path, log_path):
res = super(runbot_build, self).job_00_init(
cr, uid, build, lock_path, log_path
)
if build.branch_id.repo_id.is_custom_build:
build.pre_build(lock_path, log_path)
build.prebuilt = True
return res
def job_10_test_base(self, cr, uid, build, lock_path, log_path):
if build.branch_id.repo_id.skip_test_jobs:
_logger.info('skipping job_10_test_base')
return MAGIC_PID_RUN_NEXT_JOB
else:
return super(runbot_build, self).job_10_test_base(
cr, uid, build, lock_path, log_path
)
def job_20_test_all(self, cr, uid, build, lock_path, log_path):
if build.branch_id.repo_id.skip_test_jobs:
_logger.info('skipping job_20_test_all')
            with open(log_path, 'w') as f:
                f.write('consider tests as passed: '
'.modules.loading: Modules loaded.')
return MAGIC_PID_RUN_NEXT_JOB
else:
return super(runbot_build, self).job_20_test_all(
cr, uid, build, lock_path, log_path
)
def sub_cmd(self, build, cmd):
if not cmd:
return []
if isinstance(cmd, basestring):
cmd = cmd.split()
internal_vals = {
'custom_build_dir': build.repo_id.custom_build_dir or '',
'custom_server_path': build.repo_id.custom_server_path,
'other_repo_path': build.repo_id.other_repo_id.path or '',
'build_dest': build.dest,
}
return [i % internal_vals for i in cmd]
def pre_build(self, cr, uid, ids, lock_path, log_path, context=None):
"""Run pre-build command if there is one
Substitute path variables after splitting command to avoid problems
with spaces in internal variables.
Run command in build path to avoid relative path issues.
"""
pushd = os.getcwd()
try:
for build in self.browse(cr, uid, ids, context=context):
if build.prebuilt:
continue
cmd = self.sub_cmd(build, build.repo_id.custom_pre_build_cmd)
if not cmd:
continue
os.chdir(build.path())
self.spawn(cmd, lock_path, log_path)
finally:
os.chdir(pushd)
@custom_build
def checkout(self, cr, uid, ids, context=None):
"""Checkout in custom build directories if they are specified
Do same as superclass except for git_export path.
"""
for build in self.browse(cr, uid, ids, context=context):
if build.prebuilt:
continue
# starts from scratch
if os.path.isdir(build.path()):
shutil.rmtree(build.path())
# runbot log path
mkdirs([build.path("logs")])
# checkout branch
build_path = build.path()
custom_build_dir = build.repo_id.custom_build_dir
if custom_build_dir:
mkdirs([build.path(custom_build_dir)])
build_path = os.path.join(build_path, custom_build_dir)
build.repo_id.git_export(build.name, build_path)
@custom_build
def cmd(self, cr, uid, ids, context=None):
"""Get server start script from build config
Overwrite superclass completely
Specify database user in the case of custom config, to allow viewing
after db has been created by Odoo (using current user).
Disable multiworker
"""
build = self.browse(cr, uid, ids[0], context=context)
server_path = build.path(build.repo_id.custom_server_path)
mods = build.repo_id.modules or "base"
params = self.sub_cmd(build, build.repo_id.custom_server_params)
# commandline
cmd = [
sys.executable,
server_path,
"--no-xmlrpcs",
"--xmlrpc-port=%d" % build.port,
"--db_user=%s" % openerp.tools.config['db_user'],
"--workers=0",
] + params
return cmd, mods
@api.cr_uid_ids_context
def server(self, cr, uid, ids, *l, **kw):
for build in self.browse(cr, uid, ids, context=None):
if build.repo_id.is_custom_build:
custom_odoo_path = build.repo_id.custom_odoo_path
if custom_odoo_path and\
os.path.exists(build.path(custom_odoo_path)):
return build.path(custom_odoo_path, *l)
return super(runbot_build, self).server(cr, uid, ids, *l, **kw)
|
atvcaptain/enigma2
|
lib/python/Screens/TaskView.py
|
Python
|
gpl-2.0
| 5,728 | 0.027235 |
from __future__ import absolute_import
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.SystemInfo import SystemInfo
from Components.Task import job_manager
from Screens.InfoBarGenerics import InfoBarNotifications
import Screens.Standby
import Tools.Notifications
from boxbranding import getMachineBrand, getMachineName
class JobView(InfoBarNotifications, Screen, ConfigListScreen):
def __init__(self, session, job, parent=None, cancelable = True, backgroundable = True, afterEventChangeable = True , afterEvent="nothing"):
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Sources.Boolean import Boolean
from Components.ActionMap import ActionMap
Screen.__init__(self, session, parent)
Screen.setTitle(self, _("Job View"))
InfoBarNotifications.__init__(self)
ConfigListScreen.__init__(self, [])
self.parent = parent
self.job = job
if afterEvent:
self.job.afterEvent = afterEvent
self["job_name"] = StaticText(job.name)
self["job_progress"] = Progress()
self["job_task"] = StaticText()
self["summary_job_name"] = StaticText(job.name)
self["summary_job_progress"] = Progress()
self["summary_job_task"] = StaticText()
self["job_status"] = StaticText()
self["finished"] = Boolean()
self["cancelable"] = Boolean(cancelable)
self["backgroundable"] = Boolean(backgroundable)
self["key_blue"] = StaticText(_("Background"))
self.onShow.append(self.windowShow)
self.onHide.append(self.windowHide)
self["setupActions"] = ActionMap(["ColorActions", "SetupActions"],
{
"green": self.ok,
"red": self.abort,
"blue": self.background,
"cancel": self.abort,
"ok": self.ok,
}, -2)
self.settings = ConfigSubsection()
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.settings.afterEvent = ConfigSelection(choices = [("nothing", _("do nothing")), ("close", _("Close")), ("standby", _("go to standby")), ("deepstandby", shutdownString)], default = self.job.afterEvent or "nothing")
self.job.afterEvent = self.settings.afterEvent.value
self.afterEventChangeable = afterEventChangeable
self.setupList()
self.state_changed()
def setupList(self):
if self.afterEventChangeable:
self["config"].setList( [ getConfigListEntry(_("After event"), self.settings.afterEvent) ])
else:
self["config"].hide()
self.job.afterEvent = self.settings.afterEvent.value
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.setupList()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.setupList()
def windowShow(self):
job_manager.visible = True
self.job.state_changed.append(self.state_changed)
def windowHide(self):
job_manager.visible = False
if len(self.job.state_changed) > 0:
self.job.state_changed.remove(self.state_changed)
def state_changed(self):
j = self.job
self["job_progress"].range = j.end
self["summary_job_progress"].range = j.end
self["job_progress"].value = j.progress
self["summary_job_progress"].value = j.progress
#print "JobView::state_changed:", j.end, j.progress
self["job_status"].text = j.getStatustext()
if j.status == j.IN_PROGRESS:
self["job_task"].text = j.tasks[j.current_task].name
self["summary_job_task"].text = j.tasks[j.current_task].name
else:
self["job_task"].text = ""
self["summary_job_task"].text = j.getStatustext()
if j.status in (j.FINISHED, j.FAILED):
self.performAfterEvent()
self["backgroundable"].boolean = False
if j.status == j.FINISHED:
self["finished"].boolean = True
self["cancelable"].boolean = False
elif j.status == j.FAILED:
self["cancelable"].boolean = True
def background(self):
if self["backgroundable"].boolean:
self.close(True)
def ok(self):
if self.job.status in (self.job.FINISHED, self.job.FAILED):
self.close(False)
else:
self.background()
def abort(self):
if self.job.status == self.job.NOT_STARTED:
job_manager.active_jobs.remove(self.job)
self.close(False)
elif self.job.status == self.job.IN_PROGRESS and self["cancelable"].boolean == True:
self.job.cancel()
else:
self.close(False)
def performAfterEvent(self):
self["config"].hide()
if self.settings.afterEvent.value == "nothing":
return
elif self.settings.afterEvent.value == "close" and self.job.status == self.job.FINISHED:
self.close(False)
		from Screens.MessageBox import MessageBox
if self.settings.afterEvent.value == "deepstandby":
if not Screens.Standby.inTryQuitMainloop:
				Tools.Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A sleep timer wants to shut down\nyour %s %s. Shutdown now?") % (getMachineBrand(), getMachineName()), timeout = 20)
elif self.settings.afterEvent.value == "standby":
if not Screens.Standby.inStandby:
Tools.Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A sleep timer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout = 20)
def checkNotifications(self):
InfoBarNotifications.checkNotifications(self)
if not Tools.Notifications.notifications:
if self.settings.afterEvent.value == "close" and self.job.status == self.job.FAILED:
self.close(False)
def sendStandbyNotification(self, answer):
if answer:
Tools.Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Tools.Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
|
brmedeiros/dicey9000
|
tests.py
|
Python
|
mit
| 7,240 | 0.008702 |
import unittest
import unittest.mock as mock
import dice
import dice_config as dcfg
import dice_exceptions as dexc
class DiceInputVerificationTest(unittest.TestCase):
def test_dice_roll_input_wod(self):
examples = {'!r 5':[5, 10, None, 10, 8, 'wod', None],
'!r 2000':[2000, 10, None, 10, 8, 'wod', None],
'!r 2d8':[2, 8, None, None, None, 'wod', None],
'!r 7d6x4':[7, 6, None, 4, None, 'wod', None],
                    '!r 5000d700x700':[5000, 700, None, 700, None, 'wod', None],
'!r 15d20?20':[15, 20, None, None, 20, 'wod', None],
'!r 39d10x5?8':[39, 10, None, 5, 8, 'wod', None],
'!r 1d4x4?4':[1, 4, None, 4, 4, 'wod', None],
'!r 6d6+':[6, 6, 0, None, None, 'wod', None],
                    '!r 5d32+5':[5, 32, 5, None, None, 'wod', None],
'!r 17d4-12':[17, 4, -12, None, None, 'wod', None],
'!r 3d12+x12':[3, 12, 0, 12, None, 'wod', None],
'!r 10d20-7?15':[10, 20, -7, None, 15, 'wod', None],
'!r 768d37+33x5?23':[768, 37, 33, 5, 23, 'wod', None]}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_roll_input_simple(self):
examples = {'!r 7':[7, 6, 0, None, None, 'simple', None],
'!r 2000':[2000, 6, 0, None, None, 'simple', None],
'!r 2d8':[2, 8, None, None, None, 'simple', None],
'!r 7d6x4':[7, 6, None, 4, None, 'simple', None],
'!r 8000d899x899':[8000, 899, None, 899, None, 'simple', None],
'!r 15d20?20':[15, 20, None, None, 20, 'simple', None],
'!r 39d10x5?8':[39, 10, None, 5, 8, 'simple', None],
'!r 1d4x4?4':[1, 4, None, 4, 4, 'simple', None],
'!r 6d6+':[6, 6, 0, None, None, 'simple', None],
'!r 5d32+5':[5, 32, 5, None, None, 'simple', None],
'!r 17d4-12':[17, 4, -12, None, None, 'simple', None],
'!r 3d12+x12':[3, 12, 0, 12, None, 'simple', None],
'!r 10d20-7?15':[10, 20, -7, None, 15, 'simple', None],
'!r 768d37+33x5?23':[768, 37, 33, 5, 23, 'simple', None]}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, 'simple')
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_options_help(self):
examples = {'!r help': [None, None, None, None, None, dcfg.mode, 'Find all available commands at:'
'\nhttps://github.com/brmedeiros/dicey9000/blob/master/README.md']}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, dcfg.mode)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_options_mode(self):
examples = {'!r set wod': [None, None, None, None, None,
'wod', 'Default mode (!r n) set to World of Darksness (WoD)'],
'!r set simple': [None, None, None, None, None,
'simple', 'Default mode (!r n) set to simple (nd6)']}
for dmode in ['wod', 'simple']:
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, dmode)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_input_exception(self):
examples = ['!r ', '!r dmeoamdef', '!r kelf laij', '!r 2 3', '!r 6dz','!r 30dx', '!r 5d7x7?', '!r 9d10?',
'!r -10', '!r -6d8', '!r 6d8x?10', '!r 12d12x18?', '!r set ', '!r set help', '!r set akneoi',
'!r 3d6 help', '!r set 6d8?4 wod', '!r 6d12-', '!r 8d4-45?+', '!r 12d6+8-9', '!r 8d20-923+1x10?15',
'!r 6+','!r 5+2', '!r 7-', '!r 12-3', '!r 20x4', '!r 25?12', '!r 2+7x4?4', '!r 5-12x15?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.RollInputError, dice.dice_input_verification, example, mode)
def test_exploding_dice_exception(self):
examples = ['!r 5d8x9', '!r 12d60x100', '!r 1d6x9?4', '!r 78d5+x43', '!r 6d12-10x15', '!r 8d20+1x22?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.ExplodingDiceError, dice.dice_input_verification, example, mode)
def test_exploding_dice_too_small_exception(self):
examples = ['!r 5d8x1', '!r 8d6x2', '!r 3d70x1?10', '!r 10d2x2?2', '!r 78d5+x2', '!r 6d12-10x1',
'!r 8d20+1x2?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.ExplodingDiceTooSmallError, dice.dice_input_verification, example, mode)
def test_success_condition_exception(self):
examples = ['!r 2d8?9', '!r 2d15?55', '!r 65d10x6?11', '!r 32d5x5?100', '!r 78d5+?6', '!r 6d12-10?45',
'!r 8d20+1x18?200']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.SuccessConditionError, dice.dice_input_verification, example, mode)
def test_dice_type_exception(self):
examples = ['!r 2d0', '!r 50d0?55', '!r 6d0x6?11', '!r 32d0x5?100', '!r 78d0+?6', '!r 6d0-10?45',
'!r 8d0+1x18?200']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.DiceTypeError, dice.dice_input_verification, example, mode)
class DiceRollTest(unittest.TestCase):
@mock.patch('random.randint')
def test_roll_dice(self, random_call):
results = [1, 4, 6, 6, 2, 3, 5]
random_call.side_effect = results
target = dice.DiceRoll(7, 6, None, None, None)
target.roll_dice()
self.assertEqual(7, target.number_of_dice)
self.assertEqual(7, len(target.results))
for i, result in enumerate(results):
self.assertEqual(result, target.results[i])
self.assertEqual(str(result), target.formated_results[i])
@mock.patch('random.randint')
def test_total(self, random_call):
results = [1, 10, 5, 4, 10]
random_call.side_effect = results
examples = [0, 5, -10, 22, -50]
for example in examples:
target = dice.DiceRoll(5, 10, example, None, None)
target.roll_dice()
self.assertEqual(example, target.roll_modifier)
self.assertEqual(sum(results) + example, target.total)
@mock.patch('random.randint')
def test_explode(self, random_call):
results = [1, 12, 5, 4, 7, 6]
random_call.side_effect = results
target = dice.DiceRoll(6, 12, None, 12, None)
target.roll_dice()
self.assertEqual(12, target.explode_value)
self.assertEqual(len(results)+1, len(target.results))
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/numpy/linalg/__init__.py
|
Python
|
agpl-3.0
| 2,178 | 0 |
"""
Core Linear Algebra Tools
=========================
============
|
=== ==========================================================
Linear algebra basics
==========================================================================
norm Vector or matrix norm
inv Inverse of a square matrix
solve Solve a linear system of equations
det Determinant of a square matrix
slogdet Logarithm of the determinant of a square matrix
lstsq Solve linear least-squares problem
pinv            Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
matrix_power Integer power of a square matrix
=============== ==========================================================
=============== ==========================================================
Eigenvalues and decompositions
==========================================================================
eig Eigenvalues and vectors of a square matrix
eigh Eigenvalues and eigenvectors of a Hermitian matrix
eigvals Eigenvalues of a square matrix
eigvalsh Eigenvalues of a Hermitian matrix
qr QR decomposition of a matrix
svd Singular value decomposition of a matrix
cholesky Cholesky decomposition of a matrix
=============== ==========================================================
=============== ==========================================================
Tensor operations
==========================================================================
tensorsolve Solve a linear tensor equation
tensorinv Calculate an inverse of a tensor
=============== ==========================================================
=============== ==========================================================
Exceptions
==========================================================================
LinAlgError Indicates a failed linear algebra operation
=============== ==========================================================
"""
# To get sub-modules
from info import __doc__
from linalg import *
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
intel/ipmctl
|
BaseTools/Source/Python/Eot/Parser.py
|
Python
|
bsd-3-clause
| 33,751 | 0.004207 |
## @file
# This file is used to define common parsing related functions used in parsing
# Inf/Dsc/Makefile process
#
# Copyright (c) 2008 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os, re
import Common.EdkLogger as EdkLogger
from Common.DataType import *
from CommonDataClass.DataClass import *
from Common.String import CleanString, GetSplitValueList, ReplaceMacro
import EotGlobalData
from Common.Misc import sdict
from Common.String import GetSplitList
from Common.LongFilePathSupport import OpenLongFilePath as open
## PreProcess() method
#
# Pre process a file
#
# 1. Remove all comments
# 2. Merge multi-line statements into one line
#
# @param Filename: Name of the file to be parsed
# @param MergeMultipleLines: Whether to merge multiple lines into one
# @param LineNo: Default line no
#
# @return Lines: The file contents after removing comments
#
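# Illustrative example (hypothetical input): with MergeMultipleLines enabled, a
# line ending in the TAB_SLASH continuation character is merged with the line(s)
# that follow it into one entry, and blank placeholder lines are appended so the
# returned list keeps the original file's line numbering.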
def PreProcess(Filename, MergeMultipleLines = True, LineNo = -1):
Lines = []
Filename = os.path.normpath(Filename)
if not os.path.isfile(Filename):
EdkLogger.error("Eot", EdkLogger.FILE_NOT_FOUND, ExtraData=Filename)
IsFindBlockComment = False
IsFindBlockCode = False
ReservedLine = ''
ReservedLineLength = 0
for Line in open(Filename, 'r'):
Line = Line.strip()
# Remove comment block
if Line.find(TAB_COMMENT_EDK_START) > -1:
ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
IsFindBlockComment = True
if Line.find(TAB_COMMENT_EDK_END) > -1:
Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
ReservedLine = ''
IsFindBlockComment = False
if IsFindBlockComment:
Lines.append('')
continue
# Remove comments at tail and remove spaces again
Line = CleanString(Line)
if Line == '':
Lines.append('')
continue
if MergeMultipleLines:
# Add multiple lines to one line
if IsFindBlockCode and Line[-1] != TAB_SLASH:
ReservedLine = (ReservedLine + TAB_SPACE_SPLIT + Line).strip()
Lines.append(ReservedLine)
for Index in (0, ReservedLineLength):
Lines.append('')
ReservedLine = ''
ReservedLineLength = 0
IsFindBlockCode = False
continue
if Line[-1] == TAB_SLASH:
ReservedLine = ReservedLine + TAB_SPACE_SPLIT + Line[0:-1].strip()
ReservedLineLength = ReservedLineLength + 1
IsFindBlockCode = True
continue
Lines.append(Line)
return Lines
## AddToGlobalMacro() method
#
# Add a macro to EotGlobalData.gMACRO
#
# @param Name: Name of the macro
# @param Value: Value of the macro
#
def AddToGlobalMacro(Name, Value):
Value = ReplaceMacro(Value, EotGlobalData.gMACRO, True)
EotGlobalData.gMACRO[Name] = Value
## AddToSelfMacro() method
#
# Parse a line of macro definition and add it to a macro set
#
# @param SelfMacro: The self macro set
# @param Line: The line of a macro definition
#
# @return Name: Name of macro
# @return Value: Value of macro
#
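# Illustrative example (hypothetical line): passing "EDK_SOURCE = $(WORKSPACE)/Edk"
# expands any known macros in the value, stores the result under
# SelfMacro['EDK_SOURCE'], and returns the ('EDK_SOURCE', <expanded value>) pair.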
def AddToSelfMacro(SelfMacro, Line):
Name, Value = '', ''
List = GetSplitValueList(Line, TAB_EQUAL_SPLIT, 1)
if len(List) == 2:
Name = List[0]
Value = List[1]
Value = ReplaceMacro(Value, EotGlobalData.gMACRO, True)
Value = ReplaceMacro(Value, SelfMacro, True)
SelfMacro[Name] = Value
return (Name, Value)
## GetIncludeListOfFile() method
#
# Get the include path list for a source file
#
# 1. Find which INF file the source file belongs to
# 2. Find the inf's package
# 3. Return the include path list of the package
#
# @param WorkSpace: WORKSPACE path
# @param Filepath: File path
# @param Db: Eot database
#
# @return IncludeList: A list of include directories
#
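# Illustrative flow (hypothetical file): the queries walk the source file to its
# owning INF, then to the INF's declaring DEC package, and each include entry of
# that DEC is joined onto the DEC's directory and returned as a normalized path.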
def GetIncludeListOfFile(WorkSpace, Filepath, Db):
IncludeList = []
Filepath = os.path.normpath(Filepath)
SqlCommand = """
select Value1 from Inf where Model = %s and BelongsToFile in(
select distinct B.BelongsToFile from File as A left join Inf as B
where A.ID = B.BelongsToFile and B.Model = %s and (A.Path || '%s' || B.Value1) = '%s')""" \
% (MODEL_META_DATA_PACKAGE, MODEL_EFI_SOURCE_FILE, '\\', Filepath)
RecordSet = Db.TblFile.Exec(SqlCommand)
for Record in RecordSet:
DecFullPath = os.path.normpath(os.path.join(WorkSpace, Record[0]))
(DecPath, DecName) = os.path.split(DecFullPath)
SqlCommand = """select Value1 from Dec where BelongsToFile =
(select ID from File where FullPath = '%s') and Model = %s""" \
% (DecFullPath, MODEL_EFI_INCLUDE)
NewRecordSet = Db.TblDec.Exec(SqlCommand)
for NewRecord in NewRecordSet:
IncludePath = os.path.normpath(os.path.join(DecPath, NewRecord[0]))
if IncludePath not in IncludeList:
IncludeList.append(IncludePath)
return IncludeList
## GetTableList() method
#
# Search table file and find all small tables
#
# @param FileModelList: Model code for the file list
# @param Table: Table to insert records
# @param Db: Eot database
|
#
# @return TableList: A list of tables
#
def GetTableList(FileModelList, Table, Db):
TableList = []
    SqlCommand = """select ID, FullPath from File where Model in %s""" % str(FileModelList)
RecordSet = Db.TblFile.Exec(SqlCommand)
for Record in RecordSet:
TableName = Table + str(Record[0])
TableList.append([TableName, Record[1]])
return TableList
## GetAllIncludeDirs() method
#
# Find all Include directories
#
# @param Db: Eot database
#
# @return IncludeList: A list of include directories
#
def GetAllIncludeDirs(Db):
IncludeList = []
SqlCommand = """select distinct Value1 from Inf where Model = %s order by Value1""" % MODEL_EFI_INCLUDE
RecordSet = Db.TblInf.Exec(SqlCommand)
for Record in RecordSet:
IncludeList.append(Record[0])
return IncludeList
## GetAllIncludeFiles() method
#
# Find all Include files
#
# @param Db: Eot database
#
# @return IncludeFileList: A list of include files
#
def GetAllIncludeFiles(Db):
IncludeList = GetAllIncludeDirs(Db)
IncludeFileList = []
for Dir in IncludeList:
if os.path.isdir(Dir):
SubDir = os.listdir(Dir)
for Item in SubDir:
if os.path.isfile(Item):
IncludeFileList.append(Item)
return IncludeFileList
## GetAllSourceFiles() method
#
# Find all source files
#
# @param Db: Eot database
#
# @return SourceFileList: A list of source files
#
def GetAllSourceFiles(Db):
SourceFileList = []
SqlCommand = """select distinct Value1 from Inf where Model = %s order by Value1""" % MODEL_EFI_SOURCE_FILE
RecordSet = Db.TblInf.Exec(SqlCommand)
for Record in RecordSet:
SourceFileList.append(Record[0])
return SourceFileList
## GetAllFiles() method
#
# Find all files, both source files and include files
#
# @param Db: Eot database
#
# @return FileList: A list of files
#
def GetAllFiles(Db):
FileList = []
IncludeFileList = GetAllIncludeFiles(Db)
SourceFileList = GetAllSourceFiles(Db)
for Item in IncludeFileList:
if os.
|
CognitionGuidedSurgery/restflow
|
doc/conf.py
|
Python
|
gpl-3.0
| 8,911 | 0.005948 |
# -*- coding: utf-8 -*-
#
# restflow documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 31 07:32:50 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('_themes'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'undoc-members',
'show-inheritance', ]
#autodoc_member_order = 'groupwise'
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'restflow'
copyright = u'2014, Alexander Weigl, Nicolai Schoch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = 'alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'kr'
html_theme_path = ['_themes']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'restflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'restflow.tex', u'restflow Documentation',
u'Alexander Weigl, Nicolai Schoch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'restflow', u'restflow Documentation',
[u'Alexander Weigl, Nicolai Schoch'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target
|
pablorecio/Cobaya
|
src/cobaya/app.py
|
Python
|
gpl-3.0
| 4,990 | 0.000601 |
###############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#    GNU General Public License for more details.                            #
#                                                                            #
#    You should have received a copy of the GNU General Public License      #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Copyright (C) 2010, Pablo Recio Quijano <precio@yaco.es> #
# 2010, Lorenzo Gil Sanchez <lgs@yaco.es> #
###############################################################################
from datetime import datetime, timedelta
from optparse import OptionParser
from os import path
from cobaya import version_string
from cobaya.hamster_task import HamsterTask
from cobaya.hamster_db import HamsterDB
from cobaya.config import Config
from cobaya.remote_server import RemoteServer
class CobayaApp(object):
def __init__(self, options):
self.conf = Config()
self.conf.load(options.config_file)
self.log_file = self.conf.get_option('hamster.log_file')
self.ids = []
if path.exists(self.log_file):
f = file(self.log_file, 'r')
self.ids = f.readlines()
else:
f = file(self.log_file, 'w')
f.close()
self.tasks = get_all_tasks(self.conf)
for id in self.tasks:
str_id = ('%d\n' % id)
if str_id in self.ids:
self.tasks[id].remote_sync = True
def generate_unsynced_data(self):
data = []
for id in self.tasks:
if self.tasks[id].remote_sync == False and \
self.tasks[id].time != 0.0: # not synced or not finished
data = self.append_and_merge(data, id)
return data
def append_and_merge(self, data, id):
d = self.tasks[id].to_dict()
band = False
for i in range(len(data)):
if data[i]['date'] == d['date'] and \
data[i]['project'] == d['project'] and \
data[i]['ticket'] == d['ticket']:
data[i]['time'] += d['time']
if (d['description'] and not data[i]['description']) or \
(d['description'] and not d['description'] in data[i]['description']):
if data[i]['description']:
data[i]['description'] = '%s ||| %s' % (data[i]['description'], d['description'])
else:
data[i]['description'] = d['description']
band = True
if not band or not len(data):
data.append(d)
return data
def perform_notification(self):
unsynced_data = self.generate_unsynced_data()
server = RemoteServer(self.conf)
responses = server.send_tasks(unsynced_data)
news_id = []
synced_tasks = responses['accepted'] + responses['duplicated']
for task in synced_tasks:
id = task['task_id']
news_id.append("%d\n" % id)
self.tasks[id].remote_sync = True
f = file(self.log_file, 'a')
f.writelines(news_id)
f.close()
def get_all_tasks(conf):
    """Returns a list with every task registered on Hamster.
"""
db = HamsterDB(conf)
fact_list = db.all_facts_id
security_days = int(conf.get_option('tasks.security_days'))
today = datetime.today()
tasks = {}
for fact_id in fact_list:
ht = HamsterTask(fact_id, conf, db)
if ht.end_time:
end_time = ht.get_object_dates()[1]
if today - timedelta(security_days) <= end_time:
rt = ht.get_remote_task()
tasks[rt.task_id] = rt
db.close_connection()
print 'Obtained %d tasks' % len(tasks)
return tasks
def main():
parser = OptionParser(usage="usage: %prog [options]",
version="%prog " + version_string)
parser.add_option("-c", "--config", dest="config_file", default=None,
help="configuration file to use")
(options, args) = parser.parse_args()
cob = CobayaApp(options)
cob.perform_notification()
if __name__ == '__main__':
main()
|
celiafish/scikit-xray
|
skxray/core/utils.py
|
Python
|
bsd-3-clause
| 38,692 | 0.00031 |
#! encoding: utf-8
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for the 'core' data types.
"""
from __future__ import absolute_import, division, print_function
import six
from six.moves import zip
from six import string_types
import time
import sys
from collections import namedtuple, MutableMapping, defaultdict, deque
import numpy as np
from itertools import tee
import logging
logger = logging.getLogger(__name__)
try:
import src.ctrans as ctrans
except ImportError:
try:
import ctrans
except ImportError:
ctrans = None
md_value = namedtuple("md_value", ['value', 'units'])
_defaults = {
"bins": 100,
'nx': 100,
'ny': 100,
'nz': 100
}
class NotInstalledError(ImportError):
'''
Custom exception that should be subclassed to handle
specific missing libraries
'''
pass
class MD_dict(MutableMapping):
"""
A class to make dealing with the meta-data scheme for DataExchange easier
Examples
--------
Getting and setting data by path is possible
>>> tt = MD_dict()
>>> tt['name'] = 'test'
>>> tt['nested.a'] = 2
>>> tt['nested.b'] = (5, 'm')
>>> tt['nested.a'].value
2
>>> tt['nested.a'].units is None
True
>>> tt['name'].value
'test'
>>> tt['nested.b'].units
'm'
"""
def __init__(self, md_dict=None):
# TODO properly walk the input on upgrade dicts -> MD_dict
if md_dict is None:
md_dict = dict()
self._dict = md_dict
self._split = '.'
def __repr__(self):
return self._dict.__repr__()
# overload __setitem__ so dotted paths work
def __setitem__(self, key, val):
key_split = key.split(self._split)
tmp = self._dict
for k in key_split[:-1]:
try:
tmp = tmp[k]._dict
except:
tmp[k] = type(self)()
tmp = tmp[k]._dict
if isinstance(tmp, md_value):
# TODO make message better
raise KeyError("trying to use a leaf node as a branch")
# if passed in an md_value, set it and return
if isinstance(val, md_value):
tmp[key_split[-1]] = val
return
# catch the case of a bare string
elif isinstance(val, string_types):
# a value with out units
tmp[key_split[-1]] = md_value(val, 'text')
return
# not something easy, try to guess what to do instead
try:
# if the second element is a string or None, cast to named tuple
if isinstance(val[1], string_types) or val[1] is None:
print('here')
tmp[key_split[-1]] = md_value(*val)
# else, assume whole thing is the value with no units
else:
tmp[key_split[-1]] = md_value(val, None)
        # catch any type errors from trying to index into non-indexable things
# or from trying to use iterables longer than 2
except TypeError:
tmp[key_split[-1]] = md_value(val, None)
def __getitem__(self, key):
key_split = key.split(self._split)
|
tmp = self._dict
for k in key_split[:-1]:
try:
tmp = tmp[k]._dict
except:
tmp[k] = type(self)()
tmp = tmp[k]._dict
if isinstance(tmp, md_value):
# TODO make message better
raise KeyError("trying to use a leaf node as a branch")
return tmp.get(key_split[-1], None)
def __delitem__(self, key):
# pass one delete the entry
# TODO make robust to non-keys
key_split = key.split(self._split)
tmp = self._dict
for k in key_split[:-1]:
# make sure we are grabbing the internal dict
tmp = tmp[k]._dict
del tmp[key_split[-1]]
# TODO pass 2 remove empty branches
def __len__(self):
return len(list(iter(self)))
def __iter__(self):
return _iter_helper([], self._split, self._dict)
def _iter_helper(path_list, split, md_dict):
"""
Recursively walk the tree and return the names of the leaves
"""
for k, v in six.iteritems(md_dict):
if isinstance(v, md_value):
yield split.join(path_list + [k])
else:
for inner_v in _iter_helper(path_list + [k], split, v._dict):
yield inner_v
class verbosedict(dict):
"""
A sub-class of dict which raises more verbose errors if
a key is not found.
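    Illustrative behaviour: ``verbosedict(a=1)['b']`` raises a KeyError whose
    message lists the extant keys (or just their count when there are more
    than 25) instead of only echoing the missing key.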
"""
def __getitem__(self, key):
try:
v = dict.__getitem__(self, key)
except KeyError:
if len(self) < 25:
new_msg = ("You tried to access the key '{key}' "
"which does not exist. The "
"extant keys are: {valid_keys}").format(
key=key, valid_keys=list(self))
else:
new_msg = ("You tried to access the key '{key}' "
"which does not exist. There "
"are {num} extant keys, which is too many to "
"show you").format(
key=key, num=len(self))
six.reraise(KeyError, KeyError(new_msg), sys.exc_info()[2])
return v
class RCParamDict(MutableMapping):
"""A class to make dealing with storing default values easier.
RC params is a hold- over from the UNIX days where configuration
files are 'rc' files. See
http://en.wikipedia.org/wiki/Configuration_file
Examples
--------
Getting and setting data by path is possible
|
jessicalucci/TaskManagement
|
taskflow/persistence/backends/sqlalchemy/models.py
|
Python
|
apache-2.0
| 3,197 | 0 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy import types as types
from taskflow.openstack.common import jsonutils
from taskflow.openstack.common import timeutils
from taskflow.openstack.common import uuidutils
BASE = declarative_base()
# TODO(harlowja): remove when oslo.db exists
class TimestampMixin(object):
created_at = Column(DateTime, default=timeutils.utcnow)
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
class Json(types.TypeDecorator, types.MutableType):
impl = types.Text
def process_bind_param(self, value, dialect):
return jsonutils.dumps(value)
def process_result_value(self, value, dialect):
return jsonutils.loads(value)
class ModelBase(TimestampMixin):
"""Base model for all taskflow objects"""
    uuid = Column(String, default=uuidutils.generate_uuid,
primary_key=True, nullable=False, unique=True)
name = Column(String, nullable=True)
meta = Column(Json, nullable=True)
class LogBook(BASE, ModelBase):
"""Represents a logbook for a set of flows"""
__tablename__ = 'logbooks'
    # Relationships
flowdetails = relationship("FlowDetail",
single_parent=True,
backref=backref("logbooks",
cascade="save-update, delete, "
"merge"))
class FlowDetail(BASE, ModelBase):
__tablename__ = 'flowdetails'
# Member variables
state = Column(String)
# Relationships
parent_uuid = Column(String, ForeignKey('logbooks.uuid'))
taskdetails = relationship("TaskDetail",
single_parent=True,
backref=backref("flowdetails",
cascade="save-update, delete, "
"merge"))
class TaskDetail(BASE, ModelBase):
__tablename__ = 'taskdetails'
# Member variables
state = Column(String)
results = Column(Json)
exception = Column(Json)
stacktrace = Column(Json)
version = Column(String)
# Relationships
parent_uuid = Column(String, ForeignKey('flowdetails.uuid'))
|
usc-isi-i2/etk
|
etk/cli/cryptographic_hash_extractor.py
|
Python
|
mit
| 947 | 0.004224 |
import warnings
import sys
import argparse
from etk.extractors.cryptographic_hash_extractor import CryptographicHashExtractor
cryptographic_hash_extractor = CryptographicHashExtractor()
def add_arguments(parser):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
parser.description = 'Examples:\n' \
'python -m etk cryptographic_hash_extractor /tmp/input.txt\n' \
'cat /tmp/input.txt | python -m etk cryptographic_hash_extractor'
parser.add_argument('input_file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
def run(args):
"""
Args:
args (argparse.Namespace)
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for line in args.input_file:
extractions = cryptographic_hash_extractor.extract(line)
for e in extractions:
print(e.value)
|
ruymanengithub/vison
|
vison/campaign/CEAFPAcampaign.py
|
Python
|
gpl-3.0
| 4,667 | 0.003643 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Description of the CEA FPA Campaign.
:History:
Created on Wed Oct 02 17:26:00 2019
:author: Ruyman Azzollini
"""
# IMPORT STUFF
from collections import OrderedDict
from pdb import set_trace as stop
import numpy as np
import copy
#from vison.pipe import lib as pilib
#from vison.support import context
from vison.support import utils
from vison.fpatests.cea_dec19 import FWD_WARM
from vison.fpatests.cea_dec19 import FPA_BIAS
from vison.fpatests.cea_dec19 import FPA_CHI
|
NJ
from vison.fpatests.cea_dec19 import FPA_DARK
# END IMPORT
def generate_test_sequence(toGen, elvis='FPA', FPAdesign='final'):
"""
| Function that generates a number of tests, as instances of their corresponding
task classes.
    | Aimed at the TVAC campaign of the FPA at CEA (December 2019).
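    | Illustrative example (an assumption about typical input): toGen = {'BIAS_RWDV_COLD.1': True}
    | strips the '.1' iteration tag, generates the BIAS_RWDV_COLD test once, and
    | stores the copy under the key 'BIAS_RWDV_COLD.1' in the returned sequence.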
"""
taskslist = toGen.keys()
    test_sequence = OrderedDict()
for taskname in taskslist:
if not toGen[taskname]:
continue
strip_taskname, iteration = utils.remove_iter_tag(taskname, Full=True)
_toGen = OrderedDict()
_toGen[strip_taskname] = True
ans = _generate_test_sequence(_toGen, elvis=elvis, FPAdesign=FPAdesign)
if iteration is not None:
for key in list(ans.keys()):
test_sequence['%s.%i' % (key, iteration)] = copy.deepcopy(ans[key])
else:
for key in list(ans.keys()):
test_sequence[key] = copy.deepcopy(ans[key])
return test_sequence
def _generate_test_sequence(toGen, elvis='FPA', FPAdesign='final'):
""" """
#print 'GENERATING TEST SEQUENCE...'
test_sequence = OrderedDict()
_toGen = dict(FWD_WARM=False,
CHINJ=False,
DARK=False,
BIAS_RWDVS_WARM=False,
BIAS_RWDV_WARM=False,
BIAS_RWDVS_COLD=False,
BIAS_RWDV_COLD=False,
BIAS_FWD_COLD=False)
_toGen.update(toGen)
commoninputs = dict(elvis=elvis,
FPAdesign=FPAdesign)
# DARK-CURRENT RAMP
if _toGen['FWD_WARM']:
fwd_warm_inp = dict(
test='FWD_WARM')
fwd_warm_inp.update(commoninputs)
fwd_warm = FWD_WARM.FWD_WARM(inputs=fwd_warm_inp.copy())
test_sequence['FWD_WARM'] = copy.deepcopy(fwd_warm)
if _toGen['CHINJ']:
chinj_inp = dict(
test='CHINJ',
non=30,
noff=50)
chinj_inp.update(commoninputs)
chinj = FPA_CHINJ.CHINJ(inputs=chinj_inp.copy())
test_sequence['CHINJ'] = copy.deepcopy(chinj)
if _toGen['DARK']:
dark_inp = dict(
test='DARK',
exptime=565.)
dark_inp.update(commoninputs)
dark = FPA_DARK.DARK(inputs=dark_inp.copy())
test_sequence['DARK'] = copy.deepcopy(dark)
if _toGen['BIAS_RWDVS_WARM']:
rwdvs_warm_inp = dict(
test='BIAS_RWDVS_WARM',
temperature='WARM',
readmode='RWDVS')
rwdvs_warm_inp.update(commoninputs)
rwdvs_warm = FPA_BIAS.FPA_BIAS(inputs=rwdvs_warm_inp.copy())
test_sequence['BIAS_RWDVS_WARM'] = copy.deepcopy(rwdvs_warm)
if _toGen['BIAS_RWDV_WARM']:
rwdv_warm_inp = dict(
test='BIAS_RWDV_WARM',
temperature='WARM',
readmode='RWDV')
rwdv_warm_inp.update(commoninputs)
rwdv_warm = FPA_BIAS.FPA_BIAS(inputs=rwdv_warm_inp.copy())
test_sequence['BIAS_RWDV_WARM'] = copy.deepcopy(rwdv_warm)
if _toGen['BIAS_RWDVS_COLD']:
rwdvs_cold_inp = dict(
test='BIAS_RWDVS_COLD',
temperature='COLD',
readmode='RWDVS')
rwdvs_cold_inp.update(commoninputs)
rwdvs_cold = FPA_BIAS.FPA_BIAS(inputs=rwdvs_cold_inp.copy())
test_sequence['BIAS_RWDVS_COLD'] = copy.deepcopy(rwdvs_cold)
if _toGen['BIAS_RWDV_COLD']:
rwdv_cold_inp = dict(
test='BIAS_RWDV_COLD',
temperature='COLD',
readmode='RWDV')
rwdv_cold_inp.update(commoninputs)
rwdv_cold = FPA_BIAS.FPA_BIAS(inputs=rwdv_cold_inp.copy())
test_sequence['BIAS_RWDV_COLD'] = copy.deepcopy(rwdv_cold)
if _toGen['BIAS_FWD_COLD']:
fwd_cold_inp = dict(
test='BIAS_FWD_COLD',
temperature='COLD',
readmode='FWD')
fwd_cold_inp.update(commoninputs)
fwd_cold = FPA_BIAS.FPA_BIAS(inputs=fwd_cold_inp.copy())
test_sequence['BIAS_FWD_COLD'] = copy.deepcopy(fwd_cold)
return test_sequence
|
wprice/qpid-proton
|
proton-c/bindings/python/proton/handlers.py
|
Python
|
apache-2.0
| 20,366 | 0.002062 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import heapq, logging, os, re, socket, time, types
from proton import dispatch, generate_uuid, PN_ACCEPTED, SASL, symbol, ulong, Url
from proton import Collector, Connection, Delivery, Described, Endpoint, Event, Link, Terminus, Timeout
from proton import Message, Handler, ProtonException, Transport, TransportException, ConnectionException
from select import select
class OutgoingMessageHandler(Handler):
"""
A utility for simpler and more intuitive handling of delivery
events related to outgoing i.e. sent messages.
"""
def __init__(self, auto_settle=True, delegate=None):
self.auto_settle = auto_settle
self.delegate = delegate
def on_link_flow(self, event):
if event.link.is_sender and event.link.credit:
self.on_sendable(event)
def on_delivery(self, event):
dlv = event.delivery
if dlv.link.is_sender and dlv.updated:
if dlv.remote_state == Delivery.ACCEPTED:
self.on_accepted(event)
elif dlv.remote_state == Delivery.REJECTED:
self.on_rejected(event)
elif dlv.remote_state == Delivery.RELEASED or dlv.remote_state == Delivery.MODIFIED:
self.on_released(event)
if dlv.settled:
self.on_settled(event)
if self.auto_settle:
dlv.settle()
def on_sendable(self, event):
"""
Called when the sender link has credit and messages can
therefore be transferred.
"""
if self.delegate:
dispatch(self.delegate, 'on_sendable', event)
def on_accepted(self, event):
"""
Called when the remote peer accepts an outgoing message.
"""
if self.delegate:
dispatch(self.delegate, 'on_accepted', event)
def on_rejected(self, event):
"""
Called when the remote peer rejects an outgoing message.
"""
if self.delegate:
dispatch(self.delegate, 'on_rejected', event)
def on_released(self, event):
"""
Called when the remote peer releases an outgoing message. Note
that this may be in response to either the RELEASE or MODIFIED
state as defined by the AMQP specification.
"""
if self.delegate:
dispatch(self.delegate, 'on_released', event)
def on_settled(self, event):
"""
Called when the remote peer has settled the outgoing
        message. This is the point at which it should never be
retransmitted.
"""
if self.delegate:
dispatch(self.delegate, 'on_settled', event)
def recv_msg(delivery):
msg = Message()
msg.decode(delivery.link.recv(delivery.pending))
delivery.link.advance()
return msg
class Reject(ProtonException):
"""
    An exception that indicates a message should be rejected
"""
pass
class Release(ProtonException):
"""
    An exception that indicates a message should be released
"""
pass
class Acking(object):
def accept(self, delivery):
"""
Accepts a received message.
"""
self.settle(delivery, Delivery.ACCEPTED)
def reject(self, delivery):
"""
Rejects a received message that is considered invalid or
unprocessable.
"""
self.settle(delivery, Delivery.REJECTED)
def release(self, delivery, delivered=True):
"""
Releases a received message, making it available at the source
for any (other) interested receiver. The ``delivered``
parameter indicates whether this should be considered a
delivery attempt (and the delivery count updated) or not.
"""
if delivered:
self.settle(delivery, Delivery.MODIFIED)
else:
self.settle(delivery, Delivery.RELEASED)
def settle(self, delivery, state=None):
if state:
delivery.update(state)
delivery.settle()
class IncomingMessageHandler(Handler, Acking):
"""
A utility for simpler and more intuitive handling of delivery
events related to incoming i.e. received messages.
"""
def __init__(self, auto_accept=True, delegate=None):
self.delegate = delegate
self.auto_accept = auto_accept
def on_delivery(self, event):
dlv = event.delivery
if not dlv.link.is_receiver: return
if dlv.readable and not dlv.partial:
event.message = recv_msg(dlv)
if event.link.state & Endpoint.LOCAL_CLOSED:
if self.auto_accept:
dlv.update(Delivery.RELEASED)
dlv.settle()
else:
try:
self.on_message(event)
if self.auto_accept:
dlv.update(Delivery.ACCEPTED)
dlv.settle()
except Reject:
dlv.update(Delivery.REJECTED)
dlv.settle()
except Release:
dlv.update(Delivery.MODIFIED)
dlv.settle()
elif dlv.updated and dlv.settled:
self.on_settled(event)
def on_message(self, event):
"""
Called when a message is received. The message itself can be
obtained as a property on the event. For the purpose of
        referring to this message in further actions (e.g. if
        explicitly accepting it), the ``delivery`` should be used, also
obtainable via a property on the event.
"""
if self.delegate:
dispatch(self.delegate, 'on_message', event)
def on_settled(self, event):
if self.delegate:
dispatch(self.delegate, 'on_settled', event)
class EndpointStateHandler(Handler):
|
"""
A utility that exposes 'endpoint' events i.e. the open/close for
links, sessions and connections in a more intuitive manner. A
XXX_opened method will be called when both local and remote peers
have opened the link, session or connection. This can be used to
confirm a locally initiated action for example. A XXX_opening
method will be called when the remote peer has requested an open
that was not initiated locally. By default this will simply open
locally, which then triggers the XXX_opened call. The same applies
to close.
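    Illustrative flow (an assumption, not tied to any specific peer): a locally
    initiated connection.open() leads to on_connection_opened once the remote
    peer opens too, while an open arriving only from the peer surfaces as
    on_connection_opening, whose default behaviour is simply to open locally.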
"""
def __init__(self, peer_close_is_error=False, delegate=None):
self.delegate = delegate
self.peer_close_is_error = peer_close_is_error
@classmethod
def is_local_open(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_ACTIVE
@classmethod
def is_local_uninitialised(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_UNINIT
@classmethod
def is_local_closed(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_CLOSED
@classmethod
def is_remote_open(cls, endpoint):
return endpoint.state & Endpoint.REMOTE_ACTIVE
@classmethod
def is_remote_closed(cls, endpoint):
return endpoint.state & Endpoint.REMOTE_CLOSED
@classmethod
def print_error(cls, endpoint, endpoint_type):
if endpoint.remote_condition:
logging.error(endpoint.remote_condition.description)
elif cls.is_local_open(endpoint) and cls.is_remote_closed(endpoint):
            logging.error("%s closed by peer" % endpoint_type)
|
openmaraude/APITaxi
|
APITaxi_models2/migrations/versions/20170314_18:59:50_ccd5b0142a76_add_customer_foreign_key.py.py
|
Python
|
agpl-3.0
| 631 | 0.011094 |
"""Add customer foreign key
Revision ID: ccd5b0142a76
Revises: 243adac5e3e9
Create Date: 2017-03-14 18:59:50.505319
"""
# revision identifiers, used by Alembic.
revision = 'ccd5b0142a76'
down_revision = '243adac5e3e9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
    op.create_foreign_key('hail_customer_id', 'hail', 'customer', ['customer_id', 'added_by'], ['id', 'moteur_id'])
### end Alembic commands ###
def downgrade():
op.drop_constraint('hail_customer_id', 'hail', type_='foreignkey')
|
thuydang/djagazin
|
docs/9_tmp/hatta-wiki-1.5.3/dev.py
|
Python
|
bsd-2-clause
| 466 | 0.002146 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
An auto-reloading standalone wiki server, useful for development.
"""
import hatta
import werkzeug
if __name__=="__main__":
config = hatta.WikiConfig()
config.parse_args()
# config.parse_files()
    application = hatta.Wiki(config).application
host = config.get('interface', 'localhost')
port = int(config.get('port', 8080))
werkzeug.run_simple(host, port, application, use_reloader=True)
|
dpaiton/OpenPV
|
pv-core/python/probe_analysis/readProbeParams.py
|
Python
|
epl-1.0
| 4,842 | 0.028501 |
from collections import OrderedDict
#*************************#
# PARAMS #
#*************************#
#Paths
#workspaceDir = "/Users/slundquist/workspace"
#filenames = [("sheng","/Users/slundquist/Desktop/ptLIF.txt")]
#filenames = [("sheng","/Users/slundquist/Desktop/retONtoLif.txt")]
workspaceDir = "/Users/dpaiton/Documents/Work/LANL/workspace" #Dylan Mac
probeFileDir = workspaceDir+"/iHouse/output"
filenames = [("label","path")]
filenames = [
# ("onStelVer1",probeFileDir+"/retONtoStellateVer1.txt"),
# ("onStelVer2",probeFileDir+"/retONtoStellateVer2.txt"),
# ("onStelVer3",probeFileDir+"/retONtoStellateVer3.txt"),
# ("onStelVer4",probeFileDir+"/retONtoStellateVer4.txt"),
# ("onStelHor",probeFileDir+"/retONtoStellateHor.txt"),
# ("onStelDia",probeFileDir+"/retONtoStellateDia.txt"),
# ("offStelVer",probeFileDir+"/retOFFtoStellateVer.txt"),
# ("offStelHor",probeFileDir+"/retOFFtoStellateHor.txt"),
# ("offStelDia",probeFileDir+"/retOFFtoStellateDia.txt"),
("onInterVer1",probeFileDir+"/retONtoInterVer1.txt")]
#filenames = [
# ("ptStellate",probeFileDir+"/ptStellate.txt"),
# ("ptInter",probeFileDir+"/ptInter.txt")]
rootFigOutDir = probeFileDir+"/analysis/probeFigs"
rootFigName = 'pr4Oja'
#Values for range of frames
startTime = 2000000
endTime = 2000100 #End must be under number of lines in file
#Which plots
timePlot = True
weightMap = True #Needs 'weight*' in data dictionary
#Other flags
numTCBins = 2 #number of bins for time course plot
doLegend = False #if True, time graph will have a legend
dispFigs = False #if True, display figures. Otherwise, print them to file.
#Data structure for scale, and data array to store all the data
data = OrderedDict()
#Made time for data
#TIME MUST EXIST AND BE FIRST IN THIS LIST
data['t'] = []
####
####OJA STDP CONN
####
#data['prOjaTr*'] = []
#data['prStdpTr*'] = []
#######
#data['prOjaTr_0_0'] = []
#data['prOjaTr_0_1'] = []
#data['prOjaTr_0_2'] = []
#data['prOjaTr_0_3'] = []
data['prOjaTr_0_4'] = []
#data['prOjaTr_0_5'] = []
#data['prOjaTr_0_6'] = []
#data['prOjaTr_0_7'] = []
#data['prOjaTr_0_8'] = []
#data['prOjaTr_0_9'] = []
#data['prOjaTr_1_0'] = []
#data['prOjaTr_1_1'] = []
#data['prOjaTr_1_2'] = []
#data['prOjaTr_1_3'] = []
#data['prOjaTr_1_4'] = []
#data['prOjaTr_1_5'] = []
#data['prOjaTr_1_6'] = []
#data['prOjaTr_1_18'] = []
#data['prOjaTr_1_19'] = []
#data['prOjaTr_1_20'] = []
#data['prOjaTr_1_21'] = []
#data['prOjaTr_1_22'] = []
#data['prOjaTr_1_23'] = []
#data['prOjaTr_1_24'] = []
#data['prOjaTr_1_25'] = []
#######
#data['poIntTr'] = []
#data['poStdpTr'] = []
#data['poOjaTr'] = []
#######
#data['ampLTD'] = []
#######
#data['weight_0_0'] = []
#data['weight_0_1'] = []
#data['weight_0_2'] = []
#data['weight_0_3'] = []
#data['weight_0_4'] = []
#data['weight_0_5'] = []
#data['weight_0_6'] = []
#data['weight_0_7'] = []
#data['weight_0_8'] = []
#data['weight_0_9'] = []
#######
#data['weight*'] = []
####
####lif layer
####
#data['V'] = []
#data['Vth'] = []
#data['a'] = []
#set scales for plots. Key must be the same as what is in the data dictionary
scale = {}
#scale['weight_0_0'] = 100
#scale['weight_0_1'] = 100
#scale['weight_0_2'] = 100
#scale['weight_0_3'] = 100
#scale['weight_0_4'] = 100
#scale['weight_0_5'] = 100
#scale['weight_0_6'] = 100
#scale['weight_0_7'] = 100
#scale['weight_0_8'] = 100
#scale['weight_0_9'] = 100
#scale['weight_0_10'] = 100
#scale['weight_0_11'] = 100
#scale['weight_0_12'] = 100
#scale['weight_0_13'] = 100
#scale['weight_0_14'] = 100
#scale['weight_0_15'] = 100
#scale['weight_0_16'] = 100
#scale['weight_0_17'] = 100
#scale['weight_0_18'] = 100
#scale['weight_0_19'] = 100
#scale['weight_0_20'] = 100
#scale['weight_0_21'] = 100
#scale['weight_0_22'] = 100
#scale['weight_0_23'] = 100
#scale['weight_0_24'] = 100
#scale['weight_4_0'] = 100
#scale['weight_4_1'] = 100
#scale['weight_4_2'] = 100
#scale['weight_4_3'] = 100
#scale['weight_4_4'] = 100
#scale['weight_4_5'] = 100
#scale['weight_4_6'] = 100
#scale['weight_4_7'] = 100
#scale['weight_4_8'] = 100
#scale['weight_4_9'] = 100
#scale['weight_4_10'] = 100
#scale['weight_4_11'] = 100
#scale['weight_4_12'] = 100
#scale['weight_4_13'] = 100
#scale['weight_4_14'] = 100
#scale['weight_4_15'] = 100
#scale['weight_4_16'] = 100
#scale['weight_4_17'] = 100
#scale['weight_4_18'] = 100
#scale['weight_4_19'] = 100
#scale['weight_4_20'] = 100
#scale['weight_4_21'] = 100
#scale['weight_4_22'] = 100
#scale['weight_4_23'] = 100
#scale['weight_4_24'] = 100
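# --- Illustrative sketch (not part of the original params file) ---
# Several of the commented-out entries above (e.g. 'prOjaTr*', 'weight*') are
# wildcard patterns rather than literal probe column names. One plausible way a
# reader script could expand them against the columns actually present in a
# probe file is fnmatch; the column names below are invented for illustration.
from fnmatch import fnmatch

available_columns = ["t", "prOjaTr_0_4", "weight_0_0", "weight_0_1", "V"]
requested_patterns = list(data.keys())  # here: ['t', 'prOjaTr_0_4']

expanded = [col for col in available_columns
            if any(fnmatch(col, pattern) for pattern in requested_patterns)]
print(expanded)  # ['t', 'prOjaTr_0_4']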
|
CIGIHub/greyjay
|
greyjay/articles/migrations/0076_articlepage_video_document.py
|
Python
|
mit
| 598 | 0.001672 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('wagtaildocs', '0003_add_verbose_names'),
('articles', '0075_auto_20151015_2022'),
]
operations = [
migrations.AddField(
model_name='articlepage',
name='video_document',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtaildocs.Document', null=True),
),
]
|
GoogleCloudPlatform/professional-services-data-validator
|
third_party/ibis/ibis_addon/operations.py
|
Python
|
apache-2.0
| 5,439 | 0.001839 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The Ibis Addons Operations are intended to help facilitate new expressions
when required before they can be pushed upstream to Ibis.
Raw SQL Filters:
The ability to inject RawSQL into a query DNE in Ibis. It must be built out
and applied to each Ibis Data Source directly as each has
    extended its own registry. Eventually this can potentially be pushed to
Ibis as an override, though it would not apply for Pandas and other
non-textual languages.
"""
import ibis
import sqlalchemy
import ibis.expr.api
from ibis_bigquery.compiler import (
reduction as bq_reduction,
BigQueryExprTranslator
)
import ibis.expr.datatypes as dt
from ibis.expr.operations import (
    Arg, Comparison, Reduction, ValueOp
)
import ibis.expr.rules as rlz
from ibis.expr.types import (
BinaryValue, IntegerColumn, StringValue
)
from ibis.backends.impala.compiler import ImpalaExprTranslator
from ibis.backends.pandas import client as _pandas_client
from ibis.backends.base_sqlalchemy.alchemy import AlchemyExprTranslator
from third_party.ibis.ibis_oracle.compiler import OracleExprTranslator
from third_party.ibis.ibis_teradata.compiler import TeradataExprTranslator
# from third_party.ibis.ibis_mssql.compiler import MSSQLExprTranslator # TODO figure how to add RAWSQL
# from third_party.ibis.ibis_snowflake.compiler import SnowflakeExprTranslator
# from third_party.ibis.ibis_oracle.compiler import OracleExprTranslator <<<<<< DB2
class BitXor(Reduction):
"""Aggregate bitwise XOR operation."""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of([rlz.value(dt.string), rlz.value(dt.binary)]))
how = Arg(rlz.isin({'sha256', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', 'binary')
class RawSQL(Comparison):
pass
def compile_hash(numeric_value, how):
return Hash(numeric_value, how=how).to_expr()
def compile_hash(binary_value, how):
return Hash(binary_value, how=how).to_expr()
def format_hash_bigquery(translator, expr):
op = expr.op()
arg, how = op.args
arg_formatted = translator.translate(arg)
if how == 'farm_fingerprint':
return f'farm_fingerprint({arg_formatted})'
else:
raise NotImplementedError(how)
def compile_hashbytes(binary_value, how):
return HashBytes(binary_value, how=how).to_expr()
def format_hash_bigquery(translator, expr):
arg, how = expr.op().args
compiled_arg = translator.translate(arg)
if how == "farm_fingerprint":
return f"FARM_FINGERPRINT({compiled_arg})"
else:
raise ValueError(f"unexpected value for 'how': {how}")
def format_hashbytes_bigquery(translator, expr):
arg, how = expr.op().args
compiled_arg = translator.translate(arg)
if how == "sha256":
return f"SHA256({compiled_arg})"
elif how == "farm_fingerprint":
return f"FARM_FINGERPRINT({compiled_arg})"
else:
raise ValueError(f"unexpected value for 'how': {how}")
def format_hashbytes_teradata(translator, expr):
arg, how = expr.op().args
compiled_arg = translator.translate(arg)
if how == "sha256":
return f"hash_sha256({compiled_arg})"
elif how == "sha512":
return f"hash_sha512({compiled_arg})"
elif how == "md5":
return f"hash_md5({compiled_arg})"
else:
raise ValueError(f"unexpected value for 'how': {how}")
def compile_raw_sql(table, sql):
op = RawSQL(table[table.columns[0]].cast(dt.string), ibis.literal(sql))
return op.to_expr()
def format_raw_sql(translator, expr):
op = expr.op()
rand_col, raw_sql = op.args
return raw_sql.op().args[0]
def sa_format_raw_sql(translator, expr):
op = expr.op()
rand_col, raw_sql = op.args
return sqlalchemy.text(raw_sql.op().args[0])
_pandas_client._inferable_pandas_dtypes["floating"] = _pandas_client.dt.float64
IntegerColumn.bit_xor = ibis.expr.api._agg_function('bit_xor', BitXor, True)
BinaryValue.hash = compile_hash
StringValue.hash = compile_hash
BinaryValue.hashbytes = compile_hashbytes
StringValue.hashbytes = compile_hashbytes
BigQueryExprTranslator._registry[BitXor] = bq_reduction('BIT_XOR')
BigQueryExprTranslator._registry[Hash] = format_hash_bigquery
BigQueryExprTranslator._registry[HashBytes] = format_hashbytes_bigquery
AlchemyExprTranslator._registry[RawSQL] = format_raw_sql
BigQueryExprTranslator._registry[RawSQL] = format_raw_sql
ImpalaExprTranslator._registry[RawSQL] = format_raw_sql
OracleExprTranslator._registry[RawSQL] = sa_format_raw_sql
TeradataExprTranslator._registry[RawSQL] = format_raw_sql
TeradataExprTranslator._registry[HashBytes] = format_hashbytes_teradata
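# --- Hedged usage sketch (not part of this module) ---
# compile_raw_sql() above wraps a raw SQL snippet so the registered
# format_raw_sql()/sa_format_raw_sql() handlers can emit it verbatim. A filter
# built from it might be used roughly like this; the connection call, project,
# dataset and table names are placeholders, not values from this repository.
def filter_with_raw_sql(table, raw_filter_sql):
    """Return `table` filtered by a raw SQL predicate string (sketch)."""
    return table.filter(compile_raw_sql(table, raw_filter_sql))

# Example (assumes an ibis BigQuery connection and an existing 'orders' table):
# conn = ibis_bigquery.connect(project_id="my-project", dataset_id="my_dataset")
# orders = conn.table("orders")
# big_orders = filter_with_raw_sql(orders, "amount > 1000")
# print(big_orders.compile())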
|
daikk115/test-rolling-upgrade-openstack
|
graceful_exit.py
|
Python
|
mit
| 2,834 | 0.001059 |
"""This script reponsible put all of send_get_request() function results
into list, gracefull exit any script import it and return analytics
"""
import time
import signal
import sys
from requests_futures.sessions import FuturesSession
tasks = []
session = FuturesSession()
def bg_cb(sess, resp):
"Callback function when requests done"
timestamp = time.time() * 1000
tasks.append({
"timestamp": timestamp,
"status": resp.status_code
})
print("%d - %d - %s" % (timestamp, resp.status_code, resp.request.method))
print(resp.url)
def footer():
"Return result of testing process"
is_find_start = True
count = 0
start, end = 0, 0 # assign this vars prepare if we dont' have downtime
error_dict = {}
for task in tasks:
if is_find_start:
if task.get('status') >= 500:
is_find_start = False
start = task.get('timestamp')
else:
try:
error_dict[task.get('status')] += 1
except:
error_dict[task.get('status')] = 1
if task.get('status') / 100 < 4:
end = task.get('timestamp')
for key in error_dict:
if (int(key) / 100) == 5:
count += error_dict.get(key)
print("Downtime for rolling upgrade process: {} ms" .format(end-start))
print("Number of fail requests (status code >= 500): {}" .format(count))
print(error_dict)
def exit_gracefully(signum, frame):
# Source: Antti Haapala - http://stackoverflow.com/a/18115530
signal.signal(signal.SIGINT, original_sigint)
try:
if raw_input("\nReally quit? (y/n)> ").lower().startswith('y'):
footer()
sys.exit(1)
except KeyboardInterrupt:
print("Ok ok, quitting")
sys.exit(1)
signal.signal(signal.SIGINT, exit_gracefully)
def send_request(url, method, headers=None, data=None, **kwargs):
if method == 'GET':
        return session.get(url, headers=headers,
                           background_callback=bg_cb, **kwargs)
elif method == 'POST':
return session.post(url, headers=headers,
data=data, background_callback=bg_cb, **kwargs)
|
elif method == 'PUT':
return session.put(url, headers=headers,
data=data, background_callback=bg_cb, **kwargs)
elif method == 'PATCH':
return session.patch(url, headers=headers,
data=data, background_callback=bg_cb, **kwargs)
elif method == 'DELETE':
return session.delete(url, headers=headers, background_callback=bg_cb, **kwargs)
else:
print("Method does not support: {}" .format(method))
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, exit_gracefully)
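# --- Hedged usage sketch (not in the original file) ---
# A driver script would import send_request()/footer(), poll an endpoint while
# the upgrade is running, and print the downtime summary at the end (or on
# Ctrl+C via exit_gracefully). The URL and request count are placeholders.
if __name__ == "__main__":
    futures = []
    for _ in range(100):
        # each call returns a Future; bg_cb() records timestamp/status in `tasks`
        futures.append(send_request("http://127.0.0.1:5000/health", "GET"))
        time.sleep(0.1)
    for f in futures:
        f.result()  # wait for the outstanding requests to finish
    footer()        # downtime window and count of >=500 responses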
|
laurentb/weboob
|
modules/lameteoagricole/module.py
|
Python
|
lgpl-3.0
| 1,524 | 0 |
# -*- coding: utf-8 -*-
# Copyright(C) 2017 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module
from weboob.capabilities.weather import CapWeather
from .browser import LameteoagricoleBrowser
__all__ = ['LameteoagricoleModule']
class LameteoagricoleModule(Module, CapWeather):
NAME = 'lameteoagricole'
DESCRIPTION = u'lameteoagricole website'
MAINTAINER = u'Vincent A'
EMAIL = 'dev@indigo.re'
LICENSE = 'AGPLv3+'
VERSION = '2.1'
BROWSER = LameteoagricoleBrowser
def iter_city_search(self, pattern):
return self.browser.iter_cities(pattern)
def get_current(self, city_id):
return self.browser.get_current(city_id)
def iter_forecast(self, city_id):
return self.browser.iter_forecast(city_id)
|
sealhuang/FreeROI
|
froi/gui/component/intersectdialog.py
|
Python
|
bsd-3-clause
| 3,390 | 0.00177 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm import imtool
class IntersectDialog(QDialog):
"""A dialog for action of intersection."""
def __init__(self, model, parent=None):
super(IntersectDialog, self).__init__(parent)
self._model = model
self._init_gui()
self._create_actions()
def _init_gui(self):
"""Initialize GUI."""
# set dialog title
self.setWindowTitle("Intersect")
# initialize widgets
source_label = QLabel("Source")
self.source_combo = QComboBox()
mask_label = QLabel("Mask")
self.mask_combo = QComboBox()
vol_list = self._model.getItemList()
self.source_combo.addItems(QStringList(vol_list))
row = self._model.currentIndex().row()
self.source_combo.setCurrentIndex(row)
self.mask_combo.addItems(QStringList(vol_list))
out_label = QLabel("Output volume name")
self.out_edit = QLineEdit()
# layout config
grid_layout = QGridLayout()
#grid_layout.addWidget(source_label, 0, 0)
#grid_layout.addWidget(self.source_combo, 0, 1)
grid_layout.addWidget(mask_label, 0, 0)
grid_layout.addWidget(self.mask_combo, 0, 1)
|
        grid_layout.addWidget(out_label, 1, 0)
grid_layout.addWidget(self.out_edit, 1, 1)
# button config
self.run_button = QPushButton("Run")
self.cancel_button = QPushButton("Cancel")
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.run_button)
hbox_layout.addWidget(self.cancel_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
def _create_actions(self):
self.source_combo.currentIndexChanged.connect(self._create_output)
self.mask_combo.currentIndexChanged.connect(self._create_output)
self.run_button.clicked.connect(self._run_intersect)
self.cancel_button.clicked.connect(self.done)
def _create_output(self):
source_name = self.source_combo.currentText()
mask_name = self.mask_combo.currentText()
output_name = '_'.join([str(source_name), str(mask_name)])
self.out_edit.setText(output_name)
def _run_intersect(self):
"""Run an intersecting processing."""
vol_name = str(self.out_edit.text())
if not vol_name:
QMessageBox.critical(self, "No output volume name",
"Please specify output volume's name!")
return
source_row = self.source_combo.currentIndex()
mask_row = self.mask_combo.currentIndex()
source_data = self._model.data(self._model.index(source_row),
Qt.UserRole + 4)
mask_data = self._model.data(self._model.index(mask_row),
Qt.UserRole + 4)
new_vol = imtool.intersect(source_data, mask_data)
self._model.addItem(new_vol,
None,
vol_name,
self._model._data[0].get_header(),
0, 100, 255, 'rainbow')
self.done(0)
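# --- Illustrative sketch (imtool.intersect itself is not shown in this file) ---
# An intersection of a source volume with a mask typically keeps source voxels
# wherever the mask is non-zero; a minimal numpy version of that assumed
# behaviour looks like this.
import numpy as np

def intersect_sketch(source_data, mask_data):
    """Zero out source voxels that fall outside the mask (assumed semantics)."""
    return np.where(mask_data > 0, source_data, 0)

# demo on random data:
# source = np.random.rand(4, 4, 4)
# mask = (np.random.rand(4, 4, 4) > 0.5).astype(int)
# print(intersect_sketch(source, mask).shape)  # (4, 4, 4)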
|
hansroh/skitai
|
skitai/corequest/httpbase/task.py
|
Python
|
mit
| 22,202 | 0.022205 |
import time
from aquests.athreads import socket_map
from aquests.athreads import trigger
from rs4.cbutil import tuple_cb
from aquests.client.asynconnect import AsynSSLConnect, AsynConnect
from aquests.dbapi.dbconnect import DBConnect
import threading
from aquests.protocols.http import request as http_request
from aquests.protocols.http import request_handler as http_request_handler
from aquests.protocols.http2 import request_handler as http2_request_handler
from aquests.protocols.grpc.request import GRPCRequest
from aquests.protocols.http import response as http_response
from aquests.protocols.ws import request_handler as ws_request_handler
from aquests.protocols.ws import request as ws_request
from . import rcache
from skitai import lifetime
from aquests import asyncore
import sys
import inspect
from skitai import exceptions
from skitai import REQFAIL, UNSENT, TIMEOUT, NETERR, NORMAL
from ...corequest import corequest, response
import sqlite3
try:
import psycopg2
except ImportError:
class PGIntegrityError (Exception):
pass
else:
PGIntegrityError = psycopg2.IntegrityError
DEFAULT_TIMEOUT = 10
WAIT_POLL = False
class OperationError (Exception):
pass
class Result (response, rcache.Result):
def __init__ (self, id, status, response, ident = None):
rcache.Result.__init__ (self, status, ident)
self.node = id
self.__response = response
def __getattr__ (self, attr):
return getattr (self.__response, attr)
def reraise (self):
if self.status_code >= 300:
try:
|
self.__response.expt
except AttributeError:
                # redirecting to HTTPError
raise exceptions.HTTPError ("%d %s" % (self.status_code, self.reason))
else:
|
self.__response.raise_for_status ()
return self
def close (self):
self.__response = None
def cache (self, timeout = 60, cache_if = (200,)):
if not timeout:
return
if self.status != NORMAL or self.status_code not in cache_if:
return
rcache.Result.cache (self, timeout)
return self
def fetch (self, cache = None, cache_if = (200,), one = False):
self.reraise ()
self.cache (cache, cache_if)
if one:
if len (self.data) == 0:
raise exceptions.HTTPError ("410 Partial Not Found")
if len (self.data) != 1:
raise exceptions.HTTPError ("409 Conflict")
if isinstance (self.data, dict):
return self.data.popitem () [1]
return self.data [0]
return self.data
def one (self, cache = None, cache_if = (200,)):
try:
return self.fetch (cache, cache_if, True)
except (PGIntegrityError, sqlite3.IntegrityError):
# primary or unique index error
raise exceptions.HTTPError ("409 Conflict")
def commit (self):
self.reraise ()
class Results (response, rcache.Result):
def __init__ (self, results, ident = None):
self.results = results
self.status_code = [rs.status_code for rs in results]
rcache.Result.__init__ (self, [rs.status for rs in self.results], ident)
def __iter__ (self):
return self.results.__iter__ ()
@property
def data (self):
return [r.data for r in self.results]
@property
def text (self):
return [r.text for r in self.results]
def reraise (self):
[r.reraise () for r in self.results]
def cache (self, timeout = 60, cache_if = (200,)):
if [_f for _f in [rs.status != NORMAL or rs.status_code not in cache_if for rs in self.results] if _f]:
return
rcache.Result.cache (self, timeout)
return self
def fetch (self, cache = None, cache_if = (200,)):
self.cache (cache, cache_if)
return [r.fetch () for r in self.results]
def one (self, cache = None, cache_if = (200,)):
self.cache (cache, cache_if)
return [r.one () for r in self.results]
class Dispatcher:
def __init__ (self, cv, id, ident = None, filterfunc = None, cachefs = None, callback = None):
self._cv = cv
self.id = id
self.ident = ident
self.filterfunc = filterfunc
self.cachefs = cachefs
self.callback = callback
self.creation_time = time.time ()
self.status = UNSENT
self.result = None
self.handler = None
def get_id (self):
return self.id
def get_status (self):
with self._cv:
return self.status
def request_failed (self):
self.status = REQFAIL
tuple_cb (self, self.callback)
def set_status (self, code, result = None):
with self._cv:
self.status = code
if result:
self.result = result
return code
def get_result (self):
if not self.result:
if self.get_status () == REQFAIL:
self.result = Result (self.id, REQFAIL, http_response.FailedResponse (731, "Request Failed"), self.ident)
else:
self.result = Result (self.id, TIMEOUT, http_response.FailedResponse (730, "Timeout"), self.ident)
return self.result
def do_filter (self):
if self.filterfunc:
self.filterfunc (self.result)
def handle_cache (self, response):
self.set_status (NORMAL, Result (self.id, status, response, self.ident))
def handle_result (self, handler):
if self.get_status () == TIMEOUT:
# timeout, ignore
return
response = handler.response
# DON'T do_filter here, it blocks select loop
if response.code >= 700:
if response.code == 702:
status = TIMEOUT
else:
status = NETERR
else:
status = NORMAL
result = Result (self.id, status, response, self.ident)
cakey = response.request.get_cache_key ()
if self.cachefs and cakey and response.max_age:
self.cachefs.save (
cakey,
response.get_header ("content-type"), response.content,
response.max_age, 0
)
handler.callback = None
handler.response = None
self.set_status (status, result)
tuple_cb (self, self.callback)
class Task (corequest):
DEFAULT_CACHE_TIMEOUT = 42
proto_map = dict (
rpc = http_request.XMLRPCRequest,
xmlrpc = http_request.XMLRPCRequest,
jsonrpc = http_request.JSONRPCRequest,
grpc = GRPCRequest
)
def __init__ (self,
cluster,
uri,
params = None,
reqtype = "get",
headers = None,
auth = None,
meta = None,
use_cache = False,
mapreduce = True,
filter = None,
callback = None,
cache = None,
timeout = 10,
origin = None,
cachefs = None,
logger = None
):
self._uri = uri
self._params = params
self._headers = headers
self._reqtype = reqtype
self._auth = auth
self.set_defaults (cluster, meta, use_cache, mapreduce, filter, callback, cache, timeout, origin, logger, cachefs)
if not self._reqtype.lower ().endswith ("rpc"):
self._build_request ("", self._params)
@classmethod
def add_proto (cls, name, class_):
cls.proto_map [name] = class_
def set_defaults (self, cluster, meta, use_cache, mapreduce, filter, callback, cache, timeout, origin, logger, cachefs = None):
self._cluster = cluster
self._meta = meta or {}
self._use_cache = use_cache
self._mapreduce = mapreduce
self._filter = filter
self._callback = callback
self._cache_timeout = cache
self._timeout = timeout
self._origin = origin
self._cachefs = cachefs
self._logger = logger
self._requests = {}
self._results = []
self._canceled = False
self._init_time = time.time ()
self._cv = No
|
enthought/etsproxy
|
enthought/traits/ui/qt4/ui_live.py
|
Python
|
bsd-3-clause
| 50 | 0 |
# proxy module
from traitsui.qt4.ui_live import *
|
the-linux-schools-project/karoshi-client
|
clientsetup/buildclient/config/usr/lib/gedit/plugins/advancedfind/find_result.py
|
Python
|
agpl-3.0
| 21,931 | 0.031143 |
# -*- encoding:utf-8 -*-
# find_result.py is part of advancedfind-gedit.
#
#
# Copyright 2010-2012 swatch
#
# advancedfind-gedit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from gi.repository import Gtk, Gedit, Gio
import os.path
import urllib
import re
import config_manager
import shutil
import gettext
APP_NAME = 'advancedfind'
CONFIG_DIR = os.path.expanduser('~/.local/share/gedit/plugins/' + APP_NAME + '/config')
#LOCALE_DIR = '/usr/share/locale'
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'locale')
if not os.path.exists(LOCALE_DIR):
LOCALE_DIR = '/usr/share/locale'
try:
t = gettext.translation(APP_NAME, LOCALE_DIR)
_ = t.gettext
#Gtk.glade.bindtextdomain(APP_NAME, LOCALE_DIR)
except:
pass
#gettext.install(APP_NAME, LOCALE_DIR, unicode=True)
class FindResultView(Gtk.HBox):
def __init__(self, window, result_gui_settings):
Gtk.HBox.__init__(self)
self._window = window
self.result_gui_settings = result_gui_settings
# load color theme of results list
user_formatfile = os.path.join(CONFIG_DIR, 'theme/'+self.result_gui_settings['COLOR_THEME']+'.xml')
if not os.path.exists(user_formatfile):
if not os.path.exists(os.path.dirname(user_formatfile)):
os.makedirs(os.path.dirname(user_formatfile))
shutil.copy2(os.path.dirname(__file__) + "/config/theme/default.xml", os.path.dirname(user_formatfile))
#print os.path.dirname(user_formatfile)
format_file = user_formatfile
#print format_file
self.result_format = config_manager.ConfigManager(format_file).load_configure('result_format')
config_manager.ConfigManager(format_file).to_bool(self.result_format)
# initialize find result treeview
self.findResultTreeview = Gtk.TreeView()
resultsCellRendererText = Gtk.CellRendererText()
if self.result_format['BACKGROUND']:
resultsCellRendererText.set_property('cell-background', self.result_format['BACKGROUND'])
resultsCellRendererText.set_property('font', self.result_format['RESULT_FONT'])
self.findResultTreeview.append_column(Gtk.TreeViewColumn("line", resultsCellRendererText, markup=1))
self.findResultTreeview.append_column(Gtk.TreeViewColumn("content", resultsCellRendererText, markup=2))
#self.findResultTreeview.append_column(Gtk.TreeViewColumn("result_start", Gtk.CellRendererText(), text=4))
#self.findResultTreeview.append_column(Gtk.TreeViewColumn("result_len", Gtk.CellRendererText(), text=5))
self.findResultTreeview.append_column(Gtk.TreeViewColumn("uri", resultsCellRendererText, text=6))
self.findResultTreeview.set_grid_lines(int(self.result_format['GRID_PATTERN'])) # 0: None; 1: Horizontal; 2: Vertical; 3: Both
self.findResultTreeview.set_headers_visible(self.result_format['SHOW_HEADERS'])
try:
column_num = self.findResultTreeview.get_n_columns()
except:
# For older gtk version.
column_num = len(self.findResultTreeview.get_columns())
if self.result_format['SHOW_HEADERS']:
for i in range(0, column_num):
self.findResultTreeview.get_column(i).set_resizable(True)
else:
for i in range(0, column_num):
self.findResultTreeview.get_column(i).set_sizing(1) # 1=autosizing
self.findResultTreeview.set_rules_hint(True)
self.findResultTreemodel = Gtk.TreeStore(int, str, str, object, int, int, str)
self.findResultTreemodel.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.findResultTreeview.connect("cursor-changed", self.on_findResultTreeview_cursor_changed_action)
self.findResultTreeview.connect("button-press-event", self.on_findResultTreeview_button_press_action)
self.findResultTreeview.set_model(self.findResultTreemodel)
# initialize scrolled window
scrollWindow = Gtk.ScrolledWindow()
scrollWindow.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrollWindow.add(self.findResultTreeview)
# put a separator
v_separator1 = Gtk.VSeparator()
# initialize button box
v_box = Gtk.VBox()
v_buttonbox = Gtk.VButtonBox()
v_buttonbox.set_layout(Gtk.ButtonBoxStyle.END)
v_buttonbox.set_spacing(5)
v_buttonbox.set_homogeneous(True)
self.selectNextButton = Gtk.Button(_("Next"))
self.selectNextButton.set_no_show_all(True)
self.selectNextButton.connect("clicked", self.on_selectNextButton_clicked_action)
self.expandAllButton = Gtk.Button(_("Expand All"))
self.expandAllButton.set_no_show_all(True)
self.expandAllButton.connect("clicked", self.on_expandAllButton_clicked_action)
self.collapseAllButton = Gtk.Button(_("Collapse All"))
self.collapseAllButton.set_no_show_all(True)
self.collapseAllButton.connect("clicked", self.on_collapseAllButton_clicked_action)
self.clearHighlightButton = Gtk.Button(_("Clear Highlight"))
self.clearHighlightButton.set_no_show_all(True)
self.clearHighlightButton.connect("clicked", self.on_clearHightlightButton_clicked_action)
self.clearButton = Gtk.Button(_("Clear"))
self.clearButton.set_no_show_all(True)
self.clearButton.connect("clicked", self.on_clearButton_clicked_action)
self.stopButton = Gtk.Button(_("Stop"))
self.stopButton.set_no_show_all(True)
self.stopButton.connect("clicked", self.on_stopButton_clicked_action)
self.stopButton.set_sensitive(False)
v_buttonbox.pack_start(self.selectNextButton, False, False, 5)
v_buttonbox.pack_start(self.expandAllButton, False, False, 5)
v_buttonbox.pack_start(self.collapseAllButton, False, False, 5)
v_buttonbox.pack_start(self.clearHighlightButton, False, False, 5)
v_buttonbox.pack_start(self.clearButton, False, False, 5)
v_buttonbox.pack_start(self.stopButton, False, False, 5)
v_box.pack_end(v_buttonbox, False, False, 5)
#self._status = Gtk.Label()
#self._status.set_text('test')
#self._status.hide()
#v_box.pack_end(self._status, False)
self.pack_start(scrollWindow, True, True, 5)
self.pack_start(v_separator1, False, False, 0)
self.pack_start(v_box, False, False, 5)
self.show_all()
#initialize context menu
self.contextMenu = Gtk.Menu()
self.expandAllItem = Gtk.MenuItem.new_with_label(_('Expand All'))
self.collapseAllItem = Gtk.MenuItem.new_with_label(_('Collapse All'))
self.clearHighlightItem = Gtk.MenuItem.new_with_label(_('Clear Highlight'))
self.clearItem = Gtk.MenuItem.new_with_label(_('Clear'))
self.stopItem = Gtk.MenuItem.new_with_label(_('Stop'))
self.stopItem.set_sensitive(False)
self.markupItem = Gtk.MenuItem.new_with_label(_('Markup'))
self.contextMenu.append(self.expandAllItem)
self.contextMenu.append(self.collapseAllItem)
self.contextMenu.append(self.clearHighlightItem)
self.contextMenu.append(self.clearItem)
self.contextMenu.append(self.stopItem)
self.contextMenu.append(self.markupItem)
self.expandAllItem.connect('activate', self.on_expandAllItem_activate)
self.collapseAllItem.connect('activate', self.on_collapseAllItem_activate)
self.clearHighlightItem.connect('activate', self.on_clearHighlightItem_activate)
self.clearItem.connect('activate', self.on_clearItem_activate)
self.stopItem.connect('activate', self.on_stopItem_activate)
self.markupItem.connect('activate', self.on_markupItem_activate)
self.expandAllItem.show()
self.collapseAllItem.show()
self.clearHighlightItem.show()
self.clearItem.show()
self.stopItem.show()
#self.markupItem.show()
self.contextMenu.append(Gtk.SeparatorMenuItem())
self.showButtonsItem = Gtk.MenuItem.new_with_label(_('Show Buttons'))
self.contextMenu.append(self.showButtonsItem)
self.showButtonsItem.show()
self.showButtonsSubmenu = Gtk.Menu()
self.showNextButtonItem = Gtk.CheckMenuItem.new_with_label(_
|
DBuildService/atomic-reactor
|
atomic_reactor/plugins/pre_koji_parent.py
|
Python
|
bsd-3-clause
| 14,943 | 0.003279 |
"""
Copyright (c) 2017, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.constants import (
INSPECT_CONFIG, PLUGIN_KOJI_PARENT_KEY, BASE_IMAGE_KOJI_BUILD, PARENT_IMAGES_KOJI_BUILDS,
KOJI_BTYPE_IMAGE
)
from atomic_reactor.plugins.pre_reactor_config import (
get_deep_manifest_list_inspection, get_koji_session,
get_skip_koji_check_for_base_image, get_fail_on_digest_mismatch,
get_platform_to_goarch_mapping
)
from atomic_reactor.plugins.pre_check_and_set_rebuild import is_rebuild
from atomic_reactor.util import (
base_image_is_custom, get_manifest_media_type, is_scratch_build,
get_platforms, RegistrySession, RegistryClient
)
from copy import copy
from osbs.utils import Labels
import json
import koji
import time
DEFAULT_POLL_TIMEOUT = 60 * 10 # 10 minutes
DEFAULT_POLL_INTERVAL = 10 # 10 seconds
class KojiParentBuildMissing(ValueError):
"""Expected to find a build for the parent image in koji, did not find it within timeout."""
class KojiParentPlugin(PreBuildPlugin):
"""Wait for Koji build of parent images to be available
Uses inspected parent image configs to determine the
nvrs (Name-Version-Release) of the parent images. It uses
this information to check if the corresponding Koji
builds exist. This check is performed periodically until
the Koji builds are all found, or timeout expires.
This check is required due to a timing issue that may
occur after the image is pushed to registry, but it
    has not yet been uploaded and tagged in Koji. This plugin
ensures that the layered image is only built with parent
images that are known in Koji.
"""
key = PLUGIN_KOJI_PARENT_KEY
is_allowed_to_fail = False
def __init__(self, tasker, workflow, poll_interval=DEFAULT_POLL_INTERVAL,
poll_timeout=DEFAULT_POLL_TIMEOUT):
"""
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param poll_interval: int, seconds between polling for Koji build
:param poll_timeout: int, max amount of seconds to wait for Koji build
"""
super(KojiParentPlugin, self).__init__(tasker, workflow)
self.koji_session = get_koji_session(self.workflow)
self.poll_interval = poll_interval
self.poll_timeout = poll_timeout
self._base_image_nvr = None
self._base_image_build = None
self._parent_builds = {}
self._poll_start = None
self.platforms = get_platforms(self.workflow)
# RegistryClient instances cached by registry name
self.registry_clients = {}
self._deep_manifest_list_inspection = get_deep_manifest_list_inspection(self.workflow,
fallback=True)
def ignore_isolated_autorebuilds(self):
if not self.workflow.source.config.autorebuild.get('ignore_isolated_builds', False):
self.log.debug("ignoring_isolated_builds isn't configured, won't skip autorebuild")
return
base_koji_build = self.wait_for_parent_image_build(self._base_image_nvr)
is_isolated = base_koji_build['extra']['image'].get('isolated', False)
if is_isolated:
self.log.debug("setting cancel_isolated_autorebuild")
self.workflow.cancel_isolated_autorebuild = True
def run(self):
if is_scratch_build(self.workflow):
self.log.info('scratch build, skipping plugin')
return
if not (self.workflow.builder.dockerfile_images.base_from_scratch or
self.workflow.builder.dockerfile_images.custom_base_image):
self._base_image_nvr = self.detect_parent_image_nvr(
self.workflow.builder.dockerfile_images.base_image,
inspect_data=self.workflow.builder.base_image_inspect,
)
if is_rebuild(self.workflow):
self.ignore_isolated_autorebuilds()
manifest_mismatches = []
for img, local_tag in self.workflow.builder.dockerfile_images.items():
if base_image_is_custom(img.to_str()):
continue
nvr = self.detect_parent_image_nvr(local_tag) if local_tag else None
self._parent_builds[img] = self.wait_for_parent_image_build(nvr) if nvr else None
if nvr == self._base_image_nvr:
self._base_image_build = self._parent_builds[img]
if self._parent_builds[img]:
# we need the possible floating tag
check_img = copy(local_tag)
check_img.tag = img.tag
try:
self.check_manifest_digest(check_img, self._parent_builds[img])
except ValueError as exc:
manifest_mismatches.append(exc)
else:
err_msg = ('Could not get koji build info for parent image {}. '
'Was this image built in OSBS?'.format(img.to_str()))
if get_skip_koji_check_for_base_image(self.workflow, fallback=False):
self.log.warning(err_msg)
|
else:
self.log.error(err_msg)
raise RuntimeError(err_msg)
if manifest_mismatches:
mismatch_msg = ('Error while comparing parent images manifest digests in koji with '
'related values from registries: %s')
if get_fail_on_digest_mismatch(self.workflow, fallback=True):
self.log.error(mismatch_msg, manifest_mismatches)
                raise RuntimeError(mismatch_msg % manifest_mismatches)
self.log.warning(mismatch_msg, manifest_mismatches)
return self.make_result()
def check_manifest_digest(self, image, build_info):
"""Check if the manifest list digest is correct.
Compares the manifest list digest with the value in koji metadata.
Raises a ValueError if the manifest list does not refer to the koji build.
:param image: ImageName, image to inspect
:param build_info: dict, koji build metadata
"""
image_str = image.to_str()
v2_list_type = get_manifest_media_type('v2_list')
v2_type = get_manifest_media_type('v2')
image_digest_data = self.workflow.builder.parent_images_digests[image_str]
if v2_list_type in image_digest_data:
media_type = v2_list_type
elif v2_type in image_digest_data:
media_type = v2_type
else:
# This should not happen - raise just to be safe:
raise RuntimeError('Unexpected parent image digest data for {}. '
'v2 or v2_list expected, got {}'.format(image, image_digest_data))
digest = image_digest_data[media_type]
try:
koji_digest = build_info['extra']['image']['index']['digests'][media_type]
except KeyError as exc:
err_msg = ("Koji build ({}) for parent image '{}' does not have manifest digest data "
"for the expected media type '{}'. This parent image MUST be rebuilt"
.format(build_info['id'], image_str, media_type))
self.log.error(err_msg)
raise ValueError(err_msg) from exc
expected_digest = koji_digest
self.log.info('Verifying manifest digest (%s) for parent %s against its '
'koji reference (%s)', digest, image_str, expected_digest)
if digest != expected_digest:
rebuild_msg = 'This parent image MUST be rebuilt'
mismatch_msg = ('Manifest digest (%s) for parent image %s does not match value in its '
'koji reference (%s). %s')
if not self._deep_manifest_list_inspection:
self.log.error(mismatch_msg, digest, image_str, expected_digest, rebuild_msg)
raise ValueError(mismatch_msg % (digest, image_st
|
gwenniger/joshua
|
scripts/toolkit/extract_references.py
|
Python
|
lgpl-2.1
| 1,922 | 0.023413 |
#!/usr/bin/env python
import os, sys, codecs, re
def usage():
print "Usage info for extract_references.py"
print " extract_references.py ref_sgml ref_prefix"
print
sys.exit()
def main():
if (len(sys.argv) < 3 or sys.argv[1] == "-h"):
usage()
sgml = codecs.open(sys.argv[1], "r", "utf-8")
prefix = sys.argv[2]
doc_pattern = re.compile('.* docid="([^"]*).*"')
seg_pattern = re.compile('.* id="([^"]*)".*')
ref_sets = []
cur_ref_set = []
cur_doc = ""
cur_seg = ""
cur_txt = ""
for line in sgml.readlines():
line_tc = line.strip()
line = line_tc.lower()
if ("<doc " in line):
cur_doc = doc_pattern.search(line).groups()[0]
        if ("</refset " in line or
("<doc " in line and cur_doc in map(lambda x: x[0], cur_ref_set))):
ref_sets.append(cur_ref_set)
cur_ref_set = []
if ("<seg " in line):
cur_seg = seg_pattern.search(line).groups()[0]
cur_txt = re.sub("<[^>]*>", "", line_tc)
cur_ref_set.append((cur_doc, cur_seg, cur_txt))
ref_files = []
ref_count = len(ref_sets[0])
for i, ref_set in enumerate(ref_sets):
|
if (ref_count != len(ref_set)):
print "[ERR] reference lengths do not match: " + str(ref_count) \
+ " vs. " + str(len(ref_set)) + " (ref " + str(i) + ")"
ref_files.append(codecs.open(prefix + "_ref." + str(i), "w", "utf-8"))
for j in range(ref_count):
(cur_doc, cur_seg, cur_txt) = ref_sets[0][j]
for i in range(len(ref_sets)):
if (j >= len(ref_sets[i])):
continue
(doc, seg, txt) = ref_sets[i][j]
if (doc != cur_doc or seg != cur_seg):
print "[ERR] document, segment ids don't match up: "
print "\t" + doc + " vs. " + cur_doc
print "\t" + seg + " vs. " + cur_seg
ref_files[i].write(txt + "\n")
for ref_file in ref_files:
ref_file.close()
if __name__ == "__main__":
main()
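# --- Small sketch (not part of the script) ---
# The two regexes in main() pull the docid/seg id attributes out of SGML tags;
# the sample lines below are invented to show what they match.
import re

doc_pattern = re.compile('.* docid="([^"]*).*"')
seg_pattern = re.compile('.* id="([^"]*)".*')

sample_doc = '<doc sysid="ref" docid="doc01" genre="nw">'.lower()
sample_seg = '<seg id="3"> a sample reference sentence . </seg>'.lower()

print(doc_pattern.search(sample_doc).groups()[0])  # doc01
print(seg_pattern.search(sample_seg).groups()[0])  # 3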
|
jnns/wagtail
|
wagtail/core/models/view_restrictions.py
|
Python
|
bsd-3-clause
| 2,789 | 0.003944 |
"""
Base model definitions for validating front-end user access to resources such as pages and
documents. These may be subclassed to accommodate specific models such as Page or Collection,
but the definitions here should remain generic and not depend on the base wagtail.core.models
module or specific models defined there.
"""
from django.conf import settings
from django.contrib.auth.models import Group
from django.db import models
from django.utils.translation import gettext_lazy as _
class BaseViewRestriction(models.Model):
NONE = 'none'
PASSWORD = 'password'
GROUPS = 'groups'
LOGIN = 'login'
RESTRICTION_CHOICES = (
(NONE, _("Public")),
(LOGIN, _("Private, accessible to logged-in users")),
(PASSWORD, _("Private, accessible with the following password")),
(GROUPS, _("Private, accessible to users in specific groups")),
)
restriction_type = models.CharField(
max_length=20, choices=RESTRICTION_CHOICES)
password = models.CharField(verbose_name=_('password'), max_length=255, blank=True)
groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True)
def accept_request(self, request):
if self.restriction_type == BaseViewRestriction.PASSWORD:
passed_restrictions = request.session.get(self.passed_view_restrictions_session_key, [])
|
if self.id not in passed_restrictions:
return False
elif self.restriction_type == BaseViewRestriction.LOGIN:
|
if not request.user.is_authenticated:
return False
elif self.restriction_type == BaseViewRestriction.GROUPS:
if not request.user.is_superuser:
current_user_groups = request.user.groups.all()
if not any(group in current_user_groups for group in self.groups.all()):
return False
return True
def mark_as_passed(self, request):
"""
Update the session data in the request to mark the user as having passed this
view restriction
"""
has_existing_session = (settings.SESSION_COOKIE_NAME in request.COOKIES)
passed_restrictions = request.session.setdefault(self.passed_view_restrictions_session_key, [])
if self.id not in passed_restrictions:
passed_restrictions.append(self.id)
request.session[self.passed_view_restrictions_session_key] = passed_restrictions
if not has_existing_session:
# if this is a session we've created, set it to expire at the end
# of the browser session
request.session.set_expiry(0)
class Meta:
abstract = True
verbose_name = _('view restriction')
verbose_name_plural = _('view restrictions')
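# --- Hedged sketch (not part of wagtail) ---
# The accept_request() branches above reduce to a small decision table; this
# standalone function mirrors that logic with plain values instead of
# request/session objects, purely for illustration.
def restriction_allows(restriction_type, passed_ids=(), restriction_id=None,
                       is_authenticated=False, is_superuser=False,
                       user_groups=(), allowed_groups=()):
    if restriction_type == 'password':
        return restriction_id in passed_ids
    if restriction_type == 'login':
        return is_authenticated
    if restriction_type == 'groups':
        return is_superuser or any(g in user_groups for g in allowed_groups)
    return True  # 'none' (and anything unrecognised) falls through to allowed

print(restriction_allows('login', is_authenticated=True))    # True
print(restriction_allows('groups', user_groups={'editors'},
                         allowed_groups={'moderators'}))     # False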
|
JrtPec/opengrid
|
opengrid/library/plotting.py
|
Python
|
apache-2.0
| 6,242 | 0.001922 |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 26 18:03:24 2014
@author: KDB
"""
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.dates import date2num, num2date, HourLocator, DayLocator, AutoDateLocator, DateFormatter
from matplotlib.colors import LogNorm
def carpet(timeseries, **kwargs):
"""
Draw a carpet plot of a pandas timeseries.
The carpet plot reads like a letter. Every day one line is added to the
bottom of the figure, minute for minute moving from left (morning) to right
(evening).
The color denotes the level of consumption and is scaled logarithmically.
If vmin and vmax are not provided as inputs, the minimum and maximum of the
colorbar represent the minimum and maximum of the (resampled) timeseries.
Parameters
----------
timeseries : pandas.Series
vmin, vmax : If not None, either or both of these values determine the range
of the z axis. If None, the range is given by the minimum and/or maximum
of the (resampled) timeseries.
zlabel, title : If not None, these determine the labels of z axis and/or
title. If None, the name of the timeseries is used if defined.
cmap : matplotlib.cm instance, default coolwarm
"""
# define optional input parameters
cmap = kwargs.pop('cmap', cm.coolwarm)
norm = kwargs.pop('norm', LogNorm())
interpolation = kwargs.pop('interpolation', 'nearest')
cblabel = kwargs.pop('zlabel', timeseries.name if timeseries.name else '')
title = kwargs.pop('title', 'carpet plot: ' + timeseries.name if timeseries.name else '')
# data preparation
if timeseries.dropna().empty:
print('skipped {} - no data'.format(title))
return
ts = timeseries.resample('min', how='mean', label='left', closed='left')
vmin = max(0.1, kwargs.pop('vmin', ts[ts > 0].min()))
vmax = max(vmin, kwargs.pop('vmax', ts.quantile(.999)))
# convert to dataframe with date as index and time as columns by
# first replacing the index by a MultiIndex
# tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
ts.index = pd.MultiIndex.from_arrays(
[np.floor(mpldatetimes), 2 + mpldatetimes % 1]) # '2 +': matplotlib bug workaround.
# and then unstacking the second index level to columns
df = ts.unstack()
# data plotting
fig, ax = plt.subplots()
# define the extent of the axes (remark the +- 0.5 for the y axis in order to obtain aligned date ticks)
extent = [df.columns[0], df.columns[-1], df.index[-1] + 0.5, df.index[0] - 0.5]
im = plt.imshow(df, vmin=vmin, vmax=vmax, extent=extent, cmap=cmap, aspect='auto', norm=norm,
interpolation=interpolation, **kwargs)
# figure formatting
# x axis
ax.xaxis_date()
ax.xaxis.set_major_locator(HourLocator(interval=2))
ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
ax.xaxis.grid(True)
plt.xlabel('UTC Time')
# y axis
ax.yaxis_date()
dmin, dmax = ax.yaxis.get_data_interval()
number_of_days = (num2date(dmax) - num2date(dmin)).days
    # AutoDateLocator is not suited when only a little data is available
if abs(number_of_days) <= 35:
ax.yaxis.set_major_locator(DayLocator())
else:
ax.yaxis.set_major_locator(AutoDateLocator())
ax.yaxis.set_major_formatter(DateFormatter("%a, %d %b %Y"))
# plot colorbar
cbticks = np.logspace(np.log10(vmin), np.log10(vmax), 11, endpoint=True)
cb = plt.colorbar(format='%.0f', ticks=cbticks)
cb.set_label(cblabel)
# plot title
plt.title(title)
return im
def fanchart(timeseries, **kwargs):
"""
Draw a fan chart of the daily consumption profile.
The fan chart shows the different quantiles of the daily consumption, with
the blue line representing the median, and the black line the average.
By default, the consumption of the whole day is taken, but one can select
the hours of interest, e.g. night time standby consumption.
Parameters
----------
timeseries : pandas.Series
start_hour, end_hour : int or float, optional
Start and end hours of period of interest, default values are 0, 24
As of now, ensure that start_hour < end_hour
ylabel, title : str
If not None, these determine the labels of y axis and/or title.
If None, the name of the timeseries is used if defined.
"""
start_hour = 2. + kwargs.pop('start_hour', 0.) / 24.
end_hour = 2. + kwargs.pop('end_hour', 24.) / 24.
ylabel = kwargs.pop('ylabel', timeseries.name if timeseries.name else '')
title = kwargs.pop('title', 'carpet plot: ' + timeseries.name if timeseries.name else '')
# data preparation
if timeseries.dropna().empty:
        print('skipped {} - no data'.format(title))
return
ts = timeseries.resample('min', how='mean', label='left', closed='left')
# convert to dataframe with date as index and time as columns by
# first replacing the index by a MultiIndex
    # tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
ts.index = pd.MultiIndex.from_arrays(
[np.floor(mpldatetimes), 2 + mpldatetimes % 1]) # '2 +': matplotlib bug workaround.
# and then unstacking the second index level to columns
df = ts.unstack()
df = df.T.truncate(start_hour, end_hour)
num = 20
num_max = 4
df_quant = df.quantile(np.linspace(0., 1., 2 * num + 1))
# data plotting
fig, ax = plt.subplots()
im = plt.plot(df.columns, df_quant.iloc[num], 'b', label='median')
for i in range(1, num):
plt.fill_between(df.columns, df_quant.iloc[num - i], df_quant.iloc[min(num + i, 2 * num - num_max)], color='b',
alpha=0.05)
plt.plot(df.columns, df.mean(), 'k--', label='mean')
plt.legend()
# x axis
ax.xaxis_date()
plt.xlim(df.columns[0], df.columns[-1])
plt.ylabel(ylabel)
# plot title
plt.title(title)
plt.grid(True)
return im
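# --- Hedged usage sketch (not in the original module) ---
# carpet() and fanchart() expect a tz-aware pandas Series at (sub-)minute
# resolution; the synthetic series below is only for illustration and relies on
# the older resample(..., how=...) API this module was written against.
if __name__ == '__main__':
    index = pd.date_range('2014-11-01', periods=7 * 24 * 60, freq='min', tz='UTC')
    demo = pd.Series(np.random.rand(len(index)) * 100 + 1, index=index,
                     name='consumption [W]')
    carpet(demo, vmin=1, vmax=100)
    fanchart(demo, start_hour=0, end_hour=6)
    plt.show()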
|
oinopion/django
|
django/db/backends/oracle/base.py
|
Python
|
bsd-3-clause
| 24,986 | 0.001641 |
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import os
import platform
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.duration import duration_string
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import Oracle_datetime, convert_unicode # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = sel
|