repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
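Each row below is one fill-in-the-middle (FIM) sample: the source file is split across the prefix, middle, and suffix columns, and concatenating the three strings recovers the original file. A minimal reconstruction sketch (the dict-style row access is an assumption, not part of the dataset):

def reconstruct_source(row):
    # prefix + middle + suffix recovers the original file text
    return row["prefix"] + row["middle"] + row["suffix"]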
goofwear/raspberry_pwn | src/pentest/sqlmap/plugins/dbms/oracle/filesystem.py | Python | gpl-3.0 | 792 | 0.001263 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
def __init__(self):
GenericFilesystem.__init__(self)
def readFile(self, rFile):
errMsg = "File system read access not yet implemented
|
for "
errMsg += "Oracle"
raise SqlmapUnsupportedFeatureException(errMsg)
def writeFile(self, wFile, dFile, fileType=None, forceCheck=False):
errMsg = "File system write access not yet implemented for "
errMsg += "Oracle"
raise SqlmapUnsupportedFeatureException(errMsg)
|
narcolepticsnowman/GarageWarden | GarageWarden/notify.py | Python | mit | 3,025 | 0.002314 |
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from datetime import datetime
from django.http import HttpResponse
from GarageWarden import status, settingHelper, settingView, config, settings as gw_settings
import RPi.GPIO as GPIO
settings = None
settings_loaded = False
def reload_config():
global settings, settings_loaded
settings_loaded = True
settings = settingHelper.values_for_prefix("email")
settingView.reload_methods['notify'] = reload_config
def send_mail(subject, text, html=None):
if not get_setting('enabled'):
print('email not enabled')
return
encryption = (get_setting('encryption') or '').lower()
host = get_setting('host')
port = int(get_setting('port'))
if encryption == 'ssl':
smtp = smtplib.SMTP_SSL(host=host, port=port)
else:
smtp = smtplib.SMTP(host=host, port=port)
if encryption == 'tls':
smtp.starttls()
if get_setting('username') and get_setting('password'):
smtp.login(get_setting('username'), get_setting('password'))
_from = get_setting('from name') or 'GarageWarden'
recipients = get_setting('recipients')
msg = MIMEMultipart("alternative")
msg['Subject'] = subject
msg['From'] = _from
msg['To'] = recipients
if text:
msg.attach(MIMEText(text, "plain"))
if html:
msg.attach(MIMEText(html, "html"))
smtp.sendmail(_from, [r.strip() for r in recipients.split(',') if r], msg.as_string())
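# A hypothetical usage sketch of send_mail (subject/body values assumed, not
# from the source); host, port, encryption, and credentials come from the
# "email"-prefixed settings loaded by reload_config() above:
# send_mail("Garage Opened",
#           make_text("Opened", "01-Jan-2020 12:00:00"),
#           make_html("Opened", "#f0ad4e", "01-Jan-2020 12:00:00"))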
def send_state_change_mail(state, color, date):
if get_setting('Status Notification'):
send_mail("Garage " + state, make_text(state, date), make_html(state, color, date))
else:
print('status emails not enabled')
def make_html(state, color, date):
return "Garage was <span style='color: " + color + "'><strong>" + state + "</strong></span> at <i>" + date + "</i>"
def make_text(state, date):
return "Garage was " + state + " at " + date
def state_change():
now = datetime.now()
now_str = now.strftime("%d-%b-%Y %H:%M:%S")
opened = status.garage_is_full_open()
closed = status.garage_is_full_close()
print("State changed to opened: "+str(opened)+" closed: "+str(closed)+" at" + now_str)
if opened:
send_state_change_mail("Opened", "#f0ad4e", now_str)
elif closed:
send_state_change_mail("Closed", "#5cb85c", now_str)
config.state_change_callbacks['notify'] = state_change
def test_email(request):
global settings
print('sending test emails')
if not get_setting('enabled'):
return HttpResponse("Email not enabled")
send_state_change_mail("Test", "#5bc0de", datetime.now().strftime("%d-%b-%Y %H:%M:%S"))
return HttpResponse("Test email sent")
def get_setting(setting):
if not settings_loaded:
reload_config()
return settings[setting]
def start_beep():
GPIO.output(gw_settings.BEEPER_PIN, True)
def stop_beep():
GPIO.output(gw_settings.BEEPER_PIN, False)
|
JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osglight.py | Python | bsd-3-clause | 10,059 | 0.015409 |
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osglight"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osglight.cpp'
# OpenSceneGraph example, osglight.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgViewer/Viewer>
#include <osg/Group>
#include <osg/Node>
#include <osg/Light>
#include <osg/LightSource>
#include <osg/StateAttribute>
#include <osg/Geometry>
#include <osg/Point>
#include <osg/MatrixTransform>
#include <osg/PositionAttitudeTransform>
#include <osgDB/Registry>
#include <osgDB/ReadFile>
#include <osgUtil/Optimizer>
#include <osgUtil/SmoothingVisitor>
#include "stdio.h"
# callback to make the loaded model oscillate up and down.
class ModelTransformCallback (osg.NodeCallback) :
ModelTransformCallback( osg.BoundingSphere bs)
_firstTime = 0.0
_period = 4.0
_range = bs.radius()*0.5
virtual void operator()(osg.Node* node, osg.NodeVisitor* nv)
pat = dynamic_cast<osg.PositionAttitudeTransform*>(node)
frameStamp = nv.getFrameStamp()
if pat and frameStamp :
if _firstTime==0.0 :
_firstTime = frameStamp.getSimulationTime()
phase = (frameStamp.getSimulationTime()-_firstTime)/_period
phase -= floor(phase)
phase *= (2.0 * osg.PI)
rotation = osg.Quat()
rotation.makeRotate(phase,1.0,1.0,1.0)
pat.setAttitude(rotation)
pat.setPosition(osg.Vec3(0.0,0.0,sin(phase))*_range)
# must traverse the Node's subgraph
traverse(node,nv)
_firstTime = double()
_period = double()
_range = double()
def createLights(bb, rootStateSet):
lightGroup = osg.Group()
modelSize = bb.radius()
# create a spot light.
myLight1 = osg.Light()
myLight1.setLightNum(0)
myLight1.setPosition(osg.Vec4(bb.corner(4),1.0))
myLight1.setAmbient(osg.Vec4(1.0,0.0,0.0,1.0))
myLight1.setDiffuse(osg.Vec4(1.0,0.0,0.0,1.0))
myLight1.setSpotCutoff(20.0)
myLight1.setSpotExponent(50.0)
myLight1.setDirection(osg.Vec3(1.0,1.0,-1.0))
lightS1 = osg.LightSource()
lightS1.setLight(myLight1)
lightS1.setLocalStateSetModes(osg.StateAttribute.ON)
lightS1.setStateSetModes(*rootStateSet,osg.StateAttribute.ON)
lightGroup.addChild(lightS1)
# create a local light.
myLight2 = osg.Light()
myLight2.setLightNum(1)
myLight2.setPosition(osg.Vec4(0.0,0.0,0.0,1.0))
myLight2.setAmbient(osg.Vec4(0.0,1.0,1.0,1.0))
myLight2.setDiffuse(osg.Vec4(0.0,1.0,1.0,1.0))
myLight2.setConstantAttenuation(1.0)
myLight2.setLinearAttenuation(2.0/modelSize)
myLight2.setQuadraticAttenuation(2.0/osg.square(modelSize))
lightS2 = osg.LightSource()
lightS2.setLight(myLight2)
lightS2.setLocalStateSetModes(osg.StateAttribute.ON)
lightS2.setStateSetModes(*rootStateSet,osg.StateAttribute.ON)
mt = osg.MatrixTransform()
# set up the animation path
animationPath = osg.AnimationPath()
animationPath.insert(0.0,osg.AnimationPath.ControlPoint(bb.corner(0)))
animationPath.insert(1.0,osg.AnimationPath.ControlPoint(bb.corner(1)))
animationPath.insert(2.0,osg.AnimationPath.ControlPoint(bb.corner(2)))
animationPath.insert(3.0,osg.AnimationPath.ControlPoint(bb.corner(3)))
animationPath.insert(4.0,osg.AnimationPath.ControlPoint(bb.corner(4)))
animationPath.insert(5.0,osg.AnimationPath.ControlPoint(bb.corner(5)))
animationPath.insert(6.0,osg.AnimationPath.ControlPoint(bb.corner(6)))
animationPath.insert(7.0,osg.AnimationPath.ControlPoint(bb.corner(7)))
animationPath.insert(8.0,osg.AnimationPath.ControlPoint(bb.corner(0)))
animationPath.setLoopMode(osg.AnimationPath.SWING)
mt.setUpdateCallback(osg.AnimationPathCallback(animationPath))
# create marker for point light.
marker = osg.Geometry()
vertices = osg.Vec3Array()
vertices.push_back(osg.Vec3(0.0,0.0,0.0))
marker.setVertexArray(vertices)
marker.addPrimitiveSet(osg.DrawArrays(GL_POINTS,0,1))
stateset = osg.StateSet()
point = osg.Point()
point.setSize(4.0)
stateset.setAttribute(point)
marker.setStateSet(stateset)
markerGeode = osg.Geode()
markerGeode.addDrawable(marker)
mt.addChild(lightS2)
mt.addChild(markerGeode)
lightGroup.addChild(mt)
return lightGroup
def createWall(v1, v2, v3, stateset):
# create a drawable for occluder.
geom = osg.Geometry()
geom.setStateSet(stateset)
noXSteps = 100
noYSteps = 100
coords = osg.Vec3Array()
coords.reserve(noXSteps*noYSteps)
dx = (v2-v1)/((float)noXSteps-1.0)
dy = (v3-v1)/((float)noYSteps-1.0)
row = unsigned int()
vRowStart = v1
for(row=0row<noYSteps++row)
v = vRowStart
for(unsigned int col=0col<noXSteps++col)
coords.push_back(v)
v += dx
vRowStart+=dy
geom.setVertexArray(coords)
colors = osg.Vec4Array(1)
(*colors)[0].set(1.0,1.0,1.0,1.0)
geom.setColorArray(colors, osg.Array.BIND_OVERALL)
for(row=0row<noYSteps-1++row)
quadstrip = osg.DrawElementsUShort(osg.PrimitiveSet.QUAD_STRIP)
quadstrip.reserve(noXSteps*2)
for(unsigned int col=0col<noXSteps++col)
quadstrip.push_back((row+1)*noXSteps+col)
quadstrip.push_back(row*noXSteps+col)
geom.addPrimitiveSet(quadstrip)
# create the normals.
osgUtil.SmoothingVisitor.smooth(*geom)
return geom
def createRoom(loadedModel):
# default scale for this model.
bs = osg.BoundingSphere(osg.Vec3(0.0,0.0,0.0),1.0)
root = osg.Group()
if loadedModel :
loaded_bs = loadedModel.getBound()
pat = osg.PositionAttitudeTransform()
pat.setPivotPoint(loaded_bs.center())
pat.setUpdateCallback(ModelTransformCallback(loaded_bs))
pat.addChild(loadedModel)
bs = pat.getBound()
root.addChild(pat)
bs.radius()*=1.5
# create a bounding box, which we'll use to size the room.
bb = osg.BoundingBox()
bb.expandBy(bs)
# create statesets.
rootStateSet = osg.StateSet()
root.setStateSet(rootStateSet)
wall = osg.StateSet()
wall.setMode(GL_CULL_FACE,osg.StateAttribute.ON)
floor = osg.StateSet()
floor.setMode(GL_CULL_FACE,osg.StateAttribute.ON)
roof = osg.StateSet()
roof.setMode(GL_CULL_FACE,osg.StateAttribute.ON)
geode = osg.Geode()
# create front side.
geode.addDrawable(createWall(bb.corner(0),
bb.corner(4),
bb.corner(1),
wall))
# right side
geode.addDrawable(createWall(bb.corner(1),
bb.corner(5),
bb.corner(3),
wall))
# left side
geode.addDrawable(createWall(bb.cor
|
MichaelMauderer/GeneaCrystal | geneacrystal/nodes.py | Python | gpl-3.0 | 10,098 | 0.008418 |
# GeneaCrystal Copyright (C) 2012-2013
# Christian Jaeckel, <christian.doe@gmail.com>
# Frederic Kerber, <fkerber@gmail.com>
# Pascal Lessel, <maverickthe6@gmail.com>
# Michael Mauderer, <mail@michaelmauderer.de>
#
# GeneaCrystal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GeneaCrystal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GeneaCrystal. If not, see <http://www.gnu.org/licenses/>.
import libavg as avg
import pymunk
from geneacrystal import util, physic
from geneacrystal.alphaKeyboard import AlphaKeyboard
from geneacrystal.highscore import Highscore
class ItemImageNode(avg.DivNode):
def __init__(self, href, size, *args, **kwargs):
avg.DivNode.__init__(self, *args, **kwargs)
self.pivot = 0, 0
self.opacity = 1
self.sensitive = False
imageNode = avg.ImageNode(parent=self,
opacity=1,
href=href,
size=size,
)
imageNode.pos = util.vectorMult(size, -0.5)
self.image = imageNode
if __debug__:
self.elementoutlinecolor = "FFFFFF"
@property
def size(self):
return self.image.size
@size.setter
def size(self, value):
self.image.size = value
util.centerNodeOnPosition(self.image, (0,0))
def setEffect(self, node):
self.image.setEffect(node)
def setEventHandler(self, *args, **kwargs):
return self.image.setEventHandler(*args, **kwargs)
class TouchPointNode(avg.CircleNode):
def delete(self):
self.unlink(True)
def __init__(self, space, theme=None, owner=None, *args, **kwargs):
avg.CircleNode.__init__(self, *args, **kwargs)
if theme is None:
from geneacrystal import themes
self._theme = themes.DefaultTheme
self.owner = owner
self._body = physic.TouchPointBody(self)
self._body.position = tuple(self.pos)
self.filltexhref = self._theme.getStaticImage("TouchPointNode")
#self.fillcolor = "00FF00"
self.strokewidth = 0
self.shape = pymunk.Circle(self._body, self.r, (0, 0))
self.shape.elasticity = 1
self.shape.collision_type = physic.TouchPointCollisionType
space.add(self._body, self.shape)
if __debug__:
print "Created ", self
def __str__(self, *args, **kwargs):
formatString = "TouchPointNode(pos={tp.pos}, owner={tp.owner})"
return formatString.format(tp=self)
class ShieldNode(avg.LineNode):
def __init__(self, space, owner=None, *args, **kwargs):
avg.LineNode.__init__(self, *args, **kwargs)
self._body = physic.ShieldBody(self)
self.owner = owner
self._body.position = tuple(self.pos1)
from geneacrystal import themes
self.texhref = themes.DefaultTheme.getStaticImage("Wall")
self.fillopacity = 0
self.opacity = 1
space.add(self._body, self._body.shape)
self._body.sleep()
def update(self, pos1, pos2):
self.pos1 = pos1
self.pos2 = pos2
self._body.position = tuple(self.pos1)
self._body.shape.b = util.transformVector((pos2.x - pos1.x, pos2.y - pos1.y))
def delete(self):
pass
class HighscoreEntryNode(avg.DivNode):
def __init__(self, mode, score, allScores, callback=None, theme=None, *args, **kwargs):
avg.DivNode.__init__(self, *args, **kwargs)
if theme is None:
from geneacrystal import themes
theme = themes.DefaultTheme
bgPath = theme.getStaticImage("keySymbol")
backPath = theme.getStaticImage("backspaceSymbol")
enterPath = theme.getStaticImage("enterSymbol")
shiftPath = theme.getStaticImage("shiftSymbol")
emptyPath = theme.getStaticImage("spaceSymbol")
highscore = Highscore(mode)
myScores = []
myScores.extend(allScores)
myScores.extend(highscore.scores)
myScores.sort(reverse=True, key=lambda val: int(val))
if len(myScores) < util.MAX_HIGHSCORE_LENGTH or score > int(myScores[9]) or score == int(myScores[9]) and not score in highscore.scores:
self.__value = ""
def onKeyDown(keyCode):
if len(self.__value) < 20:
self.__value += keyCode
self.__edit.text += keyCode
def onBack():
self.__value = self.__value[0:-1]
self.__edit.text = self.__value
def onEnter():
if not self.__value == "":
highscore.addEntry(self.__value, score)
if callback is not None:
callback(self.__value)
self._keyboard.cleanup()
self._keyboard.unlink(True)
self._keyboard = None
self.__edit.unlink(True)
self.__edit = None
self.unlink(True)
self.__edit = avg.WordsNode(size=(self.size.x, self.size.y // 8),
parent=self, fontsize=self.size.y // 8,
alignment="center")
self.__edit.pos = (self.size.x // 2, 0)
self._keyboard = AlphaKeyboard(bgPath, backPath, enterPath, shiftPath,
emptyPath , onKeyDown=onKeyDown,
onBack=onBack, onEnter=onEnter,
size=(self.size.x, self.size.y // 10 * 8),
pos=(0, self.size.y // 5),
parent=self)
else:
if callback is not None:
callback("")
self.unlink(True)
class ItemImageLayeredNode(avg.DivNode):
def __init__(self, layers,size, *args, **kwargs):
avg.DivNode.__init__(self, *args, **kwargs)
self.pivot = 0, 0
self.opacity = 1
self.sensitive = False
childPos = util.vectorMult(size, -0.5)
self._layer = []
self._topImage = None
for image in layers:
node = avg.ImageNode(parent=self,
opacity=1,
href=image,
size=size,
pos=childPos,
sensitive=False
)
self._layer.append(node)
node.sensitive=True
self._topImage = self._layer[-1]
def removeLayer(self, index):
node = self._layer[index]
node.unlink(True)
self._layer.remove(node)
if node == self._topImage:
self._topImage = self._layer[-1]
@property
def size(self):
return self._layer[0].size
def setEventHandler(self, *args, **kwargs):
return self._topImage.setEventHandler(*args, **kwargs)
def setEffect(self, *args, **kwargs):
for node in self._layer:
node.setEffect(*args, **kwargs)
class OverlayNode(avg.DivNode):
def __init__(self, theme=None, *
|
noironetworks/networking-cisco | networking_cisco/db/migration/alembic_migrations/versions/mitaka/expand/73c84db9f299_update_ha_group_primary_key.py | Python | apache-2.0 | 2,956 | 0 |
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import op
from neutron.db import migration
from sqlalchemy.engine import reflection
"""update_ha_group_primary_key
Revision ID: 73c84db9f299
Revises: 972479e0e629
Create Date: 2017-10-05 05:31:54.243849
"""
# revision identifiers, used by Alembic.
revision = '73c84db9f299'
down_revision = '972479e0e629'
def upgrade():
if migration.schema_has_table('cisco_router_ha_groups'):
inspector = reflection.Inspector.from_engine(op.get_bind())
foreign_keys = inspector.get_foreign_keys('cisco_router_ha_groups')
migration.remove_foreign_keys('cisco_router_ha_groups', foreign_keys)
primary_key = inspector.get_pk_constraint('cisco_router_ha_groups')
op.drop_constraint(constraint_name=primary_key['name'],
table_name='cisco_router_ha_groups',
type_='primary')
op.create_primary_key(
constraint_name='pk_cisco_router_ha_groups',
table_name='cisco_router_ha_groups',
columns=['ha_port_id', 'subnet_id'])
op.create_foreign_key('cisco_router_ha_groups_ibfk_1',
source_table='cisco_router_ha_groups',
referent_table='ports',
local_cols=['ha_port_id'],
remote_cols=['id'],
ondelete='CASCADE')
op.create_foreign_key('cisco_router_ha_groups_ibfk_2',
source_table='cisco_router_ha_groups',
referent_table='ports',
local_cols=['extra_port_id'],
remote_cols=['id'],
ondelete='SET NULL')
op.create_foreign_key('cisco_router_ha_groups_ibfk_3',
source_table='cisco_router_ha_groups',
referent_table='subnets',
local_cols=['subnet_id'],
remote_cols=['id'])
op.create_foreign_key('cisco_router_ha_groups_ibfk_4',
source_table='cisco_router_ha_groups',
referent_table='routers',
local_cols=['user_router_id'],
remote_cols=['id'])
|
tangentlabs/django-oscar-fancypages | oscar_fancypages/fancypages/templatetags/fp_container_tags.py | Python | bsd-3-clause | 56 | 0 |
from fancypages.templatetags.fp_container_tags import *
|
fsinf/certificate-authority | ca/django_ca/migrations/0002_auto_20151223_1508.py | Python | gpl-3.0 | 1,224 | 0.001634 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-23 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_ca', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='certificate',
name='cn',
field=models.CharField(max_length=64, verbose_name='CommonName'),
),
migrations.AlterField(
model_name='certificate',
name='csr',
field=models.TextField(verbose_name='CSR'),
),
migrations.AlterField(
model_name='certificate',
name='pub',
field=models.TextField(verbose_name='Public key'),
),
migrations.AlterField(
model_name='certificate',
name='revoked_date',
field=models.DateTimeField(blank=True, null=True, verbose_name='Revoked on'),
),
migrations.AlterField(
model_name='certificate',
name='revoked_reason',
field=models.CharField(blank=True, max_length=32, null=True, verbose_name='Reason for revocation'),
),
]
|
CivicKnowledge/rowgenerators | rowgenerators/appurl/archive/zip.py | Python | mit | 6,923 | 0.004911 |
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT, included in this distribution as LICENSE
""" """
from rowgenerators.appurl.file.file import FileUrl
from rowgenerators.exceptions import AppUrlError
class ZipUrlError(AppUrlError):
pass
class ZipUrl(FileUrl):
"""Zip URLS represent a zip file, as a local resource. """
match_priority = FileUrl.match_priority - 10
def __init__(self, url=None, downloader=None, **kwargs):
kwargs['resource_format'] = 'zip'
super().__init__(url, downloader=downloader, **kwargs)
@property
def target_file(self):
"""
Returns the target file, which is usually stored in the first slot in the ``fragment``,
but may have been overridden with a ``fragment_query``.
:return:
"""
if self._target_file:
return self._target_file
if self.fragment[0]:
return self.fragment[0]
for ext in ('csv', 'xls', 'xlsx'):
if self.resource_file.endswith('.' + ext + '.zip'):
return self.resource_file.replace('.zip', '')
# Want to return None, so get_file_from_zip can assume to use the first file in the archive.
return None
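# Illustrative resolution order for target_file (hypothetical names, not
# from the source):
# - fragment ['data.csv', ...]                  -> 'data.csv'
# - resource 'report.csv.zip' with no fragment  -> 'report.csv'
# - otherwise None, and get_file_from_zip() falls back to the first
#   file in the archive.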
def join_target(self, tf):
"""
Joins the target ``tf`` by setting the value of the first slot of the fragment.
:param tf:
:return: a clone of this url with a new fragment.
"""
u = self.clone()
try:
tf = str(tf.path)
except:
pass
u.fragment = [tf, u.fragment[1]] # In case its a tuple, don't edit in place
return u
def get_resource(self):
return self
@property
def zip_dir(self):
"""Directory that files will be extracted to"""
from os.path import abspath
cache_dir = self.downloader.cache.getsyspath('/')
target_path = abspath(self.fspath)
if target_path.startswith(cache_dir): # Case when file is already in cache
return str(self.fspath) + '_d'
else: # file is not in cache; it may exist elsewhere.
return self.downloader.cache.getsyspath(target_path.lstrip('/'))+'_d'
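# Illustrative paths for the two branches above (hypothetical, assuming the
# downloader cache lives at /tmp/cache):
# - already cached:  /tmp/cache/nums.zip  -> /tmp/cache/nums.zip_d
# - outside cache:   /data/nums.zip       -> /tmp/cache/data/nums.zip_d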
def get_target(self):
"""
Extract the target file from the archive, store it in the cache, and return a file Url to the
cached file.
"""
from rowgenerators.appurl.url import parse_app_url
from zipfile import ZipFile
import io
from os.path import join, dirname
from rowgenerators.appurl.util import copy_file_or_flo, ensure_dir
assert self.zip_dir
zf = ZipFile(str(self.fspath))
self._target_file = ZipUrl.get_file_from_zip(self)
target_path = join(self.zip_dir, self.target_file)
ensure_dir(dirname(target_path))
with io.open(target_path, 'wb') as f, zf.open(self.target_file) as flo:
copy_file_or_flo(flo, f)
fq = self.fragment_query
if 'resource_format' in fq:
del fq['resource_format']
if 'resource_file' in fq:
del fq['resource_file']
tu = parse_app_url(target_path,
fragment_query=fq,
fragment=[self.target_segment, None],
scheme_extension=self.scheme_extension,
# Clear out the resource info so we don't get a ZipUrl
downloader=self.downloader
)
if self.target_format != tu.target_format:
try:
tu.target_format = self.target_format
except AttributeError:
pass # Some URLS don't allow resetting target type.
return tu
def list(self):
"""List the files in the referenced Zip file"""
from zipfile import ZipFile
if self.target_file:
return list(self.set_target_segment(tl.target_segment) for tl in self.get_target().list())
else:
real_files = ZipUrl.real_files_in_zf(ZipFile(str(self.fspath)))
return list(self.set_target_file(rf) for rf in real_files)
@staticmethod
def get_file_from_zip(url):
"""Given a file name that may be a regular expression, return the full name for the file
from a zip archive"""
from zipfile import ZipFile
import re
names = []
zf = ZipFile(str(url.fspath))
nl = list(ZipUrl.real_files_in_zf(zf)) # Old way, but maybe gets links? : list(zf.namelist())
tf = url.target_file
ts = url.target_segment
if not nl:
# sometimes real_files_in_zf doesn't work at all. I don't know why it does work,
# so I certainly don't know why it does not.
nl = list(zf.namelist())
# the target_file may be a string, or a regular expression
if tf:
names = list([e for e in nl if re.search(tf, e)
and not (e.startswith('__') or e.startswith('.'))
])
if len(names) > 0:
return names[0]
# The segment, if it exists, can only be an integer, and should probably be
# '0' to indicate the first file. This clause is probably a bad idea, since
# any other integer is probably meaningless.
if ts:
try:
return nl[int(ts)]
except (IndexError, ValueError):
pass
# Just return the first file in the archive.
if not tf and not ts:
return nl[0]
else:
raise ZipUrlError("Could not find file in Zip {} for target='{}' nor segment='{}'"
.format(url.fspath, url.target_file, url.target_segment))
@staticmethod
def real_files_in_zf(zf):
"""Return a list of internal paths of real files in a zip file, based on the 'external_attr' values"""
from os.path import basename
for e in zf.infolist():
# Get rid of __MACOS and .DS_whatever
if basename(e.filename).startswith('__') or basename(e.filename).startswith('.'):
continue
# I really don't understand external_attr, but no one else seems to either,
# so we're just hacking here.
# e.external_attr>>31&1 works when the archive has external attrs set, and a dir hierarchy
# e.external_attr==0 works in cases where there are no external attrs set
# e.external_attr==32 is true for some single-file archives.
if bool(e.external_attr >> 31 & 1 or e.external_attr == 0 or e.external_attr == 32):
yield e.filename
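# Hypothetical illustration (archive contents assumed): for an archive
# listing ['__MACOSX/a', '.DS_Store', 'data/', 'data/a.csv'], the basename
# filter drops the '__'/'.' entries and the external_attr test is meant to
# drop the bare directory entry, leaving only 'data/a.csv'.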
@classmethod
def _match(cls, url, **kwargs):
return url.resource_format == 'zip' or kwargs.get('force_archive')
|
zhitiancheng/cliff | cliff/selection.py | Python | gpl-2.0 | 593 | 0 |
"""
This module is used to select features or proposals
"""
def select_preceding(features, k):
""" select preceding k features or proposals for each image
:param k: preceding k features or proposals for each image are selected
:type k: integer
:return: selected features or proposals
:rtype: list. Each element is a k'-by-m ndarray, where m is feature
dimension or 4 for proposals. If there are enough features or proposals
for selection, then k' = k, else all features or proposals are
selected.
"""
return [i[:k] for i in features]
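# A hypothetical usage sketch (arrays assumed, not from the source):
# import numpy as np
# features = [np.zeros((10, 128)), np.zeros((3, 128))]
# select_preceding(features, 5)  # -> shapes (5, 128) and (3, 128)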
|
YannThorimbert/ThorPy-1.4.3 | thorpy/__init__.py | Python | mit | 3,614 | 0.017432 |
__version__ = "1.4.3"
import sys
import os
# verify that pygame is on the machine
try:
import pygame
except Exception:
print("Pygame doesn't seem to be installed on this machine.")
# add thorpy folder to Windows and Python search paths
THORPY_PATH = os.path.abspath(os.path.dirname(__file__))
try:
os.environ['PATH'] = ';'.join((THORPY_PATH, os.environ['PATH']))
sys.path.append(THORPY_PATH)
except Exception:
print("Couldn't add Thor to sys.path...\nThorPy path : " + THORPY_PATH)
USEREVENT = pygame.USEREVENT + 1 # thorpy takes one event of pygame's userevents
#import subpackages
import thorpy.elements
import thorpy.menus
import thorpy._utils
import thorpy.miscgui
import thorpy.painting as painting
import thorpy.miscgui.application as application
import thorpy.miscgui.storage as storage
import testmodule
# not all elements are imported; only those that can be safely used by the
# lambda user.
from thorpy.elements.launchers.boxlauncher import BoxLauncher
from thorpy.elements.launchers.browserlauncher import BrowserLauncher
from thorpy.elements.launchers.dropdownlistlauncher import DropDownListLauncher
from thorpy.elements.launchers._launcher import _Launcher
from thorpy.elements.background import Background
from thorpy.elements.image import Image
from thorpy.elements.box import Box, BarBox
from thorpy.elements.browserlight import BrowserLight
from thorpy.elements.browser import Browser
from thorpy.elements.checker import Checker
from thorpy.elements.clickable import Clickable
from thorpy.elements._wrappers import make_button, make_text
from thorpy.elements.colorsetter import ColorSetter
from thorpy.elements.ddlf import DropDownListFast as DropDownList
from thorpy.elements.draggable import Draggable, ClickDraggable
from thorpy.elements.element import Element
from thorpy.elements.ghost import Ghost
from thorpy.elements.hoverable import Hoverable
from thorpy.elements.hoverzone import HoverZone
from thorpy.elements.inserter import Inserter
from thorpy.elements.keypressable import KeyPressable
from thorpy.elements.keytogglable import KeyTogglable
from thorpy.elements.launchers.paramsetter import ParamSetter
from thorpy.elements.pressable import Pressable
##from thorpy.elements.text import MultilineText
from thorpy.elements.text import OneLineText, MultilineText
from thorpy.elements.slidersetter import SliderXSetter as SliderX
from thorpy.elements.togglable import Togglable
from thorpy.elements.line import Line
from thorpy.elements._makeuputils._halo import Halo
from thorpy.elements._makeuputils._shadow import StaticShadow
from thorpy.elements._makeuputils._shadow import DynamicShadow
# menus:
from thorpy.menus.tickedmenu import TickedMenu as Menu
from thorpy.menus.basicmenu import BasicMenu
# miscellaneous stuff, constants, parameters
from thorpy.miscgui.application import Application
from thorpy.miscgui.reaction import Reaction, ConstantReaction
from thorpy.miscgui import constants, functions
from thorpy.miscgui import style
from thorpy.miscgui import painterstyle
from thorpy.miscgui import parameters
from thorpy.miscgui.initializer import Initializer
from thorpy.miscgui.state import State
from thorpy.miscgui.storage import Storer, store
from thorpy.miscgui.title import Title
from thorpy.miscgui.varset import VarSet
from thorpy.miscgui import theme
from thorpy.miscgui.theme import set_theme as set_theme
from thorpy.painting.writer import Writer
from thorpy.painting import painters
from thorpy.painting import makeup
from thorpy.gamestools.basegrid import BaseGrid
from thorpy.gamestools.grid import Grid
del thorpy, pygame, os, sys
|
ajaygarg84/sugar | src/jarabe/journal/palettes.py | Python | gpl-2.0 | 15,839 | 0.000126 |
# Copyright (C) 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
import logging
import os
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GConf
from gi.repository import Gio
from gi.repository import GLib
from sugar3.graphics import style
from sugar3.graphics.palette import Palette
from sugar3.graphics.menuitem import MenuItem
from sugar3.graphics.icon import Icon
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics.alert import Alert
from sugar3 import mime
from jarabe.model import friends
from jarabe.model import filetransfer
from jarabe.model import mimeregistry
from jarabe.journal import misc
from jarabe.journal import model
from jarabe.journal import journalwindow
class ObjectPalette(Palette):
__gtype_name__ = 'ObjectPalette'
__gsignals__ = {
'detail-clicked': (GObject.SignalFlags.RUN_FIRST, None,
([str])),
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, metadata, detail=False):
self._metadata = metadata
activity_icon = Icon(icon_size=Gtk.IconSize.LARGE_TOOLBAR)
activity_icon.props.file = misc.get_icon_name(metadata)
color = misc.get_icon_color(metadata)
activity_icon.props.xo_color = color
if 'title' in metadata:
title = GObject.markup_escape_text(metadata['title'])
else:
title = GLib.markup_escape_text(_('Untitled'))
Palette.__init__(self, primary_text=title,
icon=activity_icon)
if misc.get_activities(metadata) or misc.is_bundle(metadata):
if metadata.get('activity_id', ''):
resume_label = _('Resume')
resume_with_label = _('Resume with')
else:
resume_label = _('Start')
resume_with_label = _('Start with')
menu_item = MenuItem(resume_label, 'activity-start')
menu_item.connect('activate', self.__start_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(resume_with_label, 'activity-start')
self.menu.append(menu_item)
menu_item.show()
start_with_menu = StartWithMenu(self._metadata)
menu_item.set_submenu(start_with_menu)
else:
menu_item = MenuItem(_('No activity to start entry'))
menu_item.set_sensitive(False)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Copy to'))
icon = Icon(icon_name='edit-copy', xo_color=color,
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
self.menu.append(menu_item)
menu_item.show()
copy_menu = CopyMenu(metadata)
copy_menu.connect('volume-error', self.__volume_error_cb)
menu_item.set_submenu(copy_menu)
if self._metadata['mountpoint'] == '/':
menu_item = MenuItem(_('Duplicate'))
icon = Icon(icon_name='edit-duplicate', xo_color=color,
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
menu_item.connect('activate', self.__duplicate_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Send to'), 'document-send')
self.menu.append(menu_item)
menu_item.show()
friends_menu = FriendsMenu()
friends_menu.connect('friend-selected', self.__friend_selected_cb)
menu_item.set_submenu(friends_menu)
if detail == True:
menu_item = MenuItem(_('View Details'), 'go-right')
menu_item.connect('activate', self.__detail_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Erase'), 'list-remove')
menu_item.connect('activate', self.__erase_activate_cb)
self.menu.append(menu_item)
menu_item.show()
def __start_activate_cb(self, menu_item):
misc.resume(self._metadata)
def __duplicate_activate_cb(self, menu_item):
file_path = model.get_file(self._metadata['uid'])
try:
model.copy(self._metadata, '/')
except IOError, e:
logging.exception('Error while copying the entry. %s', e.strerror)
self.emit('volume-error',
_('Error while copying the entry. %s') % e.strerror,
_('Error'))
def __erase_activate_cb(self, menu_item):
alert = Alert()
erase_string = _('Erase')
alert.props.title = erase_string
alert.props.msg = _('Do you want to permanently erase \"%s\"?') \
% self._metadata['title']
icon = Icon(icon_name='dialog-cancel')
alert.add_button(Gtk.ResponseType.CANCEL, _('Cancel'), icon)
icon.show()
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, erase_string, ok_icon)
ok_icon.show()
alert.connect('response', self.__erase_alert_response_cb)
journalwindow.get_journal_window().add_alert(alert)
alert.show()
def __erase_alert_response_cb(self, alert, response_id):
journalwindow.get_journal_window().remove_alert(alert)
if response_id is Gtk.ResponseType.OK:
model.delete(self._metadata['uid'])
def __detail_activate_cb(self, menu_item):
self.emit('detail-clicked', self._metadata['uid'])
def __volume_error_cb(self, menu_item, message, severity):
self.emit('volume-error', message, severity)
def __friend_selected_cb(self, menu_item, buddy):
logging.debug('__friend_selected_cb')
file_name = model.get_file(self._metadata['uid'])
if not file_name or not os.path.exists(file_name):
logging.warn('Entries without a file cannot be sent.')
self.emit('volume-error',
_('Entries without a file cannot be sent.'),
_('Warning'))
return
title = str(self._metadata['title'])
description = str(self._metadata.get('description', ''))
mime_type = str(self._metadata['mime_type'])
if not mime_type:
mime_type = mime.get_for_file(file_name)
filetransfer.start_transfer(buddy, file_name, title, description,
mime_type)
class CopyMenu(Gtk.Menu):
__gtype_name__ = 'JournalCopyMenu'
__gsignals__ = {
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, metadata):
Gtk.Menu.__init__(self)
self._metadata = metadata
clipboard_menu = ClipboardMenu(self._metadata)
clipboard_menu.set_image(Icon(icon_name='toolbar-edit',
icon_size=Gtk.IconSize.MENU))
clipboard_menu.connect('volume-error', self.__volume_error_cb)
self.append(clipboard_menu)
clipboard_menu.show()
if self._metadata['mountpoint'] != '/':
client = GConf.Client.get_default()
color = XoColor(client.get_string('/desktop/sugar/user/color'))
journal_menu = VolumeMenu(self._metadata, _('Journal'), '/')
journal_menu.set_image(Icon(icon_name='activity-jo
|
zhongjingjogy/SmartQQBot | smartqq/main.py | Python | gpl-2.0 | 2,757 | 0.002176 |
#coding=utf-8
import argparse
import json
import os
from smartqq import start_qq, list_messages, create_db
def load_pluginconfig(configjson):
config = None
if configjson is not None:
if os.path.isfile(configjson):
with open(configjson, "r") as f:
config = json.load(f)
else:
print("unable to load the configuration file for plugins, default settings will be used.")
return config
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--no-gui",
action="store_true",
default=False,
help="Whether display QRCode wit
|
h tk and PIL."
)
parser.add_argument(
"--new-use
|
r",
action="store_true",
default=False,
help="Logout old user first(by clean the cookie file.)"
)
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Switch to DEBUG mode for better view of requests and responses."
)
parser.add_argument(
"--plugin",
default="config.json",
help="Specify the json file for the setting of the plugins."
)
parser.add_argument(
"--cookie",
default="cookie.data",
help="Specify the storage path for cookie."
)
parser.add_argument(
"--vpath",
default="./v.jpg",
help="Specify the storage path for login bar code."
)
parser.add_argument(
"--list",
action="store_true",
default=False,
help="List the recored qq messages."
)
parser.add_argument(
"--create",
action="store_true",
default=False,
help="List the recored qq messages."
)
options = parser.parse_args()
try:
configjson = load_pluginconfig(options.plugin)
print("got json: %s" % configjson)
except:
print("using default setting")
configjson = {
"dbhandler": "sqlite:///message-record.db",
"plugin_root": "./plugins",
"plugins": [
"pluginmanage",
"plugindemo"
]
}
if options.list:
list_messages()
elif options.create:
create_db(configjson["dbhandler"])
else:
try:
start_qq(
plugin_setting=configjson,
no_gui=options.no_gui,
new_user=options.new_user,
debug=options.debug,
dbhandler=configjson["dbhandler"],
cookie_file=options.cookie,
vpath=options.vpath
)
except KeyboardInterrupt:
exit(0)
if __name__ == "__main__":
main()
|
thomasyu888/synapsePythonClient | tests/integration/synapseutils/test_synapseutils_sync.py | Python | apache-2.0 | 11,189 | 0.003582 |
import uuid
import os
import time
import tempfile
import pandas as pd
import pytest
from synapseclient.core.exceptions import SynapseHTTPError
from synapseclient import Entity, File, Folder, Link, Project, Schema
import synapseclient.core.utils as utils
import synapseutils
from tests.integration import QUERY_TIMEOUT_SEC
@pytest.fixture(scope='module', autouse=True)
def test_state(syn, schedule_for_cleanup):
class TestState:
def __init__(self):
self.syn = syn
self.project = syn.store(Project(name=str(uuid.uuid4())))
self.folder = syn.store(Folder(name=str(uuid.uuid4()), parent=self.project))
self.schedule_for_cleanup = schedule_for_cleanup
# Create testfiles for upload
self.f1 = utils.make_bogus_data_file(n=10)
self.f2 = utils.make_bogus_data_file(n=10)
self.f3 = 'https://www.synapse.org'
self.header = 'path\tparent\tused\texecuted\tactivityName\tsynapseStore\tfoo\n'
self.row1 = '%s\t%s\t%s\t"%s;https://www.example.com"\tprovName\tbar\n' % (
self.f1, self.project.id, self.f2, self.f3
)
self.row2 = '%s\t%s\t"syn12"\t"syn123;https://www.example.com"\tprovName2\tbar\n' % (
self.f2, self.folder.id
)
self.row3 = '%s\t%s\t"syn12"\tprov2\tFalse\tbaz\n' % (self.f3, self.folder.id)
self.row4 = '%s\t%s\t%s\tact\t2\n' % (self.f3, self.project.id, self.f1) # Circular reference
self.row5 = '%s\tsyn12\t\n' % (self.f3) # Wrong parent
test_state = TestState()
schedule_for_cleanup(test_state.project)
schedule_for_cleanup(test_state.f1)
schedule_for_cleanup(test_state.f2)
return test_state
def _makeManifest(content, schedule_for_cleanup):
with tempfile.NamedTemporaryFile(mode='w', suffix=".dat", delete=False) as f:
f.write(content)
filepath = utils.normalize_path(f.name)
schedule_for_cleanup(filepath)
return filepath
def test_readManifest(test_state):
"""Creates multiple manifests and verifies that they validate correctly"""
# Test manifest with missing columns
manifest = _makeManifest(
'"path"\t"foo"\n#"result_data.txt"\t"syn123"',
test_state.schedule_for_cleanup
)
pytest.raises(ValueError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
# Test that there are no circular references in file and that Provenance is correct
manifest = _makeManifest(
test_state.header + test_state.row1 + test_state.row2 + test_state.row4,
test_state.schedule_for_cleanup
)
pytest.raises(RuntimeError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
# Test non existent parent
manifest = _makeManifest(
test_state.header + test_state.row1 + test_state.row5,
test_state.schedule_for_cleanup
)
pytest.raises(SynapseHTTPError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
# Test that all files exist in manifest
manifest = _makeManifest(
test_state.header + test_state.row1 + test_state.row2 + '/bara/basdfasdf/8hiuu.txt\tsyn123\n',
test_state.schedule_for_cleanup
)
pytest.raises(IOError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
def test_syncToSynapse(test_state):
# Test upload of accurate manifest
manifest = _makeManifest(
test_state.header + test_state.row1 + test_state.row2 + test_state.row3,
test_state.schedule_for_cleanup
)
synapseutils.syncToSynapse(test_state.syn, manifest, sendMessages=False, retries=2)
# syn.getChildren() used by syncFromSynapse() may intermittently have timing issues
time.sleep(3)
# Download using syncFromSynapse
tmpdir = tempfile.mkdtemp()
test_state.schedule_for_cleanup(tmpdir)
synapseutils.syncFromSynapse(test_state.syn, test_state.project, path=tmpdir)
orig_df = pd.read_csv(manifest, sep='\t')
orig_df.index = [os.path.basename(p) for p in orig_df.path]
new_df = pd.read_csv(os.path.join(tmpdir, synapseutils.sync.MANIFEST_FILENAME), sep='\t')
new_df.index = [os.path.basename(p) for p in new_df.path]
assert len(orig_df) == len(new_df)
new_df = new_df.loc[orig_df.index]
# Validate what was uploaded is in right location
assert new_df.parent.equals(orig_df.parent), 'Downloaded files not stored in same location'
# Validate that annotations were set
cols = synapseutils.sync.REQUIRED_FIELDS + synapseutils.sync.FILE_CONSTRUCTOR_FIELDS\
+ synapseutils.sync.STORE_FUNCTION_FIELDS + synapseutils.sync.PROVENANCE_FIELDS
orig_anots = orig_df.drop(cols, axis=1, errors='ignore')
new_anots = new_df.drop(cols, axis=1, errors='ignore')
assert orig_anots.shape[1] == new_anots.shape[1] # Verify that we have the same number of cols
assert new_anots.equals(orig_anots.loc[:, new_anots.columns]), 'Annotations different'
# Validate that provenance is correct
for provenanceType in ['executed', 'used']:
# Go through each row
for orig, new in zip(orig_df[provenanceType], new_df[provenanceType]):
if not pd.isnull(orig) and not pd.isnull(new):
# Convert local file paths into synId.versionNumber strings
orig_list = ['%s.%s' % (i.id, i.versionNumber) if isinstance(i, Entity) else i
for i in test_state.syn._convertProvenanceList(orig.split(';'))]
new_list = ['%s.%s' % (i.id, i.versionNumber) if isinstance(i, Entity) else i
for i in test_state.syn._convertProvenanceList(new.split(';'))]
assert set(orig_list) == set(new_list)
def test_syncFromSynapse(test_state):
"""This function tests recursive download as defined in syncFromSynapse
most of the functionality of this function
are already tested in the
tests/integration/test_command_line_client::test_command_get_recursive_and_query
which means that the only test is for path=None
"""
# Create a Project
project_entity = test_state.syn.store(Project(name=str(uuid.uuid4())))
test_state.schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
# Create and upload two files in Folder
uploaded_paths = []
for i in range(2):
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
test_state.schedule_for_cleanup(f)
test_state.syn.store(File(f, parent=folder_entity))
# Add a file in the project level as well
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
test_state.schedule_for_cleanup(f)
test_state.syn.store(File(f, parent=project_entity))
# syncFromSynapse() uses chunkedQuery() which will return results that are eventually consistent
# but not always right after the entity is created.
start_time = time.time()
while len(list(test_state.syn.getChildren(project_entity))) != 2:
assert time.time() - start_time < QUERY_TIMEOUT_SEC
time.sleep(2)
# Test recursive get
output = synapseutils.syncFromSynapse(test_state.syn, project_entity)
assert len(output) == len(uploaded_paths)
for f in output:
assert utils.normalize_path(f.path) in uploaded_paths
def test_syncFromSynapse__children_contain_non_file(test_state):
proj = test_state.syn.store(Project(name="test_syncFromSynapse_children_non_file" + str(uuid.uuid4())))
test_state.schedule_for_cleanup(proj)
temp_file = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(temp_file)
file_entity = test_state.syn.store(
File(
temp_file,
name="temp_file_test_syncFromSynapse_children_non_file" + str(uuid.uuid4()),
parent=proj
)
)
test_state.syn.store(Schema(name="table_test_syncFromSynapse", parent=proj))
temp_folder = tempfile.mkdtemp()
test_state.schedule_for_cleanup(temp_folder)
files_list = synapseutils.syncFromSynapse(test_state.syn, proj, temp_folder)
assert 1 == len(fil
|
aswolf/xmeos | xmeos/models/gamma.py | Python | mit | 11,700 | 0.005215 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
import numpy as np
import scipy as sp
from abc import ABCMeta, abstractmethod
from scipy import integrate
import scipy.interpolate as interpolate
from . import core
from . import refstate
__all__ = ['GammaEos','GammaCalc']
#====================================================================
# Base Class
#====================================================================
def set_calculator(eos_mod, kind, kind_opts):
assert kind in kind_opts, (
kind + ' is not a valid thermal calculator. '+
'You must select one of: ' + str(kind_opts))
eos_mod._kind = kind
if kind=='GammaPowLaw':
calc = _GammaPowLaw(eos_mod)
elif kind=='GammaShiftPowLaw':
calc = _GammaShiftPowLaw(eos_mod)
elif kind=='GammaFiniteStrain':
calc = _GammaFiniteStrain(eos_mod)
else:
raise NotImplementedError(kind+' is not a valid '+
'GammaEos Calculator.')
eos_mod._add_calculator(calc, calc_type='gamma')
pass
#====================================================================
class GammaEos(with_metaclass(ABCMeta, core.Eos)):
"""
EOS model for compression dependence of Grüneisen parameter.
Parameters
----------
Thermodyn properties depend only on volume
"""
_kind_opts = ['GammaPowLaw','GammaShiftPowLaw','GammaFiniteStrain']
def __init__(self, kind='GammaPowLaw', natom=1, model_state={}):
self._pre_init(natom=natom)
set_calculator(self, kind, self._kind_opts)
ref_compress_state='P0'
ref_thermal_state='T0'
ref_energy_type = 'E0'
refstate.set_calculator(self, ref_compress_state=ref_compress_state,
ref_thermal_state=ref_thermal_state,
ref_energy_type=ref_energy_type)
# self._set_ref_state()
self._post_init(model_state=model_state)
pass
def __repr__(self):
calc = self.calculators['gamma']
return ("GammaEos(kind={kind}, natom={natom}, "
"model_state={model_state}, "
")"
.format(kind=repr(calc.name),
natom=repr(self.natom),
model_state=self.model_state
)
)
def _set_ref_state(self):
calc = self.calculators['gamma']
path_const = calc.path_const
if path_const=='S':
param_ref_names = []
param_ref_units = []
param_ref_defaults = []
param_ref_scales = []
else:
raise NotImplementedError(
'path_const '+path_const+' is not valid for ThermalEos.')
self._path_const = calc.path_const
self._param_ref_names = param_ref_names
self._param_ref_units = param_ref_units
self._param_ref_defaults = param_ref_defaults
self._param_ref_scales = param_ref_scales
pass
def gamma(self, V_a):
gamma_a = self.calculators['gamma']._calc_gamma(V_a)
return gamma_a
def gamma_deriv(self, V_a):
gamma_deriv_a = self.calculators['gamma']._calc_gamma_deriv(V_a)
return gamma_deriv_a
def temp(self, V_a, T0=None):
temp_a = self.calculators['gamma']._calc_temp(V_a, T0=T0)
return temp_a
#====================================================================
class GammaCalc(with_metaclass(ABCMeta, core.Calculator)):
"""
Abstract Equation of State class for a reference Compression Path
Path can either be isothermal (T=const) or adiabatic (S=const)
For this restricted path, thermodyn properties depend only on volume
"""
def __init__(self, eos_mod):
self._eos_mod = eos_mod
self._init_params()
self._path_const = 'S'
pass
@property
def path_const( self ):
return self._path_const
####################
# Required Methods #
####################
@abstractmethod
def _init_params( self ):
"""Initialize list of calculator parameter names."""
pass
@abstractmethod
def _calc_gamma(self, V_a):
pass
@abstractmethod
def _calc_gamma_deriv(self, V_a):
pass
@abstractmethod
def _calc_temp(self, V_a, T0=None):
pass
def _calc_theta(self, V_a):
theta0 = self.eos_mod.get_param_values(param_names=['theta0'])
theta = self._calc_temp(V_a, T0=theta0)
return theta
####################
# Optional Methods #
####################
# EOS property functions
def _calc_param_deriv(self, fname, paramname, V_a, dxfrac=1e-6):
scale_a, paramkey_a = self.get_param_scale(apply_expand_adj=True )
scale = scale_a[paramkey_a==paramname][0]
# print 'scale: ' + np.str(scale)
#if (paramname is 'E0') and (fname is 'energy'):
# return np.ones(V_a.shape)
try:
fun = getattr(self, fname)
# Note that self is implicitly included
val0_a = fun(V_a)
except:
assert False, 'That is not a valid function name ' + \
'(e.g. it should be press or energy)'
try:
param = core.get_params([paramname])[0]
dparam = scale*dxfrac
# print 'param: ' + np.str(param)
# print 'dparam: ' + np.str(dparam)
except:
assert False, 'This is not a valid parameter name'
# set param value in eos_d dict
core.set_params([paramname,], [param+dparam,])
# Note that self is implicitly included
dval_a = fun(V_a) - val0_a
# reset param to original value
core.set_params([paramname], [param])
deriv_a = dval_a/dxfrac
return deriv_a
def _calc_energy_perturb(self, V_a):
"""Returns Energy pertubation basis functions resulting from fractional changes to EOS params."""
fname = 'energy'
scale_a, paramkey_a = self.get_param_scale(
apply_expand_adj=self.expand_adj)
Eperturb_a = []
for paramname in paramkey_a:
iEperturb_a = self._calc_param_deriv(fname, paramname, V_a)
Eperturb_a.append(iEperturb_a)
Eperturb_a = np.array(Eperturb_a)
return Eperturb_a, scale_a, paramkey_a
#====================================================================
# Implementations
#====================================================================
class _GammaPowLaw(GammaCalc):
_path_opts=['S']
def __init__(self, eos_mod):
super(_GammaPowLaw, self).__init__(eos_mod)
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
V0 = 100
gamma0 = 1.0
q = 1.0
self._param_names = ['V0', 'gamma0', 'q']
self._param_units = ['ang^3', '1', '1']
self._param_defaults = [V0, gamma0, q]
self._param_scales = [V0, gamma0, q]
pass
def _calc_gamma(self, V_a):
V0, gamma0, q = self.eos_mod.get_param_values(
param_names=['V0','gamma0','q'])
gamma_a = gamma0 *(V_a/V0)**q
return gamma_a
def _calc_gamma_deriv(self, V_a):
q, = self.eos_mod.get_param_values(param_names=['q'])
gamma_a = self._calc_gamma(V_a)
gamma_deriv_a = q*gamma_a/V_a
return gamma_deriv_a
def _calc_temp(self, V_a, T0=None):
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
# T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
gamma0, q = self.eos_mod.get_param_values(
param_names=['gamma0','q'])
gamma_a = self._calc_gamma(V_a)
T_a = T0*np.exp(-(gamma_a - gamma0)/q)
return T_a
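# Worked check of the two formulas above (hypothetical values): with
# V0=100, gamma0=1.0, q=1.0 and V=50, gamma = 1.0*(50/100)**1 = 0.5 and
# T = T0*exp(-(0.5 - 1.0)/1.0) = T0*exp(0.5) ~= 1.65*T0 along the adiabat.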
#====================================================================
class _GammaShiftPowLaw(GammaCalc):
"""
Shifted Power Law description of Grüneisen Parameter (Al’tshuler, 1987)
"""
_path_opts=['S']
def __init__(self, eos_mod):
|
MMaus/mutils | cmodels/__init__.py | Python | gpl-2.0 | 106 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 15:22:48 2011
@author: moritz
"""
__all__ = ["bslip"]
|
alfkjartan/nvgimu | nvg/testing/trajectories.py | Python | gpl-3.0 | 2,671 | 0.002995 |
"""
Utilities for testing trajectories.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from imusim.testing.quaternions import assertQuaternionAlmostEqual
from imusim.maths.quaternions import QuaternionArray
from imusim.testing.vectors import assert_vectors_correlated
from imusim.utilities.time_series import TimeSeries
import numpy as np
def checkTrajectory(T, truePositions, trueRotations):
"""
Check the outputs of a trajectory model agree with truth values.
@param T: Trajectory to check.
@param truePositions: L{TimeSeries} of true position values.
@param trueRotations: L{TimeSeries} of true rotation values.
"""
# Get time indices at which position comparisons valid
t = truePositions.timestamps
validity = (t >= T.startTime) & (t <= T.endTime)
t = t[validity]
dt = np.gradient(t)
p = truePositions.values[:,validity]
# Check position
assert_vectors_correlated(T.position(t), p)
# Check velocity
v = np.array(map(np.gradient, p)) / dt
assert_vectors_correlated(T.velocity(t[2:-2]), v[:,2:-2])
# Check acceleration
a = np.array(map(np.gradient, v)) / dt
assert_vectors_correlated(T.acceleration(t[4:-4]), a[:,4:-4])
# Get time indices at which rotation comparisons valid
t = trueRotations.timestamps
validity = (t >= T.startTime) & (t <= T.endTime)
t = t[validity]
r = trueRotations.values[validity]
# Check rotation
assertQuaternionAlmostEqual(T.rotation(t), r, tol=0.05)
# Check angular velocity
r, lastR = r[1:], r[:-1]
t, dt = t[1:], np.diff(t)
diffOmega = (2 * (r - lastR) * lastR.conjugate).array.T[1:] / dt
trajOmega = T.rotationalVelocity(t - dt/2)
assert_vectors_correlated(trajOmega[:,2:-2], diffOmega[:,2:-2])
# Check angular acceleration
diffAlpha = np.array(map(np.gradient, diffOmega)) / dt
trajAlpha = T.rotationalAcceleration(t - dt/2)
assert_vectors_correlated(trajAlpha[:,4:-4], diffAlpha[:,4:-4])
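# A hypothetical usage sketch (trajectory and truth data assumed, not from
# the source):
# ts_pos = TimeSeries(t, positions)            # 3xN true positions
# ts_rot = TimeSeries(t, QuaternionArray(qs))  # N true rotations
# checkTrajectory(trajectory, ts_pos, ts_rot)
# The derivative checks trim a few samples at each end, where the numerical
# gradients of the truth data are unreliable.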
|
cgwire/zou | tests/shots/test_breakdown.py | Python | agpl-3.0 | 4,949 | 0 |
from tests.base import ApiDBTestCase
from zou.app.models.entity import Entity
class BreakdownTestCase(ApiDBTestCase):
def setUp(self):
super(BreakdownTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_asset_type()
self.generate_fixture_asset_types()
self.generate_fixture_episode()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.generate_fixture_asset()
self.generate_fixture_asset_character()
def test_update_casting(self):
self.project_id = str(self.project.id)
self.shot_id = str(self.shot.id)
self.asset_id = str(self.asset.id)
self.asset_character_id = str(self.asset_character.id)
self.asset_type_character_id = str(self.asset_type_character.id)
self.shot_name = self.shot.name
self.sequence_name = self.sequence.name
self.episode_name = self.episode.name
casting = self.get(
"/data/projects/%s/entities/%s/casting"
% (self.project_id, self.shot_id)
)
self.assertListEqual(casting, [])
newCasting = [
{"asset_id": self.asset_id, "nb_occurences": 1},
{"asset_id": self.asset_character_id, "nb_occurences": 3},
]
path = "/data/shots/%s/casting" % str(self.shot_id)
path = "/data/projects/%s/entities/%s/casting" % (
self.project_id,
self.shot_id,
)
self.put(path, newCasting, 200)
casting = self.get(
"/data/projects/%s/entit
|
ies/%s/casting"
% (self.project_id, self.shot_id)
)
casting = sorted(casting, key=lambda x: x["nb_occurences"])
self.assertEqual(casting[0]["asset_id"], newCasting[0]["asset_id"])
self.assertEqual(
casting[0]["nb_occurences"], newCasting[0]["nb_occurences"]
)
self.assertEqual(casting[1]["asset_id"], newCasting[1]["asset_id"])
        self.assertEqual(
casting[1]["nb_occurences"], newCasting[1]["nb_occurences"]
)
self.assertEqual(casting[1]["asset_name"], self.asset_character.name)
self.assertEqual(
casting[1]["asset_type_name"], self.asset_type_character.name
)
cast_in = self.get("/data/assets/%s/cast-in" % self.asset_id)
self.assertEqual(cast_in[0]["shot_name"], self.shot.name)
self.assertEqual(cast_in[0]["sequence_name"], self.sequence.name)
self.assertEqual(cast_in[0]["episode_name"], self.episode.name)
def test_get_assets_for_shots(self):
self.entities = self.generate_data(
Entity,
3,
entities_out=[],
entities_in=[],
instance_casting=[],
project_id=self.project.id,
entity_type_id=self.asset_type.id,
)
self.shot.entities_out = self.entities
self.shot.save()
assets = self.get("data/shots/%s/assets" % self.shot.id)
self.assertEqual(len(assets), 3)
self.assertTrue(
assets[0]["id"] in [str(entity.id) for entity in self.entities]
)
def test_update_asset_casting(self):
self.asset_id = str(self.asset.id)
self.asset_character_id = str(self.asset_character.id)
self.asset_type_character_id = str(self.asset_type_character.id)
casting = self.get("/data/assets/%s/casting" % self.asset_id)
self.assertListEqual(casting, [])
newCasting = [
{"asset_id": self.asset_character_id, "nb_occurences": 3}
]
path = "/data/assets/%s/casting" % str(self.asset_id)
self.put(path, newCasting, 200)
casting = self.get("/data/assets/%s/casting" % self.asset_id)
casting = sorted(casting, key=lambda x: x["nb_occurences"])
self.assertEqual(casting[0]["asset_id"], newCasting[0]["asset_id"])
self.assertEqual(
casting[0]["nb_occurences"], newCasting[0]["nb_occurences"]
)
self.assertEqual(casting[0]["asset_name"], self.asset_character.name)
cast_in = self.get("/data/assets/%s/cast-in" % self.asset_character_id)
self.assertEqual(len(cast_in), 1)
self.assertEqual(cast_in[0]["asset_name"], self.asset.name)
def test_get_casting_for_assets(self):
self.entities = self.generate_data(
Entity,
3,
entities_out=[],
entities_in=[],
instance_casting=[],
project_id=self.project.id,
entity_type_id=self.asset_type.id,
)
self.asset.entities_out = self.entities
self.asset.save()
assets = self.get("data/assets/%s/assets" % self.asset.id)
self.assertEqual(len(assets), 3)
self.assertTrue(
assets[0]["id"] in [str(entity.id) for entity in self.entities]
)
|
dims/cinder
|
cinder/volume/drivers/netapp/dataontap/client/client_base.py
|
Python
|
apache-2.0
| 15,903 | 0 |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import socket
import sys
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
def __init__(self, **kwargs):
self.connection = netapp_api.NaServer(
host=kwargs['hostname'],
transport_type=kwargs['transport_type'],
port=kwargs['port'],
username=kwargs['username'],
password=kwargs['password'])
def _init_features(self):
"""Set up the repository of available Data O
|
NTAP features."""
self.features = na_utils.Features()
    def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return major, minor
def get_connection(self):
return self.connection
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def send_request(self, api_name, api_args=None, enable_tunneling=True):
"""Sends request to Ontapi."""
request = netapp_api.NaElement(api_name)
if api_args:
request.translate_struct(api_args)
return self.connection.invoke_successfully(request, enable_tunneling)
def create_lun(self, volume_name, lun_name, size, metadata,
qos_policy_group_name=None):
"""Issues API request for creating LUN on volume."""
path = '/vol/%s/%s' % (volume_name, lun_name)
lun_create = netapp_api.NaElement.create_node_with_children(
'lun-create-by-size',
**{'path': path, 'size': six.text_type(size),
'ostype': metadata['OsType'],
'space-reservation-enabled': metadata['SpaceReserved']})
if qos_policy_group_name:
lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
try:
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s"),
{'lun_name': lun_name,
'volume_name': volume_name,
'ex': ex})
def destroy_lun(self, path, force=True):
"""Destroys the LUN at the path."""
lun_destroy = netapp_api.NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s", seg[-1])
def map_lun(self, path, igroup_name, lun_id=None):
"""Maps LUN to the initiator and returns LUN id assigned."""
lun_map = netapp_api.NaElement.create_node_with_children(
'lun-map', **{'path': path,
'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
result = self.connection.invoke_successfully(lun_map, True)
return result.get_child_content('lun-id-assigned')
except netapp_api.NaApiError as e:
code = e.code
message = e.message
LOG.warning(_LW('Error mapping LUN. Code :%(code)s, Message: '
'%(message)s'), {'code': code, 'message': message})
raise
def unmap_lun(self, path, igroup_name):
"""Unmaps a LUN from given initiator."""
lun_unmap = netapp_api.NaElement.create_node_with_children(
'lun-unmap',
**{'path': path, 'initiator-group': igroup_name})
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
exc_info = sys.exc_info()
LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: "
"%(message)s"), {'code': e.code,
'message': e.message})
# if the LUN is already unmapped
if e.code == '13115' or e.code == '9016':
pass
else:
six.reraise(*exc_info)
def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
"""Creates igroup with specified args."""
igroup_create = netapp_api.NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
self.connection.invoke_successfully(igroup_create, True)
def add_igroup_initiator(self, igroup, initiator):
"""Adds initiators to the specified igroup."""
igroup_add = netapp_api.NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.connection.invoke_successfully(igroup_add, True)
def do_direct_resize(self, path, new_size_bytes, force=True):
"""Resize the LUN."""
seg = path.split("/")
LOG.info(_LI("Resizing LUN %s directly to new size."), seg[-1])
lun_resize = netapp_api.NaElement.create_node_with_children(
'lun-resize',
**{'path': path,
'size': new_size_bytes})
if force:
lun_resize.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_resize, True)
def get_lun_geometry(self, path):
"""Gets the LUN geometry."""
geometry = {}
lun_geo = netapp_api.NaElement("lun-get-geometry")
lun_geo.add_new_child('path', path)
try:
result = self.connection.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content("size")
geometry['bytes_per_sector'] =\
result.get_child_content("bytes-per-sector")
geometry['sectors_per_track'] =\
result.get_child_content("sectors-per-track")
geometry['tracks_per_cylinder'] =\
result.get_child_content("tracks-per-cylinder")
geometry['cylinders'] =\
result.get_child_content("cylinders")
geometry['max_resize'] =\
result.get_child_content("max-resize-size")
except Exception as e:
LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s"),
{'path': path, 'msg': e.message})
return geometry
def get_volume_options(self, volume_name):
"""G
|
rlindner81/pyload
|
module/plugins/crypter/NosvideoCom.py
|
Python
|
gpl-3.0
| 917 | 0.002181 |
# -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class NosvideoCom(SimpleCrypter):
__name__ = "NosvideoCom"
__type__ = "crypter"
__version__ = "0.07"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?nosvideo\.com/\?v=\w+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No",
|
"Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is gre
|
ater than minutes", 10)]
__description__ = """Nosvideo.com decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("igel", "igelkun@myopera.com")]
LINK_PATTERN = r'href="(http://(?:w{3}\.)?nosupload\.com/\?d=\w+)"'
NAME_PATTERN = r'<[tT]itle>Watch (?P<N>.+?)<'
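    # e.g. LINK_PATTERN would capture a link like "http://nosupload.com/?d=abc123"
    # (hypothetical value) from the watch page HTML.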
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/r-modelmetrics/package.py
|
Python
|
lgpl-2.1
| 1,657 | 0.001207 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RModelmetrics(RPackage):
"""Collect
|
ion of metrics for evaluating models written in C++ using
'Rcpp'."""
homepage = "https://cran.r-project.org/package=ModelMetrics"
url = "https://cran.r-project.org/src/contrib/ModelMetrics_1.1.0.tar.gz"
version('1.1.0', 'd43175001f0531b8810d2802d76b7b44')
depends_on('r@3.2.2:')
depends_on('r-rcpp', type=('build', 'run'))
|
stack-of-tasks/sot-stabilizer
|
python/scripts/robotViewerLauncher.py
|
Python
|
lgpl-3.0
| 1,912 | 0.028766 |
import sys
# --- ROBOT DYNAMIC SIMULATION -------------------------------------------------
from dynamic_graph.sot.hrp2_14.robot import Robot
robot = Robot( 'robot' )
# --- LINK ROBOT VIEWER -------------------------------------------------------
from dynamic_graph.sot.core.utils.viewer_helper import addRobotViewer
addRobotViewer(robot.device,small=True,verbose=False)
robot.timeStep=5e-3
usingRobotViewer = True
from dynamic_graph.sot.core import Stack_of_vector
acc = Stack_of_vector('acc')
gyr = Stack_of_vector('gyr')
acc.selec1(0,2)
acc.selec2(0,1)
gyr.selec1(0,2)
gyr.selec2(0,1)
acc.sin1.value=(0.0,0.0)
acc.sin2.value=(9.8,)
gyr.sin1.value=(0.0,0.0)
gyr.sin2.value=(0.0,)
robot.device.accelerometer = acc.sout
robot.device.gyrometer = gyr.sout
robot.device.forceLLEG.value = (0,0,284,0,0,0)
robot.device.forceRLEG.value = (0,0,284,0,0,0)
# --- MAIN LOOP ----------------------------------------------------------------
from dynamic_graph.sot.core.utils.thread_interruptible_loop import loopInThread,optionalparentheses,loopShortcuts
refreshList = list()
@loopInThread
def loop():
robot.device.increment(robot.timeStep)
for cmd in refreshList: cmd()
runner=loop()
[go,stop,next,n] = loopShortcuts(runner)
@optionalparentheses
def iter(): print 'iter = ',robot.device.state.time
@optionalparentheses
def status(): print runner.isPlay
# ----------------------------------------------------------------------
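# Argument conventions (inferred from the loop below): a leading '+' skips the
# confirmation prompt, a leading '*' re-runs the script on request, and a
# leading '=' executes the remainder as an inline Python statement.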
for scripts in sys.argv[1:]:
if scripts[0]!='+':
raw_input('Enter when you are ready to execute **'+scripts+'** :')
else: scripts = scripts[1:]
loop = scripts[0]=='*'
if loop: scripts = scripts[1:]
while True:
if scripts[0]=='=':
print "["+scripts[1:]+"]"
exec(scripts[1:])
else:
execfile(scripts)
if loop: raw_input('Again <'+scripts+'> ?')
else: break
|
zstackio/zstack-woodpecker
|
zstackwoodpecker/zstackwoodpecker/zstack_test/vid_checker/zstack_vid_checker.py
|
Python
|
apache-2.0
| 135,498 | 0.006295 |
'''
IAM2 Vid Attribute Checker.
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.affinitygroup_operations as ag_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.vxlan_operations as vxlan_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import zstackwoodpecker.operations.zwatch_operations as zwt_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_lib as test_lib
import time
import os
class zstack_vid_attr_checker(checker_header.TestChecker):
def __init__(self):
super(zstack_vid_attr_checker, self).__init__()
def check_login_by_vid(self, username, password):
session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
def check_login_by_account(self, username, password):
session_uuid = acc_ops.login_by_account(username, password)
def check_vm_operation(self, session_uuid=None):
vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions, session_uuid=session_uuid)[0].uuid
vm_creation_option.set_l3_uuids([l3_net_uuid])
conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions, session_uuid=session_uuid)[0].uuid
vm_creation_option.set_image_uuid(image_uuid)
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions, session_uuid=session_uuid)[0].uuid
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name('vm_policy_checker')
vm_creation_option.set_session_uuid(session_uuid)
vm = vm_ops.create_vm(vm_creation_option)
vm_uuid = vm.uuid
# VM related ops: Create, Delete, Expunge, Start, Stop, Suspend, Resume, Migrate
vm_ops.stop_vm(vm_uuid, session_uuid=session_uuid)
vm_ops.start_vm(vm_uuid, session_uuid=session_uuid)
candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm_uuid)
if candidate_hosts != None and test_lib.lib_check_vm_live_migration_cap(vm):
try:
vm_ops.migrate_vm(vm_uuid, candidate_hosts.inventories[0].uuid, session_uuid=session_uuid)
except:
vm_ops.migrate_vm(vm_uuid, candidate_hosts[0].uuid, session_uuid=session_uuid)
vm_ops.stop_vm(vm_uuid, force='cold', session_uuid=session_uuid)
vm_ops.start_vm(vm_uuid, session_uuid=session_uuid)
vm_ops.suspend_vm(vm_uuid, session_uuid=session_uuid)
vm_ops.resume_vm(vm_uuid, session_uuid=session_uuid)
vm_ops.destroy_vm(vm_uuid, session_uuid=session_uuid)
vm_ops.expunge_vm(vm_uuid, session_uuid=session_uuid)
return self.judge(True)
def check_image_operation(self, session_uuid=None):
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, session_uuid=session_uuid)[0]
image_option = test_util.ImageOption()
image_option.set_name('image_policy_checker')
image_option.set_description('image for policy check')
image_option.set_format('raw')
image_option.set_mediaType('RootVolumeTemplate')
image_option.set_backup_storage_uuid_list([bs.uuid])
image_option.url = "http://fake_iamge/image.raw"
image_option.set_session_uuid(session_uuid)
image_uuid = img_ops.add_image(image_option).uuid
img_ops.sync_image_size(image_uuid, session_uuid=session_uuid)
img_ops.change_image_state(image_uuid, 'disable', session_uuid=session_uuid)
img_ops.change_image_state(image_uuid, 'enable', session_uuid=session_uuid)
if bs.type == 'ImageStoreBackupStorage':
img_ops.export_image_from_backup_storage(image_uuid, bs.uuid, session_uuid=session_uuid)
img_ops.delete_exported_image_from_backup_storage(image_uuid, bs.uuid, session_uuid=session_uuid)
img_ops.set_image_qga_enable(image_uuid, session_uuid=session_uuid)
img_ops.set_image_qga_disable(image_uuid, session_uuid=session_uuid)
cond = res_ops.gen_query_conditions('name', '=', "image_policy_checker")
image = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid=session_uuid)
if image == None:
test_util.test_fail('fail to query image just added')
return self.judge(False)
img_ops.delete_image(image_uuid, session_uuid=session_uuid)
img_ops.expunge_image(image_uuid, session_uuid=session_uuid)
return self.judge(True)
def check_snapshot(self, session_uuid=None):
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, session_uuid=session_uuid)[0]
disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING, session_uuid=session_uuid)[0].uuid
volume_option = test_util.VolumeOption()
volume_option.set_disk_offering_uuid(disk_offering_uuid)
volume_option.set_session_uuid(session_uuid)
volume_option.set_name('data_volume_for_snapshot_policy_checker')
data_volume = vol_ops.create_volume_from_offering(volume_option)
vm_creation_option = test_util.VmOption()
conditions = res_ops.gen_query_conditions('system', '=', 'false')
conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
vm_creation_option.set_l3_uuids([l3_net_uuid])
conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
vm_creation_option.set_image_uuid(image_uuid)
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name('vm_without_create_policy_checker')
vm = vm_ops.create_vm(vm_creation_option)
vm_uuid = vm.uuid
vol_ops.attach_volume(data_volume.uuid, vm_uuid)
snapshot_option = test_util.SnapshotOption()
snapshot_option.set_volume_uuid(data_volume.uuid)
snapshot_option.set_name('snapshot_policy_checker')
snapshot_option.set_description('snapshot for policy check')
snapshot_option.set_session_uuid(session_uuid)
snapshot_uuid = vol_ops.create_snapshot(snapshot_option).uuid
vm_ops.stop_vm(vm_uuid, force='cold')
vol_ops.use_snapshot(snapshot_uuid, session_uuid)
#vol_ops.backup_snapshot(snapshot_uuid, bs.uuid, project_login_session_uuid)
#new_volume = vol_ops.create_volume_from_snapshot(snapshot_uuid)
#vol_ops.delete_snapshot_from_backupstorage(snapshot_uuid, [bs.uuid], session_uuid=project_login_session_uuid)
vol_ops.delete_snapshot(snapshot_uuid, session_uuid)
vol_ops.delete_volume(data_volume.uuid)
vol_ops.expunge_volume(data_volume.uuid)
vm_ops.destroy_vm(vm_uuid)
vm_ops.expunge_vm(vm_uuid)
return self.judge(True)
def check_volume_operation(self, session_uuid=None):
# Volume related ops: Create, Delete, Expunge, Attach, Dettach, Enable, Disable
        disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING, session_uuid=session_uuid)[0].uuid
|
tryolabs/luminoth
|
luminoth/tools/dataset/readers/__init__.py
|
Python
|
bsd-3-clause
| 670 | 0 |
from .base_reader import BaseReader, InvalidDataDirectory # noqa
from .object_detection import ObjectDetectionReader # noqa
from .object_detection import (
    COCOReader, CSVReader, FlatReader, ImageNetReader, OpenImagesReader,
PascalVOCReader, TaggerineReader
)
READERS = {
'coco': COCOReader,
'csv': CSVReader,
'flat': FlatReader,
'imagenet': ImageNetReader,
'openimages': OpenImagesReader,
'pascal': PascalVOCReader,
'taggerine': TaggerineReader,
}
def get_reader(reader):
reader = reader.lower()
if reader not in READERS:
raise ValueError('"{}" is not a valid reader'.format(reader))
return READERS[reader]
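# Usage sketch: get_reader('coco') returns the COCOReader class; how the reader
# is then constructed (e.g. data_dir, split arguments) is an assumption about
# the surrounding API, not shown here.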
|
strobo-inc/pc-nrfutil
|
nordicsemi/utility/__init__.py
|
Python
|
bsd-3-clause
| 1,580 | 0.000633 |
# Copyright (c) 2015, Nordic Semiconductor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package marker file."""
|
endlessm/chromium-browser
|
third_party/grpc/src/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
|
Python
|
bsd-3-clause
| 26,919 | 0.000186 |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import importlib
import os
from os import path
import pkgutil
import shutil
import sys
import tempfile
import threading
import unittest
from six import moves
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
from grpc_tools import protoc
from tests.unit.framework.common import test_constants
_RELATIVE_PROTO_PATH = 'relative_proto_path'
_RELATIVE_PYTHON_OUT = 'relative_python_out'
_PROTO_FILES_PATH_COMPONENTS = (
(
'beta_grpc_plugin_test',
'payload',
'test_payload.proto',
),
(
'beta_grpc_plugin_test',
'requests',
'r',
'test_requests.proto',
),
(
'beta_grpc_plugin_test',
'responses',
'test_responses.proto',
),
(
'beta_grpc_plugin_test',
'service',
'test_service.proto',
),
)
_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2'
_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2'
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
STUB_IDENTIFIER = 'BetaTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
@contextlib.contextmanager
def _system_path(path_insertion):
old_system_path = sys.path[:]
sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
yield
sys.path = old_system_path
def _create_directory_tree(root, path_components_sequence):
created = set()
for path_components in path_components_sequence:
thus_far = ''
for path_component in path_components:
relative_path = path.join(thus_far, path_component)
if relative_path not in created:
os.makedirs(path.join(root, relative_path))
created.add(relative_path)
thus_far = path.join(thus_far, path_component)
def _massage_proto_content(raw_proto_content):
imports_substituted = raw_proto_content.replace(
b'import "tests/protoc_plugin/protos/',
b'import "beta_grpc_plugin_test/')
package_statement_substituted = imports_substituted.replace(
b'package grpc_protoc_plugin;', b'package beta_grpc_protoc_plugin;')
return package_statement_substituted
def _packagify(directory):
for subdirectory, _, _ in os.walk(directory):
init_file_name = path.join(subdirectory, '__init__.py')
with open(init_file_name, 'wb') as init_file:
init_file.write(b'')
class _ServicerMethods(object):
def __init__(self, payload_pb2, responses_pb2):
self._condition = threading.Condition()
self._paused = False
self._fail = False
self._payload_pb2 = payload_pb2
self._responses_pb2 = responses_pb2
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = self._responses_pb2.SimpleResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = self._responses_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
@contextlib.contextmanager
def _CreateService(payload_pb2, responses_pb2, service_pb2):
"""Provides a servicer backend and a stub.
The servicer is just the implementation of the actual servicer passed to the
face player of the python RPC implementation; the two are detached.
Yields:
A (servicer_methods, stub) pair where servicer_methods is the back-end of
the service bound to the stub and stub is the stub on which to invoke
RPCs.
"""
servicer_methods = _ServicerMethods(payload_pb2, responses_pb2)
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iter, context):
return servicer_methods.StreamingInputCall(request_iter, context)
def FullDuplexCall(self, request_iter, context):
return servicer_methods.FullDuplexCall(request_iter, context)
def HalfDuplexCall(self, request_iter, context):
return servicer_methods.HalfDuplexCall(request_iter, context)
servicer = Servicer()
server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
port = server.add_insecure_port('[::]:0')
server.start()
channel = implementations.insecure_channel('localhost', port)
stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
yield servicer_methods, stub
server.stop(0)
@contextlib.contextmanager
def _CreateIncompleteService(service_pb2):
"""Provides a servicer backend that fails to implement me
|
slaymaker1907/hearthbreaker
|
tests/card_tests/id_mapping.py
|
Python
|
mit
| 23,324 | 0 |
id_mappings = {
"EX1_097": "Abomination",
"CS2_188": "Abusive Sergeant",
"EX1_007": "Acolyte of Pain",
"NEW1_010": "Al'Akir the Windlord",
"EX1_006": "Alarm-o-Bot",
"EX1_382": "Aldor Peacekeeper",
"EX1_561": "Alexstrasza",
"EX1_393": "Amani Berserker",
"CS2_038": "Ancestral Spirit",
"EX1_057": "Ancient Brewmaster",
"EX1_584": "Ancient Mage",
"NEW1_008b": "Ancient Secrets",
"NEW1_008a": "Ancient Teachings",
"EX1_045": "Ancient Watcher",
"NEW1_008": "Ancient of Lore",
"EX1_178": "Ancient of War",
"EX1_009": "Angry Chicken",
"EX1_398": "Arathi Weaponsmith",
"EX1_089": "Arcane Golem",
"EX1_559": "Archmage Antonidas",
"EX1_067": "Argent Commander",
"EX1_362": "Argent Protector",
"EX1_008": "Argent Squire",
"EX1_402": "Armorsmith",
"EX1_383t": "Ashbringer",
"EX1_591": "Auchenai Soulpriest",
"EX1_384": "Avenging Wrath",
"EX1_284": "Azure Drake",
"EX1_110t": "Baine Bloodhoof",
"EX1_014t": "Bananas",
"EX1_320": "Bane of Doom",
"EX1_249": "Baron Geddon",
"EX1_398t": "Battle Axe",
"EX1_392": "Battle Rage",
"EX1_165b": "Bear Form",
"EX1_549": "Bestial Wrath",
"EX1_126": "Betrayal",
"EX1_005": "Big Game Hunter",
"EX1_570": "Bite",
"CS2_233": "Blade Flurry",
"EX1_355": "Blessed Champion",
"EX1_363": "Blessing of Wisdom",
"CS2_028": "Blizzard",
"EX1_323w": "Blood Fury",
"CS2_059": "Blood Imp",
"EX1_590": "Blood Knight",
"EX1_012": "Bloodmage Thalnos",
"NEW1_025": "Bloodsail Corsair",
"NEW1_018": "Bloodsail Raider",
"EX1_407": "Brawl",
"EX1_091": "Cabal Shadow Priest",
"EX1_110": "Cairne Bloodhoof",
"NEW1_024": "Captain Greenskin",
"EX1_165a": "Cat Form",
"EX1_573": "Cenarius",
"EX1_621": "Circle of Healing",
"CS2_073": "Cold Blood",
"EX1_050": "Coldlight Oracle",
"EX1_103": "Coldlight Seer",
"NEW1_036": "Commanding Shout",
"EX1_128": "Conceal",
"EX1_275": "Cone of Cold",
"EX1_287": "Counterspell",
"EX1_059": "Crazed Alchemist",
"EX1_603": "Cruel Taskmaster",
"EX1_595": "Cult Master",
"skele21": "Damaged Golem",
"EX1_046": "Dark Iron Dwarf",
"EX1_617": "Deadly Shot",
"NEW1_030": "Deathwing",
"EX1_130a": "Defender",
"EX1_093": "Defender of Argus",
"EX1_131t": "Defias Bandit",
"EX1_131": "Defias Ringleader",
"EX1_573a": "Demigod's Favor",
"EX1_102": "Demolisher",
"EX1_596": "Demonfire",
"EX1_tk29": "Devilsaur",
"EX1_162": "Dire Wolf Alpha",
"EX1_166b": "Dispel",
"EX1_349": "Divine Favor",
"EX1_310": "Doomguard",
"EX1_567": "Doomhammer",
"NEW1_021": "Doomsayer",
"NEW1_022": "Dread Corsair",
"DREAM_04": "Dream",
"EX1_165t2": "Druid of the Claw (bear)",
"EX1_165": "Druid of the Claw",
"EX1_165t1": "Druid of the Claw (cat)",
"EX1_243": "Dust Devil",
"EX1_536": "Eaglehorn Bow",
"EX1_250": "Earth Elemental",
"EX1_245": "Earth Shock",
"CS2_117": "Earthen Ring Farseer",
"EX1_613": "Edwin VanCleef",
"DREAM_03": "Emerald Drake",
"EX1_170": "Emperor Cobra",
"EX1_619": "Equality",
"EX1_274": "Ethereal Arcanist",
"EX1_124": "Eviscerate",
"EX1_537": "Explosive Shot",
"EX1_610": "Explosive Trap",
"EX1_132": "Eye for an Eye",
"EX1_564": "Faceless Manipulator",
"NEW1_023": "Faerie Dragon",
"CS2_053": "Far Sight",
"EX1_301": "Felguard",
"CS1_069": "Fen Creeper",
"EX1_248": "Feral Spirit",
"EX1_finkle": "Finkle Einhorn",
"EX1_319": "Flame Imp",
"EX1_614t": "Flame of Azzinoth",
"EX1_544": "Flare",
"tt_004": "Flesheating Ghoul",
"EX1_571": "Force of Nature",
"EX1_251": "Forked Lightning",
"EX1_611": "Freezing Trap",
"EX1_283": "Frost Elemental",
"EX1_604": "Frothing Berserker",
"EX1_095": "Gadgetzan Auctioneer",
"DS1_188": "Gladiator's Longbow",
"NEW1_040t": "Gnoll",
"EX1_411": "Gorehowl",
"EX1_414": "Grommash Hellscream",
"NEW1_038": "Gruul",
"EX1_558": "Harrison Jones",
"EX1_556": "Harvest Golem",
"EX1_137": "Headcrack",
"EX1_409t": "Heavy Axe",
"NEW1_040": "Hogger",
"EX1_624": "Holy Fire",
"EX1_365": "Holy Wrath",
"EX1_538t": "Hound",
"NEW1_017": "Hungry Crab",
"EX1_534t": "Hyena",
"EX1_289": "Ice Barrier",
"EX1_295": "Ice Block",
"CS2_031": "Ice Lance",
"EX1_614": "Illidan Stormrage",
"EX1_598": "Imp",
"EX1_597": "Imp Master",
"EX1_tk34": "Infernal",
"CS2_181": "Injured Blademaster",
"CS1_129": "Inner Fire",
"EX1_607": "Inner Rage",
"CS2_203": "Ironbeak Owl",
"EX1_017": "Jungle Panther",
"EX1_166": "Keeper of the Grove",
"NEW1_005": "Kidnapper",
"EX1_543": "King Krush",
"EX1_014": "King Mukla",
"EX1_612": "Kirin Tor Mage",
"NEW1_019": "Knife Juggler",
"DREAM_01": "Laughing Sister",
"EX1_241": "Lava Burst",
"EX1_354": "Lay on Hands",
"EX1_160b": "Leader of the Pack",
"EX1_116": "Leeroy Jenkins",
"EX1_029": "Leper Gnome",
"EX1_238": "Lightning Bolt",
"EX1_259": "Lightning Storm",
"EX1_335": "Lightspawn",
"EX1_001": "Lightwarden",
"EX1_341": "Lightwell",
"EX1_096": "Loot Hoarder",
"EX1_323": "Lord Jaraxxus",
"EX1_100": "Lorewalker Cho",
"EX1_082": "Mad Bomber",
"EX1_563": "Malygos",
"EX1_055": "Mana Addict",
"EX1_575": "Mana Tide Totem",
"EX1_616": "Mana Wraith",
"NEW1_012": "Mana Wyrm",
"EX1_155": "Mark of Nature",
"EX1_155b": "Mark of Nature",
"EX1_155a": "Mark of Nature",
"EX1_626": "Mass Dispel",
"NEW1_037": "Master Swordsmith",
"NEW1_014": "Master of Disguise",
"NEW1_029": "Millhouse Manastorm",
"EX1_085": "Mind Control Tech",
"EX1_345": "Mindgames",
"EX1_294": "Mirror Entity",
"EX1_533": "Misdirection",
"EX1_396": "Mogu'shan Warden",
"EX1_620": "Molten Giant",
"EX1_166a": "Moonfire",
"EX1_408": "Mortal Strike",
"EX1_105": "Mountain Giant",
"EX1_509": "Murloc Tidecaller
|
",
"EX1_507": "Murloc Warleader",
"EX1_557": "Nat Pagle",
"EX1_161"
|
: "Naturalize",
"DREAM_05": "Nightmare",
"EX1_130": "Noble Sacrifice",
"EX1_164b": "Nourish",
"EX1_164a": "Nourish",
"EX1_164": "Nourish",
"EX1_560": "Nozdormu",
"EX1_562": "Onyxia",
"EX1_160t": "Panther",
"EX1_522": "Patient Assassin",
"EX1_133": "Perdition's Blade",
"EX1_076": "Pint-Sized Summoner",
"EX1_313": "Pit Lord",
"EX1_316": "Power Overwhelming",
"EX1_160": "Power of the Wild",
"EX1_145": "Preparation",
"EX1_583": "Priestess of Elune",
"EX1_350": "Prophet Velen",
"EX1_279": "Pyroblast",
"EX1_044": "Questing Adventurer",
"EX1_412": "Raging Worgen",
"EX1_298": "Ragnaros the Firelord",
"CS2_104": "Rampage",
"CS2_161": "Ravenholdt Assassin",
"EX1_136": "Redemption",
"EX1_379": "Repentance",
"EX1_178a": "Rooted",
"EX1_134": "SI:7 Agent",
"EX1_578": "Savagery",
"EX1_534": "Savannah Highmane",
"EX1_020": "Scarlet Crusader",
"EX1_531": "Scavenging Hyena",
"EX1_586": "Sea Giant",
"EX1_080": "Secretkeeper",
"EX1_317": "Sense Demons",
"EX1_334": "Shadow Madness",
"EX1_345t": "Shadow of Nothing",
"EX1_303": "Shadowflame",
"EX1_625": "Shadowform",
"EX1_144": "Shadowstep",
"EX1_573b": "Shan'do's Lesson",
"EX1_410": "Shield Slam",
"EX1_405": "Shieldbearer",
"EX1_332": "Silence",
"CS2_151": "Silver Hand Knight",
"EX1_023": "Silvermoon Guardian",
"EX1_309": "Siphon Soul",
"EX1_391": "Slam",
"EX1_554t": "Snake",
"EX1_554": "Snake Trap",
"EX1_609": "Snipe",
"EX1_608": "Sorcerer's Apprentice",
"EX1_158": "Soul of the Forest",
"NEW1_027": "Southsea Captain",
"CS2_146": "Southsea Deckhand",
"tt_010a": "Spellbender (minion)",
"tt_010": "Spellbender",
"EX1_048": "Spellbreaker",
"EX1_tk11": "Spirit Wolf",
"CS2_221": "Spiteful Smith",
"CS2_152": "Squire",
"EX1_tk28": "Squirrel",
"NEW1_041": "Stampeding Kodo",
"NEW1_007a": "Starfall",
|
normpad/iotatipbot
|
test/bot_api.py
|
Python
|
gpl-3.0
| 13,535 | 0.012412 |
import re
from iota import *
import praw
import sqlite3
import random
import string
from iota.adapter.wrappers import RoutingWrapper
import config
import urllib.request
from urllib.error import HTTPError
import json
import math
node_address = config.node_address
class api:
def __init__(self,seed,prod=True):
self.address_index = 1
if prod:
self.init_db()
self.iota_api = Iota(
RoutingWrapper(node_address)
.add_route('attachToTangle','http://localhost:14265'),seed)
def init_db(self):
self.conn = sqlite3.connect(config.database_name)
self.db = self.conn.cursor()
self.create_database()
self.address_index = len(self.db.execute("SELECT * FROM usedAddresses").fetchall())
def init_custom_db(self,name):
self.conn = sqlite3.connect(name)
self.db = self.conn.cursor()
self.create_database()
self.address_index = len(self.db.execute("SELECT * FROM usedAddresses").fetchall())
def get_iota_value(self,amount):
try:
with urllib.request.urlopen('https://api.coinmarketcap.com/v1/ticker/iota/') as url:
data = json.loads(url.read().decode())[0]
price = data['price_usd']
value = (amount/1000000)*float(price)
return value
except:
            return amount/1000000
#---------IOTA API FUNCTIONS--------------#
def send_transfer(self,addr,amount):
ret = self.iota_api.send_transfer(
depth = 3,
transfers = [
ProposedTransaction(
address = Address(
addr
),
value = amount,
),
],
min_weight_magnitude=15
)
return ret
def get_account_balance(self):
addresses = self.iota_api.get_new_addresses(0,self.address_index)['addresses']
balances = self.iota_api.get_balances(addresses)['balances']
total = 0
for balance in balances:
total = total + balance
return total
def get_balance(self,address):
address_data = self.iota_api.get_balances([address])
return address_data['balances'][0]
def get_new_address(self):
addresses = self.iota_api.get_new_addresses(self.address_index,1)
for address in addresses['addresses']:
address = address.with_valid_checksum()
self.add_used_address(self.address_index,address._trytes.decode("utf-8"))
self.address_index = self.address_index + 1
if self.get_balance(address) > 0:
return self.get_new_address()
return address
def create_seed(self):
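        # Note: random.choice is not a cryptographically secure source; for
        # seeds guarding real funds, random.SystemRandom would be a safer choice.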
seed = ''.join(random.choice(string.ascii_uppercase + "9") for _ in range(81))
return seed
def check_transaction(self,transaction):
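        # The transfer counts as confirmed once get_latest_inclusion reports
        # True for its bundle hash.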
transaction_hash = transaction['bundle'].hash
inclusion_states = self.iota_api.get_latest_inclusion([transaction_hash])
return inclusion_states['states'][transaction_hash]
def replay_bundle(self,transaction):
transaction_hash = transaction['bundle'].tail_transaction.hash
self.iota_api.replay_bundle(transaction_hash,3,15)
#-------------MESSAGE REGEX FUNCTIONS---------------#
#Check if the message body or subject contains a fund/deposit request
def is_deposit_request(self,message):
fund_string = re.compile("Fund",re.I)
deposit_string = re.compile("Deposit",re.I)
match = fund_string.search(message.subject)
if match:
return True
match = fund_string.search(message.body)
if match:
return True
match = deposit_string.search(message.subject)
if match:
return True
match = deposit_string.search(message.body)
if match:
return True
return False
#Check if the message body or subject contains a withdraw request
def is_withdraw_request(self,message):
withdraw_string = re.compile("Withdraw",re.I)
match = withdraw_string.search(message.subject)
if match:
return True
match = withdraw_string.search(message.body)
if match:
return True
return False
#Check if the message body or subject contains a balance request
def is_balance_request(self,message):
balance_string = re.compile("Balance",re.I)
match = balance_string.search(message.subject)
if match:
return True
match = balance_string.search(message.body)
if match:
return True
return False
#Check if the message body or subject contains a help/commands request
def is_help_request(self,message):
help_string = re.compile("Help",re.I)
commands_string = re.compile("Commands",re.I)
match = help_string.search(message.subject)
if match:
return True
match = help_string.search(message.body)
if match:
return True
match = commands_string.search(message.subject)
if match:
return True
match = commands_string.search(message.body)
if match:
return True
return False
#Check if the message body contains an iota amount
def contains_iota_amount(self,message):
iota_amount_string = re.compile("([0-9]+)\s*iota",re.I)
miota_amount_string = re.compile("([0-9]+)\s*miota",re.I)
match = iota_amount_string.search(message.body)
if match:
return True
match = miota_amount_string.search(message.body)
if match:
return True
return False
#Return the iota amount refrenced in the message, convets miota to iota
def get_iota_tip_amount(self,message):
iota_amount_string = re.compile("\+\s*([0-9]+)\s*iota",re.I)
miota_amount_string = re.compile("\+\s*([0-9]+)\s*miota",re.I)
match = iota_amount_string.search(message.body)
if match:
return int(match.group(1))
match = miota_amount_string.search(message.body)
if match:
return (int(match.group(1))*1000000)
def get_iota_amount(self,message):
iota_amount_string = re.compile("([0-9]+)\s*iota",re.I)
miota_amount_string = re.compile("([0-9]+)\s*miota",re.I)
match = iota_amount_string.search(message.body)
if match:
return int(match.group(1))
match = miota_amount_string.search(message.body)
if match:
return (int(match.group(1))*1000000)
def get_message_address(self,message):
        address_string = re.compile("[A-Z9]{90}")
match = address_string.search(message.body)
if match:
return bytearray(match.group(0),"utf-8")
else:
return None
def is_tip(self,comment):
tip_string_iota = re.compile("\+\s*[0-9]+\s*iota",re.I)
tip_string_miota = re.compile("\+\s*[0-9]+\s*miota",re.I)
text = comment.body
match = tip_string_iota.search(text)
if match:
return True
match = tip_string_miota.search(text)
if match:
return True
return False
def is_donation_request(self,message):
donate_string = re.compile("donat",re.I)
match = donate_string.search(message.subject)
if match:
return True
match = donate_string.search(message.body)
if match:
return True
return False
#--------------------Database Functions----------------------#
def create_database(self):
self.db.execute("CREATE TABLE IF NOT EXISTS users (redditUsername TEXT PRIMARY KEY, balance INTEGER)")
self.conn.commit()
self.db.execute("CREATE TABLE IF NOT EXISTS commentsRepliedTo (commentId TEXT PRIMARY KEY)")
self.conn.commit()
self.db.execute("CREATE TABLE IF NOT EXISTS usedAddresses (addressIndex INTEGER PRIMARY KEY, address TEXT)")
        self.conn.commit()
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/llvm-lld/package.py
|
Python
|
lgpl-2.1
| 1,980 | 0.000505 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class LlvmLld(CMakePackage):
"""lld - The LLVM Linker
lld is a new set of modular code for creating linker tools."""
homepage = "http://lld.llvm.org"
url = "http://llvm.org/releases/3.4/lld-3.4.src.tar.gz"
    version('3.4', '3b6a17e58c8416c869c14dd37682f78e')
depends_on('llvm')
depends_on('cmake@2.8:', type='build')
def cmake_args(self):
if 'CXXFLAGS' in env and env['CXXFLAGS']:
env['CXXFLAGS'] += ' ' + self.compiler.cxx11_flag
else:
env['CXXFLAGS'] = self.compiler.cxx11_flag
return [
'-DLLD_PATH_TO_LLVM_BUILD=%s' % self.spec['llvm'].prefix,
'-DLLVM_MAIN_SRC_DIR=%s' % self.spec['llvm'].prefix,
]
|
pheanex/ansible
|
lib/ansible/playbook/base.py
|
Python
|
gpl-3.0
| 18,228 | 0.002524 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import operator
import uuid
from functools import partial
from inspect import getmembers
from io import FileIO
from six import iteritems, string_types, text_type
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.debug import debug
from ansible.utils.vars import combine_vars, isidentifier
from ansible.template import template
class Base:
# connection/transport
_connection = FieldAttribute(isa='string')
_port = FieldAttribute(isa='int')
_remote_user = FieldAttribute(isa='string')
# variables
_vars = FieldAttribute(isa='dict', default=dict(), priority=100)
# flags and misc. settings
_environment = FieldAttribute(isa='list')
_no_log = FieldAttribute(isa='bool')
# param names which have been deprecated/removed
DEPRECATED_ATTRIBUTES = [
'sudo', 'sudo_user', 'sudo_pass', 'sudo_exe', 'sudo_flags',
'su', 'su_user', 'su_pass', 'su_exe', 'su_flags',
]
def __init__(self):
# initialize the data loader and variable manager, which will be provided
# later when the object is actually loaded
        self._loader = None
        self._variable_manager = None
# every object gets a random uuid:
self._uuid = uuid.uuid4()
# and initialize the base attributes
self._initialize_base_attributes()
try:
from __main__ import display
self._display = display
except ImportError:
from ansible.utils.display import Display
self._display = Display()
    # The following three functions are used to programmatically define data
# descriptors (aka properties) for the Attributes of all of the playbook
# objects (tasks, blocks, plays, etc).
#
# The function signature is a little strange because of how we define
# them. We use partial to give each method the name of the Attribute that
# it is for. Since partial prefills the positional arguments at the
# beginning of the function we end up with the first positional argument
# being allocated to the name instead of to the class instance (self) as
# normal. To deal with that we make the property name field the first
# positional argument and self the second arg.
#
# Because these methods are defined inside of the class, they get bound to
# the instance when the object is created. After we run partial on them
# and put the result back into the class as a property, they get bound
# a second time. This leads to self being placed in the arguments twice.
# To work around that, we mark the functions as @staticmethod so that the
# first binding to the instance doesn't happen.
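    # For example, for the '_port' FieldAttribute above this ends up installing
    # roughly: Base.port = property(partial(Base._generic_g, 'port'),
    #                               partial(Base._generic_s, 'port'),
    #                               partial(Base._generic_d, 'port'))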
@staticmethod
def _generic_g(prop_name, self):
method = "_get_attr_%s" % prop_name
if hasattr(self, method):
return getattr(self, method)()
value = self._attributes[prop_name]
if value is None and hasattr(self, '_get_parent_attribute'):
value = self._get_parent_attribute(prop_name)
return value
@staticmethod
def _generic_s(prop_name, self, value):
self._attributes[prop_name] = value
@staticmethod
def _generic_d(prop_name, self):
del self._attributes[prop_name]
def _get_base_attributes(self):
'''
Returns the list of attributes for this class (or any subclass thereof).
If the attribute name starts with an underscore, it is removed
'''
base_attributes = dict()
for (name, value) in getmembers(self.__class__):
if isinstance(value, Attribute):
if name.startswith('_'):
name = name[1:]
base_attributes[name] = value
return base_attributes
def _initialize_base_attributes(self):
# each class knows attributes set upon it, see Task.py for example
self._attributes = dict()
for (name, value) in self._get_base_attributes().items():
getter = partial(self._generic_g, name)
setter = partial(self._generic_s, name)
deleter = partial(self._generic_d, name)
# Place the property into the class so that cls.name is the
# property functions.
setattr(Base, name, property(getter, setter, deleter))
# Place the value into the instance so that the property can
# process and hold that value/
setattr(self, name, value.default)
def preprocess_data(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
for base_class in self.__class__.mro():
method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
if method:
return method(ds)
return ds
def load_data(self, ds, variable_manager=None, loader=None):
''' walk the input datastructure and assign any values '''
assert ds is not None
# cache the datastructure internally
setattr(self, '_ds', ds)
# the variable manager class is used to manage and merge variables
# down to a single dictionary for reference in templating, etc.
self._variable_manager = variable_manager
# the data loader class is used to parse data from strings and files
if loader is not None:
self._loader = loader
else:
self._loader = DataLoader()
# call the preprocess_data() function to massage the data into
# something we can more easily parse, and then call the validation
# function on it to ensure there are no incorrect key values
ds = self.preprocess_data(ds)
self._validate_attributes(ds)
# Walk all attributes in the class. We sort them based on their priority
# so that certain fields can be loaded before others, if they are dependent.
# FIXME: we currently don't do anything with private attributes but
# may later decide to filter them out of 'ds' here.
base_attributes = self._get_base_attributes()
for name, attr in sorted(base_attributes.items(), key=operator.itemgetter(1)):
# copy the value over unless a _load_field method is defined
if name in ds:
method = getattr(self, '_load_%s' % name, None)
if method:
self._attributes[name] = method(name, ds[name])
else:
self._attributes[name] = ds[name]
# run early, non-critical validation
self.validate()
# return the constructed object
return self
def get_ds(self):
try:
return getattr(self, '_ds')
except AttributeError:
return None
def get_loader(self):
return self._loader
def get_variable_manager(self):
return self._variable_manager
def _validate_attributes(self, ds):
'''
        Ensures that there are no keys in the datastructure which do not
        map to attributes for this object.
        '''
|
brain-tec/partner-contact
|
partner_phone_search/__manifest__.py
|
Python
|
agpl-3.0
| 616 | 0 |
# Copyright 2018 - TODAY Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
'name': 'Search Partner Phone/Mobile/Email',
'version': '11.0.1.0.1',
'category': 'Extra Tools',
'summary': 'Partner Search by Phone/Mobile/Email',
'author': "Serpent Consulting Services Pvt. Ltd.,"
"Odoo Com
|
munity Association (OCA)",
'website': 'https://github.com/OCA/partner-contact',
'license': 'AGPL-3',
'depends': [
'base',
],
'installable': True,
'auto_install': False,
}
|
everfor/Neural_Artistic_Style
|
vgg.py
|
Python
|
mit
| 2,235 | 0.008501 |
import tensorflow as tf
import numpy as np
import scipy.io
vgg_layers = [
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4'
]
vgg_layer_types = [
'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'conv', 'relu'
]
# Build the vgg convnet
# Returns convnet and mean pixel of the convnet
def build_net(path_network, input_image):
# Load pretrained convnet
pretrained_net = scipy.io.loadmat(path_network)
# Mean of input pixels - used to normalize input images
mean = np.mean(pretrained_net['normalization'][0][0][0], axis = (0, 1))
layers = pretrained_net['layers'][0]
convnet = {}
current = input_image
for i, name in enumerate(vgg_layers):
if vgg_layer_types[i] == 'conv':
# Convolution layer
kernel, bias = layers[i][0][0][0][0]
# (width, height, in_channels, out_channels) -> (height, width, in_channels, out_channels)
kernels = np.transpose(kernel, (1, 0, 2, 3))
bias = bias.reshape(-1)
            conv = tf.nn.conv2d(current, tf.constant(kernels), strides = (1, 1, 1, 1), padding = 'SAME')
current = tf.nn.bias_add(conv, bias)
elif vgg_layer_types[i] == 'relu':
# Relu layer
current = tf.nn.relu(current)
elif vgg_layer_types[i] == 'pool':
# Pool layer
current = tf.nn.avg_pool(current, ksize = (1, 2, 2, 1), strides = (1, 2, 2, 1), padding = 'SAME')
convnet[name] = current
return convnet, mean
def pre_process_image(image, mean_pixel):
return image - mean_pixel
def restore_image(image, mean_pixel):
return image + mean_pixel
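# Usage sketch (the .mat file name is an assumption; any VGG-19 MatConvNet
# export should work):
#   image = tf.placeholder('float', shape=(1, 224, 224, 3))
#   convnet, mean_pixel = build_net('imagenet-vgg-verydeep-19.mat', image)
#   content_features = convnet['relu4_2']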
|
shubhamVerma/code-eval
|
Category - Easy/primePalindromeCodeEval.py
|
Python
|
gpl-3.0
| 1,290 | 0.020155 |
'''
primepalCodeEval.py - Solution to Problem Prime Palindrome (Category - Easy)
Copyright (C) 2013, Shubham Verma
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Description:
Write a program to determine the biggest prime palindrome under 1000.
Input sample:
None
Output sample:
Your program should print the largest palindrome on stdout. i.e.
929
'''
from math import sqrt
def isPrime(num):
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # include int(sqrt(num)) itself so perfect squares (9, 25, ...) are rejected
    for i in xrange(3, int(sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
if __name__ == '__main__':
for num in reversed(xrange(1000)):
if str(num) == str(num)[::-1] and isPrime(num):
print num
break
|
MaxIV-KitsControls/netspot
|
netspot/lib/spotmax/helpers.py
|
Python
|
mit
| 1,004 | 0.025896 |
#!/usr/bin/python -tt
"""Helper functions."""
from dns import resolver
# Exceptions
class CouldNotResolv(Exception):
"""Exception for unresolvable hostname."""
pass
def resolv(hostname):
"""Select and query DNS servers.
Args:
hostname: string, hostname
Returns:
ips: list, list of IPs
"""
ips = list()
# Create resolver object
res = resolver.Resolver()
# Choose the correct DNS servers
# Blue DNS servers
if hostname.startswith('b-'):
    res.nameservers = ['172.16.2.10', '172.16.2.11']
# Green DNS servers
elif hostname.startswith('g-'):
res.nameservers = ['10.0.2.10', '10.0.2.11']
# Default to white DNS servers
else:
res.nameservers = ['194.47.252.134', '194.47.252.135']
# Query
try:
query = res.query(hostname)
for answer in query:
ips.append(answer.address)
except resolver.NXDOMAIN:
raise CouldNotResolv
# Return query result
return ips
def main():
"""Main."""
pass
if __name__ == '__main__':
main()
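# Editor's note: a minimal sketch of calling resolv(); the hostname is
# illustrative, and CouldNotResolv is raised on an NXDOMAIN answer:
#
#   try:
#       print(resolv('b-example-host'))
#   except CouldNotResolv:
#       print('hostname did not resolve')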
|
ponty/pyscreenshot
|
pyscreenshot/check/speedtest.py
|
Python
|
bsd-2-clause
| 3,228 | 0.002169 |
import sys
import time
from entrypoint2 import entrypoint
import pyscreenshot
from pyscreenshot.plugins.gnome_dbus import GnomeDBusWrapper
from pyscreenshot.plugins.gnome_screenshot import GnomeScreenshotWrapper
from pyscreenshot.plugins.kwin_dbus import KwinDBusWrapper
from pyscreenshot.util import run_mod_as_subproc
def run(force_backend, n, childprocess, bbox=None):
sys.stdout.write("%-20s\t" % force_backend)
sys.stdout.flush() # before any crash
if force_backend == "default":
force_backend = None
try:
start = time.time()
for _ in range(n):
pyscreenshot.grab(
backend=force_backend, childprocess=childprocess, bbox=bbox
)
end = time.time()
dt = end - start
s = "%-4.2g sec\t" % dt
s += "(%5d ms per call)" % (1000.0 * dt / n)
sys.stdout.write(s)
finally:
print("")
novirt = [GnomeDBusWrapper.name, KwinDBusWrapper.name, GnomeScreenshotWrapper.name]
def run_all(n, childprocess_param, virtual_only=True, bbox=None):
debug = True
print("")
print("n=%s" % n)
print("------------------------------------------------------")
if bbox:
x1, y1, x2, y2 = map(str, bbox)
bbox = ":".join(map(str, (x1, y1, x2, y2)))
bboxpar = ["--bbox", bbox]
else:
bboxpar = []
if debug:
debugpar = ["--debug"]
else:
debugpar = []
for x in ["default"] + pyscreenshot.backends():
backendpar = ["--backend", x]
# skip non X backends
if virtual_only and x in novirt:
continue
p = run_mod_as_subproc(
"pyscreenshot.check.speedtest",
["--childprocess", childprocess_param] + bboxpar + debugpar + backendpar,
)
print(p.stdout)
@entrypoint
def speedtest(virtual_display=False, backend="", childprocess="", bbox="", number=10):
"""Performance test of all back-ends.
:param virtual_display: run with Xvfb
:param bbox: bounding box coordinates x1:y1:x2:y2
    :param backend: back-end can be forced if set (example:default, scrot, wx,..),
otherwise all back-ends are tested
:param childprocess: pyscreenshot parameter childprocess (0/1)
:param number: number of screenshots for each backend (default:10)
"""
childprocess_param = childprocess
if childprocess == "":
        childprocess = True  # default
elif childprocess == "0":
childprocess = False
elif childprocess == "1":
childprocess = True
else:
raise ValueError("invalid childprocess value")
if bbox:
x1, y1, x2, y2 = map(int, bbox.split(":"))
bbox = x1, y1, x2, y2
else:
bbox = None
def f(virtual_only):
if backend:
try:
run(backend, number, childprocess, bbox=bbox)
except pyscreenshot.FailedBackendError:
pass
else:
run_all(number, childprocess_param, virtual_only=virtual_only, bbox=bbox)
if virtual_display:
from pyvirtualdisplay import Display
with Display(visible=0):
f(virtual_only=True)
else:
f(virtual_only=False)
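# Editor's note: a minimal sketch of invoking speedtest() directly from Python;
# the backend name is illustrative and must be one reported by
# pyscreenshot.backends():
#
#   speedtest(backend='scrot', number=5)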
|
sassoo/goldman
|
goldman/models/__init__.py
|
Python
|
mit
| 353 | 0 |
"""
models
~~~~~~
Module containing all of our models that are typically
accessed in a CRUD like manner.
"""
from ..models.base import Model as BaseModel
from ..models.default_schema import Model as DefaultSchemaModel
from ..models.login import Model as LoginModel
MODELS = [
BaseModel,
DefaultSchemaModel,
LoginModel,
]
|
missionpinball/mpf
|
mpf/tests/test_PlayerVars.py
|
Python
|
mit
| 2,380 | 0.006303 |
from mpf.tests.MpfGameTestCase import MpfGameTestCase
class TestPlayerVars(MpfGameTestCase):
def get_config_file(self):
return 'player_vars.yaml'
def get_machine_path(self):
return 'tests/machine_files/player_vars/'
def test_initial_values(self):
self.fill_troughs()
self.start_two_player_game()
for x in range(2):
self.assertEqual(self.machine.game.player_list[x].some_var, 4)
self.assertEqual(type(self.machine.game.player_list[x].some_var), int)
self.assertEqual(self.machine.game.player_list[x].some_float, 4.0)
self.assertEqual(type(self.machine.game.player_list[x].some_float), float)
self.assertEqual(self.machine.game.player_list[x].some_string, '4')
self.assertEqual(type(self.machine.game.player_list[x].some_string), str)
self.assertEqual(self.machine.game.player_list[x].some_other_string, 'hello')
self.assertEqual(type(self.machine.game.player_list[x].some_other_string), str)
self.machine.game.player.test = 7
self.assertEqual(7, self.machine.game.player.test)
self.assertEqual(7, self.machine.game.player.vars["test"])
self.assertEqual(4, self.machine.variables.get_machine_var("test1"))
self.assertEqual('5', self.machine.variables.get_machine_var("test2"))
def test_event_kwargs(self):
self.fill_troughs()
self.start_game()
self.assertEqual(self.machine.game.player.some_var, 4)
self.mock_event('player_some_var')
self.machine.game.player.add_with_kwargs('some_var', 6, foo='bar')
self.advance_time_and_run()
self.assertEventCalledWith('player_some_var',
value=10,
prev_value=4,
change=6,
                                   player_num=1,
                                   foo='bar')
self.machine.game.player.set_with_kwargs('some_var', 1, bar='foo')
self.advance_time_and_run()
        self.assertEventCalledWith('player_some_var',
                                   value=1,
prev_value=10,
change=-9,
player_num=1,
bar='foo')
|
duducosmos/pgs4a
|
python-install/bin/smtpd.py
|
Python
|
lgpl-2.1
| 18,597 | 0.00043 |
#!/home/tom/ab/android/python-for-android/build/python-install/bin/python2.7
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd.  A number of classes are provided:
#
#     SMTPServer - the base class for the backend.  Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <barry@python.org>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
def write(self, msg): pass
def flush(self): pass
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
print >> sys.stderr, __doc__ % globals()
if msg:
print >> sys.stderr, msg
sys.exit(code)
class SMTPChannel(asynchat.async_chat):
COMMAND = 0
DATA = 1
def __init__(self, server, conn, addr):
asynchat.async_chat.__init__(self, conn)
self.__server = server
self.__conn = conn
self.__addr = addr
self.__line = []
self.__state = self.COMMAND
self.__greeting = 0
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__fqdn = socket.getfqdn()
try:
self.__peer = conn.getpeername()
except socket.error, err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err[0] != errno.ENOTCONN:
raise
return
print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
self.push('220 %s %s' % (self.__fqdn, __version__))
self.set_terminator('\r\n')
# Overrides base class for convenience
def push(self, msg):
asynchat.async_chat.push(self, msg + '\r\n')
# Implementation of base class abstract method
def collect_incoming_data(self, data):
self.__line.append(data)
# Implementation of base class abstract method
def found_terminator(self):
line = EMPTYSTRING.join(self.__line)
print >> DEBUGSTREAM, 'Data:', repr(line)
self.__line = []
if self.__state == self.COMMAND:
if not line:
self.push('500 Error: bad syntax')
return
method = None
i = line.find(' ')
if i < 0:
command = line.upper()
arg = None
else:
command = line[:i].upper()
arg = line[i+1:].strip()
method = getattr(self, 'smtp_' + command, None)
if not method:
self.push('502 Error: command "%s" not implemented' % command)
return
method(arg)
return
else:
if self.__state != self.DATA:
self.push('451 Internal confusion')
return
# Remove extraneous carriage returns and de-transparency according
# to RFC 821, Section 4.5.2.
data = []
for text in line.split('\r\n'):
if text and text[0] == '.':
data.append(text[1:])
else:
data.append(text)
self.__data = NEWLINE.join(data)
status = self.__server.process_message(self.__peer,
self.__mailfrom,
self.__rcpttos,
self.__data)
self.__rcpttos = []
self.__mailfrom = None
self.__state = self.COMMAND
self.set_terminator('\r\n')
if not status:
self.push('250 Ok')
else:
self.push(status)
# SMTP and ESMTP commands
def smtp_HELO(self, arg):
if not arg:
self.push('501 Syntax: HELO hostname')
return
if self.__greeting:
self.push('503 Duplicate HELO/EHLO')
else:
self.__greeting = arg
self.push('250 %s' % self.__fqdn)
def smtp_NOOP(self, arg):
if arg:
self.push('501 Syntax: NOOP')
else:
self.push('250 Ok')
def smtp_QUIT(self, arg):
# args is ignored
self.push('221 Bye')
self.close_when_done()
# factored
def __getaddr(self, keyword, arg):
address = None
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
address = arg[keylen:].strip()
if not address:
pass
elif address[0] == '<' and address[-1] == '>' and address != '<>':
# Addresses can be in the form <person@dom.com> but watch out
# for null address, e.g. <>
address = address[1:-1]
return address
def smtp_MAIL(self, arg):
print >> DEBUGSTREAM, '===> MAIL', arg
address = self.__getaddr('FROM:', arg) if arg else None
if not address:
self.push('501 Syntax: MAIL FROM:<address>')
return
if self.__mailfrom:
self.push('503 Error: nested MAIL command')
return
self.__mailfrom = address
print >> DEBUGSTREAM, 'sender:', self.__mailfrom
self.push('250 Ok')
def smtp_RCPT(self, arg):
print >> DEBUGSTREAM, '===> RCPT', arg
if not self.__mailfrom:
self.push('503 Error: need MAIL command')
return
address = self.__getaddr('TO:', arg) if arg else None
if not address:
self.push('501 Syntax: RCPT TO: <address>')
return
self.__rcpttos.append(address)
print >> DEBUGSTREAM, 'recips:', self.__rcpttos
self.push('250 Ok')
def smtp_RSET(self, arg):
if arg:
self.push('501 Syntax: RSET')
return
# Resets the sender, recipients, and data, but not the greeting
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__state = self.COMMAND
self.push('250 Ok')
def smtp_DATA(self, arg):
if not self.__rcp
|
fooelisa/netmiko
|
netmiko/_textfsm/_clitable.py
|
Python
|
mit
| 12,990 | 0.007467 |
"""
Google's clitable.py is inherently integrated to Linux:
This is a workaround for that (basically include modified clitable code without anything
that is Linux-specific).
_clitable.py is identical to Google's as of 2017-12-17
_texttable.py is identical to Google's as of 2017-12-17
_terminal.py is a highly stripped down version of Google's such that clitable.py works
https://github.com/google/textfsm/blob/master/clitable.py
"""
# Some of this code is from Google with the following license:
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import os
import re
import threading
import copyable_regex_object
import textfsm
from netmiko._textfsm import _texttable as texttable
class Error(Exception):
"""Base class for errors."""
class IndexTableError(Error):
"""General INdexTable error."""
class CliTableError(Error):
"""General CliTable error."""
class IndexTable(object):
"""Class that reads and stores comma-separated values as a TextTable.
Stores a compiled regexp of the value for efficient matching.
Includes functions to preprocess Columns (both compiled and uncompiled).
Attributes:
index: TextTable, the index file parsed into a texttable.
compiled: TextTable, the table but with compiled regexp for each field.
"""
def __init__(self, preread=None, precompile=None, file_path=None):
"""Create new IndexTable object.
Args:
preread: func, Pre-processing, applied to each field as it is read.
precompile: func, Pre-compilation, applied to each field before compiling.
file_path: String, Location of file to use as input.
"""
self.index = None
self.compiled = None
if file_path:
self._index_file = file_path
self._index_handle = open(self._index_file, 'r')
self._ParseIndex(preread, precompile)
def __del__(self):
"""Close index handle."""
if hasattr(self, '_index_handle'):
self._index_handle.close()
def __len__(self):
"""Returns number of rows in table."""
return self.index.size
def __copy__(self):
"""Returns a copy of an IndexTable object."""
clone = IndexTable()
if hasattr(self, '_index_file'):
# pylint: disable=protected-access
clone._index_file = self._index_file
clone._index_handle = self._index_handle
clone.index = self.index
clone.compiled = self.compiled
return clone
def __deepcopy__(self, memodict=None):
"""Returns a deepcopy of an IndexTable object."""
clone = IndexTable()
if hasattr(self, '_index_file'):
# pylint: disable=protected-access
clone._index_file = copy.deepcopy(self._index_file)
clone._index_handle = open(clone._index_file, 'r')
clone.index = copy.deepcopy(self.index)
clone.compiled = copy.deepcopy(self.compiled)
return clone
def _ParseIndex(self, preread, precompile):
"""Reads index file and stores entries in TextTable.
For optimisation reasons, a second table is created with compiled entries.
Args:
preread: func, Pre-processing, applied to each field as it is read.
precompile: func, Pre-compilation, applied to each field before compiling.
Raises:
IndexTableError: If the column headers has illegal column labels.
"""
self.index = texttable.TextTable()
self.index.CsvToTable(self._index_handle)
if preread:
for row in self.index:
for col in row.header:
row[col] = preread(col, row[col])
self.compiled = copy.deepcopy(self.index)
for row in self.compiled:
for col in row.header:
if precompile:
row[col] = precompile(col, row[col])
if row[col]:
row[col] = copyable_regex_object.CopyableRegexObject(row[col])
def GetRowMatch(self, attributes):
"""Returns the row number that matches the supplied attributes."""
for row in self.compiled:
try:
for key in attributes:
# Silently skip attributes not present in the index file.
# pylint: disable=E1103
if key in row.header and row[key] and not row[key].match(attributes[key]):
# This line does not match, so break and try next row.
raise StopIteration()
return row.row
except StopIteration:
pass
return 0
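# Editor's note: a minimal sketch of IndexTable.GetRowMatch, assuming an index
# CSV whose header includes 'Vendor' and 'Command' columns (file and column
# names are hypothetical):
#
#   index = IndexTable(file_path='index')
#   row_num = index.GetRowMatch({'Vendor': 'cisco_ios', 'Command': 'show version'})
#   # GetRowMatch returns 0 when no row matches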
class CliTable(texttable.TextTable):
"""Class that reads CLI output and parses into tabular format.
Reads an index file and uses it to map command strings to templates. It then
uses TextFSM to parse the command output (raw) into a tabular format.
The superkey is the set of columns that contain data that uniquely defines the
row, the key is the row number otherwise. This is typically gathered from the
templates 'Key' value but is extensible.
Attributes:
raw: String, Unparsed command string from device/command.
index_file: String, file where template/command mappings reside.
template_dir: String, directory where index file and templates reside.
"""
# Parse each template index only once across all instances.
# Without this, the regexes are parsed at every call to CliTable().
_lock = threading.Lock()
INDEX = {}
# pylint: disable=C6409
def synchronised(func):
"""Synchronisation decorator."""
# pylint: disable=E0213
def Wrapper(main_obj, *args, **kwargs):
main_obj._lock.acquire() # pylint: disable=W0212
try:
return func(main_obj, *args, **kwargs) # pylint: disable=E1102
finally:
main_obj._lock.release() # pylint: disable=W0212
return Wrapper
# pylint: enable=C6409
@synchronised
def __init__(self, index_file=None, template_dir=None):
"""Create new CLiTable object.
Args:
index_file: String, file where template/command mappings reside.
template_dir: String, directory where index file and templates reside.
"""
# pylint: disable=E1002
super(CliTable, self).__init__()
self._keys = set()
self.raw = None
self.index_file = index_file
self.template_dir = template_dir
if index_file:
self.ReadIndex(index_file)
def ReadIndex(self, index_file=None):
"""Reads the IndexTable index file of commands and templates.
Args:
index_file: String, file where template/command mappings reside.
Raises:
CliTableError: A template column was not found in the table.
"""
self.index_file = index_file or self.index_file
fullpath = os.path.join(self.template_dir, self.index_file)
if self.index_file and fullpath not in self.INDEX:
self.index = IndexTable(self._PreParse, self._PreCompile, fullpath)
self.INDEX[fullpath] = self.index
else:
self.index = self.INDEX[fullpath]
# Does the IndexTable have the right columns.
if 'Template' not in self.index.index.header: # pylint: disable=E1103
raise CliTableError("Index file does not have 'Template' column.")
def _TemplateNamesToFiles(self, template_str):
"""Parses a string of templates into a list of file handles."""
template_list = template_str.split(':')
template_files = []
try:
for tmplt in template_list:
template_files.append(
open(os.path.join(self.template_dir, tmplt), 'r'))
except: # noqa
for tmplt in template_files:
tmplt.close()
raise
return template_files
def ParseCmd(self, cmd_input, attributes=None, templates=None):
"""Creates a TextTable table of values from cmd_input string.
Parses command output with template/s. If more than one template is found
subsequent tables are merged if keys match (dropped otherwise).
Args
|
arthurio/coc_war_planner
|
coc_war_planner/api/views.py
|
Python
|
mit
| 3,228 | 0.002478 |
from coc_war_planner.api.permissions import CreateNotAllowed
from coc_war_planner.api.permissions import IsChiefOrReadOnly
from coc_war_planner.api.permissions import IsUserOrReadOnly
from coc_war_planner.api.permissions import IsOwnerOrReadOnly
from coc_war_planner.api.permissions import IsNotPartOfClanOrCreateNotAllowed
from coc_war_planner.api.serializers import ClanSerializer
from coc_war_planner.api.serializers import ClanPutSerializer
from coc_war_planner.api.serializers import MemberGetSerializer
from coc_war_planner.api.serializers import MemberSerializer
from coc_war_planner.api.serializers import TroopsPostSerializer
from coc_war_planner.api.serializers import TroopsPutSerializer
from coc_war_planner.api.serializers import TroopsGetSerializer
from coc_war_planner.core.models import Clan
from coc_war_planner.core.models import Member
from coc_war_planner.core.models import Troops
from coc_war_planner.core.models import TroopLevel
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from rest_framework import filters
from rest_framework import permissions
from rest_framework import serializers
from rest_framework import viewsets
class ClanViewSet(viewsets.ModelViewSet):
queryset = Clan.objects.all()
serializer_class = ClanSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsChiefOrReadOnly,
IsNotPartOfClanOrCreateNotAllowed)
filter_backends = (filters.OrderingFilter, filters.SearchFilter,)
ordering_fields = ('name', 'pin',)
ordering = 'name' # default ordering
search_fields = ('name', 'pin',)
def perform_create(self, serializer):
instance = serializer.save(chief=self.request.user.member)
self.request.user.member.clan = instance
self.request.user.member.save()
def get_serializer_class(self):
if self.request.method == 'PUT':
return ClanPutSerializer
return ClanSerializer
class MemberViewSet(viewsets.ModelViewSet):
queryset = Member.objects.all()
serializer_class = MemberSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
CreateNotAllowed,
IsUserOrReadOnly,)
def get_serializer_class(self):
if self.request.method == 'GET':
return MemberGetSerializer
        return MemberSerializer
class TroopsViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly,)
def get_serializer_class(self):
if self.request.method == 'POST':
return TroopsPostSerializer
elif self.request.method == 'PUT':
return TroopsPutSerializer
return TroopsGetSerializer
    def get_queryset(self):
member_id = self.request.GET.get('member_id', self.request.user.member.id)
if member_id is None:
raise serializers.ValidationError({
'member_id': 'Parameter is missing.'
})
troops = Troops.objects.filter(member_id=member_id)
troops_id = self.kwargs.get(self.lookup_field)
if troops_id:
troops = troops.filter(pk=troops_id)
return troops
|
andrius-preimantas/purchase-workflow
|
purchase_order_force_number/__openerp__.py
|
Python
|
agpl-3.0
| 1,589 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Purchase orders - Force number",
'version': '0.1',
'category': 'Purchase Management',
'summary': "Force purchase orders numeration",
'description': """
This simple module allows to specify the number to use when creating purchase
orders. If user does not change the default value ('/'), the standard sequence
is used.""",
'author': "Agile Business Group,Odoo Community Association (OCA)",
'website': 'http://www.agilebg.com',
    'license': 'AGPL-3',
"depends": ['purchase'],
"data": [
|
'purchase_view.xml',
],
"demo": [],
"active": False,
"installable": False
}
|
Aramist/Self-Driving-Car
|
ros/install/_setup_util.py
|
Python
|
mit
| 12,461 | 0.002568 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
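# Editor's note: a minimal sketch of rollback_env_variables with an
# illustrative environment (the workspace path is hypothetical):
#
#   env = {'CMAKE_PREFIX_PATH': '/opt/ws',
#          'PATH': '/opt/ws/bin' + os.pathsep + '/usr/bin'}
#   for line in rollback_env_variables(env, ENV_VAR_SUBFOLDERS):
#       print(line)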
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks =
|
PBR/path2gene
|
path2gene.py
|
Python
|
bsd-3-clause
| 8,068 | 0.005949 |
#!/usr/bin/python
"""
Small web application to retrieve genes from the tomato genome
annotation involved to a specified pathways.
"""
import flask
from flaskext.wtf import Form, TextField
import ConfigParser
import datetime
import json
import os
import rdflib
import urllib
CONFIG = ConfigParser.ConfigParser()
CONFIG.readfp(open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'path2gene.cfg')))
# Address of the sparql server to query.
SERVER = CONFIG.get('path2gene', 'sparql_server')
# Create the application.
APP = flask.Flask(__name__)
APP.secret_key = CONFIG.get('path2gene', 'secret_key')
# Stores in which graphs are the different source of information.
GRAPHS = {option: CONFIG.get('graph', option) for option in CONFIG.options('graph')}
class PathwayForm(Form):
""" Simple text field form to input the pathway of interest.
"""
pathway_name = TextField('Pathway name (or part of it)')
def search_pathway_in_db(name):
""" Search the uniprot database for pathways having the given string
in their name. It returns a list of these pathways.
@param name, a string, name or part of the name of the pathway to
search in uniprot.
@return, a list of the pathway names found for having the given
string.
"""
query = '''
PREFIX gene:<http://pbr.wur.nl/GENE#>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX uniprot:<http://purl.uniprot.org/core/>
SELECT DISTINCT ?pathdesc
FROM <%(uniprot)s>
WHERE{
?prot uniprot:annotation ?annot .
?annot rdfs:seeAlso ?url .
?annot rdfs:comment ?pathdesc .
FILTER (
regex(?pathdesc, "%(search)s", "i")
)
} ORDER BY ASC(?pathdesc)
''' % {'search': name, 'uniprot': GRAPHS['uniprot']}
data_js = sparql_query(query, SERVER)
if not data_js:
return
pathways = []
for entry in data_js['results']['bindings']:
pathways.append(entry['pathdesc']['value'])
return pathways
def get_gene_of_pathway(pathway):
""" Retrieve all the gene associated with pathways containing the
given string.
@param name, a string, name of the pathway for which to retrieve the
genes in the tomato genome annotation.
@return, a hash of the genes name and description found to be
associated with the specified pathway.
"""
query = '''
PREFIX gene:<http://pbr.wur.nl/GENE#>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX uniprot:<http://purl.uniprot.org/core/>
SELECT DISTINCT ?gene ?desc ?pathdesc
FROM <%(itag)s>
FROM <%(uniprot)s>
WHERE{
?geneobj gene:Protein ?prot .
?geneobj gene:Description ?desc .
?geneobj gene:FeatureName ?gene .
?prot uniprot:annotation ?annot .
?annot rdfs:seeAlso ?url .
?annot rdfs:comment ?pathdesc .
FILTER (
regex(?pathdesc, "%(search)s", "i")
)
} ORDER BY ASC(?gene)
''' % {'search': pathway, 'uniprot': GRAPHS['uniprot'],
'itag': GRAPHS['itag']}
data_js = sparql_query(query, SERVER)
if not data_js:
return
genes = {}
for entry in data_js['results']['bindings']:
genes[entry['gene']['value']] = [entry['desc']['value'],
entry['pathdesc']['value']]
return genes
def get_gene_of_pathway_strict(pathway):
""" Retrieve all the gene associated with the given pathway.
    @param name, a string, name of the pathway for which to retrieve the
genes in the tomato genome annotation.
    @return, a hash of the genes name and description found to be
associated with the specified pathway.
"""
query = '''
PREFIX gene:<http://pbr.wur.nl/GENE#>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX uniprot:<http://purl.uniprot.org/core/>
SELECT DISTINCT ?gene ?desc
FROM <%(itag)s>
FROM <%(uniprot)s>
WHERE{
?geneobj gene:Protein ?prot .
?geneobj gene:Description ?desc .
?geneobj gene:FeatureName ?gene .
?prot uniprot:annotation ?annot .
?annot rdfs:seeAlso ?url .
?annot rdfs:comment "%(search)s" .
} ORDER BY ASC(?gene)
''' % {'search': pathway, 'uniprot': GRAPHS['uniprot'],
'itag': GRAPHS['itag']}
data_js = sparql_query(query, SERVER)
if not data_js:
return
genes = {}
for entry in data_js['results']['bindings']:
genes[entry['gene']['value']] = [entry['desc']['value'],
pathway]
return genes
def sparql_query(query, server, output_format='application/json'):
""" Runs the given SPARQL query against the desired sparql endpoint
and return the output in the format asked (default being rdf/xml).
@param query, the string of the sparql query that should be ran.
@param server, a string, the url of the sparql endpoint that we want
to run query against.
@param format, specifies in which format we want to have the output.
Defaults to `application/json` but can also be `application/rdf+xml`.
@return, a JSON object, representing the output of the provided
sparql query.
"""
params = {
'default-graph': '',
'should-sponge': 'soft',
'query': query,
'debug': 'off',
'timeout': '',
'format': output_format,
'save': 'display',
'fname': ''
}
querypart = urllib.urlencode(params)
response = urllib.urlopen(server, querypart).read()
try:
output = json.loads(response)
except ValueError:
output = {}
return output
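# Editor's note: a minimal sketch of sparql_query against a SPARQL endpoint
# (the endpoint URL is hypothetical):
#
#   data = sparql_query('SELECT * WHERE { ?s ?p ?o } LIMIT 1',
#                       'http://localhost:8890/sparql')
#   rows = data.get('results', {}).get('bindings', []) if data else []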
## Web-app
@APP.route('/', methods=['GET', 'POST'])
def index():
""" Shows the front page.
All the content of this page is in the index.html file under the
templates directory. The file is full html and has no templating
logic within.
"""
print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
flask.request.remote_addr, flask.request.url)
form = PathwayForm(csrf_enabled=False)
if form.validate_on_submit():
return flask.redirect(flask.url_for('search_pathway',
name=form.pathway_name.data))
return flask.render_template('index.html', form=form)
@APP.route('/search/<name>')
def search_pathway(name):
""" Search the database for pathways containing the given string.
"""
print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
flask.request.remote_addr, flask.request.url)
pathways = search_pathway_in_db(name)
core = []
for path in pathways:
core.append('%s*' % path.split(';')[0].strip())
core = list(set(core))
return flask.render_template('search.html', data=pathways,
search=name, core=core)
@APP.route('/path/<path:pathway>')
def pathway(pathway):
""" Show for the given pathways all the genes found to be related.
"""
print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
flask.request.remote_addr, flask.request.url)
if pathway.endswith('*'):
genes = get_gene_of_pathway(pathway[:-1])
else:
genes = get_gene_of_pathway_strict(pathway)
geneids = genes.keys()
geneids.sort()
return flask.render_template('output.html', pathway=pathway,
genes=genes, geneids=geneids)
@APP.route('/csv/<path:pathway>.csv')
def generate_csv(pathway):
""" Generate a comma separated value file containing all the
information.
"""
print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
flask.request.remote_addr, flask.request.url)
# Regenerate the informations
if pathway.endswith('*'):
genes = get_gene_of_pathway(pathway[:-1])
else:
genes = get_gene_of_pathway_strict(pathway)
string = 'Gene ID, Gene description, Pathway\n'
for gene in genes:
string = string + "%s, %s, %s\n" % (gene, genes[gene][0],
genes[gene][1])
return flask.Response(string, mimetype='application/excel')
if __name__ == '__main__':
APP.debug = True
APP.run()
|
scheib/chromium
|
third_party/blink/tools/blinkpy/tool/mock_tool.py
|
Python
|
bsd-3-clause
| 1,739 | 0 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from blinkpy.common.host_mock import MockHost
class MockBlinkTool(MockHost):
    def __init__(self, *args, **kwargs):
MockHost.__init__(self, *args, **kwargs)
def path(self):
return 'echo'
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/enrollments/urls.py
|
Python
|
agpl-3.0
| 1,148 | 0.002613 |
"""
URLs for the Enrollment API
"""
from django.conf import settings
from django.conf.urls import url
from .views import (
CourseEnrollmentsApiListView,
EnrollmentCourseDetailView,
EnrollmentListView,
EnrollmentUserRolesView,
EnrollmentView,
UnenrollmentView
)
urlpatterns = [
url(r'^enrollment/{username},{course_key}$'.format(
username=settings.USERNAME_PATTERN,
course_key=settings.COURSE_ID_PATTERN),
        EnrollmentView.as_view(), name='courseenrollment'),
url(r'^enrollment/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),
EnrollmentView.as_view(), name='courseenrollment'),
url(r'^enrollment$', EnrollmentListView.as_view(), name='courseenrollments'),
url(r'^enrollments/?$', CourseEnrollmentsApiListView.as_view(), name='courseenrollmentsapilist'),
    url(r'^course/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),
EnrollmentCourseDetailView.as_view(), name='courseenrollmentdetails'),
url(r'^unenroll/$', UnenrollmentView.as_view(), name='unenrollment'),
url(r'^roles/$', EnrollmentUserRolesView.as_view(), name='roles'),
]
|
tri2sing/LinearAlgebraPython
|
vec.py
|
Python
|
apache-2.0
| 3,573 | 0.012315 |
def getitem(v,d):
"Returns the value of entry d in v"
assert d in v.D
return v.f[d] if d in v.f else 0
def setitem(v,d,val):
"Set the element of v with label d to be val"
assert d in v.D
v.f[d] = val
def equal(u,v):
"Returns true iff u is equal to v"
assert u.D == v.D
union = set(u.f) | set (v.f)
for k in union:
uval = u.f[k] if k in u.f else 0
vval = v.f[k] if k in v.f else 0
if uval != vval:
return False
return True
def add(u,v):
"Returns the sum of the two vectors"
assert u.D == v.D
    ukeys = set(u.f)
vkeys = set (v.f)
both = ukeys & vkeys
uonly = ukeys - both
vonly = vkeys - both
f = {}
for k in both:
f[k] = u.f[k] + v.f[k]
    for k in uonly:
f[k] = u.f[k]
for k in vonly:
f[k] = v.f[k]
return Vec (u.D | v.D, f)
def dot(u,v):
"Returns the dot product of the two vectors"
assert u.D == v.D
ukeys = set(u.f)
vkeys = set (v.f)
both = ukeys & vkeys
return sum([u.f[k] * v.f[k] for k in both])
def scalar_mul(v, alpha):
"Returns the scalar-vector product alpha times v"
f = {k: alpha * v.f[k] for k in v.f}
return (Vec(v.D, f))
def neg(v):
"Returns the negation of a vector"
return scalar_mul (v, -1)
def toStr(v):
"pretty-printing"
try:
D_list = sorted(v.D)
except TypeError:
D_list = sorted(v.D, key=hash)
numdec = 3
wd = dict([(k,(1+max(len(str(k)), len('{0:.{1}G}'.format(v[k], numdec))))) if isinstance(v[k], int) or isinstance(v[k], float) else (k,(1+max(len(str(k)), len(str(v[k]))))) for k in D_list])
# w = 1+max([len(str(k)) for k in D_list]+[len('{0:.{1}G}'.format(value,numdec)) for value in v.f.values()])
s1 = ''.join(['{0:>{1}}'.format(k,wd[k]) for k in D_list])
s2 = ''.join(['{0:>{1}.{2}G}'.format(v[k],wd[k],numdec) if isinstance(v[k], int) or isinstance(v[k], float) else '{0:>{1}}'.format(v[k], wd[k]) for k in D_list])
return "\n" + s1 + "\n" + '-'*sum(wd.values()) +"\n" + s2
##### NO NEED TO MODIFY BELOW HERE #####
class Vec:
"""
A vector has two fields:
D - the domain (a set)
f - a dictionary mapping (some) domain elements to field elements
elements of D not appearing in f are implicitly mapped to zero
"""
def __init__(self, labels, function):
self.D = labels
self.f = function
__getitem__ = getitem
__setitem__ = setitem
__neg__ = neg
__rmul__ = scalar_mul #if left arg of * is primitive, assume it's a scalar
def __mul__(self,other):
#If other is a vector, returns the dot product of self and other
if isinstance(other, Vec):
return dot(self,other)
else:
return NotImplemented # Will cause other.__rmul__(self) to be invoked
def __truediv__(self,other): # Scalar division
return (1/other)*self
__add__ = add
def __radd__(self, other):
"Hack to allow sum(...) to work with vectors"
if other == 0:
return self
# def __sub__(self, a,b):
# "Returns a vector which is the difference of a and b."
# return a+(-b)
def __sub__(self, other):
"Returns a vector which is the difference of a and b."
return self+(-other)
__eq__ = equal
__str__ = toStr
def __repr__(self):
return "Vec(" + str(self.D) + "," + str(self.f) + ")"
def copy(self):
"Don't make a new copy of the domain D"
return Vec(self.D, self.f.copy())
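# Editor's note: a minimal usage sketch of the Vec class (values are
# illustrative only):
#
#   u = Vec({'a', 'b', 'c'}, {'a': 1, 'b': 2})
#   v = Vec({'a', 'b', 'c'}, {'b': 3, 'c': 4})
#   print(u + v)   # entry-wise sum over the sparse representations
#   print(u * v)   # dot product over shared keys: 2*3 = 6
#   print(3 * u)   # scalar multiple via __rmul__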
|
enigmampc/catalyst
|
tests/utils/test_argcheck.py
|
Python
|
apache-2.0
| 7,116 | 0 |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from catalyst.utils.argcheck import (
verify_callable_argspec,
Argument,
NoStarargs,
UnexpectedStarargs,
NoKwargs,
UnexpectedKwargs,
NotCallable,
NotEnoughArguments,
TooManyArguments,
MismatchedArguments,
)
class TestArgCheck(TestCase):
def test_not_callable(self):
"""
Check the results of a non-callable object.
"""
not_callable = 'a'
with self.assertRaises(NotCallable):
            verify_callable_argspec(not_callable)
def test_no_starargs(self):
"""
Tests when a function does not have *args and it was expected.
"""
def f(a):
pass
with self.assertRaises(NoStarargs):
verify_callable_argspec(f, expect_starargs=True)
def test_starargs(self):
"""
Tests when a function has *args and it was expected.
"""
def f(*args):
pass
        verify_callable_argspec(f, expect_starargs=True)
    def test_unexpected_starargs(self):
"""
Tests a function that unexpectedly accepts *args.
"""
def f(*args):
pass
with self.assertRaises(UnexpectedStarargs):
verify_callable_argspec(f, expect_starargs=False)
def test_ignore_starargs(self):
"""
Tests checking a function ignoring the presence of *args.
"""
def f(*args):
pass
def g():
pass
verify_callable_argspec(f, expect_starargs=Argument.ignore)
verify_callable_argspec(g, expect_starargs=Argument.ignore)
def test_no_kwargs(self):
"""
Tests when a function does not have **kwargs and it was expected.
"""
def f():
pass
with self.assertRaises(NoKwargs):
verify_callable_argspec(f, expect_kwargs=True)
def test_kwargs(self):
"""
Tests when a function has **kwargs and it was expected.
"""
def f(**kwargs):
pass
verify_callable_argspec(f, expect_kwargs=True)
def test_unexpected_kwargs(self):
"""
Tests a function that unexpectedly accepts **kwargs.
"""
def f(**kwargs):
pass
with self.assertRaises(UnexpectedKwargs):
verify_callable_argspec(f, expect_kwargs=False)
def test_ignore_kwargs(self):
"""
Tests checking a function ignoring the presence of **kwargs.
"""
def f(**kwargs):
pass
def g():
pass
verify_callable_argspec(f, expect_kwargs=Argument.ignore)
verify_callable_argspec(g, expect_kwargs=Argument.ignore)
def test_arg_subset(self):
"""
Tests when the args are a subset of the expectations.
"""
def f(a, b):
pass
with self.assertRaises(NotEnoughArguments):
verify_callable_argspec(
f, [Argument('a'), Argument('b'), Argument('c')]
)
def test_arg_superset(self):
def f(a, b, c):
pass
with self.assertRaises(TooManyArguments):
verify_callable_argspec(f, [Argument('a'), Argument('b')])
def test_no_default(self):
"""
Tests when an argument expects a default and it is not present.
"""
def f(a):
pass
with self.assertRaises(MismatchedArguments):
verify_callable_argspec(f, [Argument('a', 1)])
def test_default(self):
"""
Tests when an argument expects a default and it is present.
"""
def f(a=1):
pass
verify_callable_argspec(f, [Argument('a', 1)])
def test_ignore_default(self):
"""
Tests that ignoring defaults works as intended.
"""
def f(a=1):
pass
verify_callable_argspec(f, [Argument('a')])
def test_mismatched_args(self):
def f(a, b):
pass
with self.assertRaises(MismatchedArguments):
verify_callable_argspec(f, [Argument('c'), Argument('d')])
def test_ignore_args(self):
"""
Tests the ignore argument list feature.
"""
def f(a):
pass
def g():
pass
h = 'not_callable'
verify_callable_argspec(f)
verify_callable_argspec(g)
with self.assertRaises(NotCallable):
verify_callable_argspec(h)
def test_out_of_order(self):
"""
Tests the case where arguments are not in the correct order.
"""
def f(a, b):
pass
with self.assertRaises(MismatchedArguments):
verify_callable_argspec(f, [Argument('b'), Argument('a')])
def test_wrong_default(self):
"""
Tests the case where a default is expected, but the default provided
does not match the one expected.
"""
def f(a=1):
pass
with self.assertRaises(MismatchedArguments):
verify_callable_argspec(f, [Argument('a', 2)])
def test_any_default(self):
"""
Tests the any_default option.
"""
def f(a=1):
pass
def g(a=2):
pass
def h(a):
pass
expected_args = [Argument('a', Argument.any_default)]
verify_callable_argspec(f, expected_args)
verify_callable_argspec(g, expected_args)
with self.assertRaises(MismatchedArguments):
verify_callable_argspec(h, expected_args)
def test_ignore_name(self):
"""
Tests ignoring a param name.
"""
def f(a):
pass
def g(b):
pass
def h(c=1):
pass
expected_args = [Argument(Argument.ignore, Argument.no_default)]
verify_callable_argspec(f, expected_args)
        verify_callable_argspec(g, expected_args)
with self.assertRaises(MismatchedArguments):
verify_callable_argspec(h, expected_args)
def test_bound_method(self):
class C(object):
def f(self, a, b):
pass
method = C().f
verify_callable_argspec(method, [Argument('a'), Argument('b')])
with self.assertRaises(NotEnoughArguments):
# Assert that we don't count self.
verify_callable_argspec(
method,
[Argument('self'), Argument('a'), Argument('b')],
)
|
rgayon/plaso
|
tests/analysis/mediator.py
|
Python
|
apache-2.0
| 1,743 | 0.004016 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the analysis mediator."""
from __future__ import unicode_literals
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.analysis import mediator
from plaso.containers import sessions
from plaso.storage.fake import writer as fake_writer
from tests.analysis import test_lib
class AnalysisMediatorTest(test_lib.AnalysisPluginTestCase):
"""Tests for the analysis mediator."""
def testGetDisplayNameForPathSpec(self):
"""Tests the GetDisplayNameForPathSpec function
|
."""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
knowledge_base = self._SetUpKnowledgeBase()
analysis_mediator = mediator.AnalysisMediator(
storage_writer, knowledge_base)
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
expected_display_name = 'OS:{0:s}'.format(test_path)
display_name = analysis_mediator.GetDisplayNameForPathSpec(os_path_spec)
self.assertEqual(display_name, expected_display_name)
# TODO: add test for GetUsernameForPath.
# TODO: add test for ProduceAnalysisReport.
# TODO: add test for ProduceEventTag.
def testSignalAbort(self):
"""Tests the SignalAbort function."""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
knowledge_base = self._SetUpKnowledgeBase()
analysis_mediator = mediator.AnalysisMediator(
storage_writer, knowledge_base)
analysis_mediator.SignalAbort()
if __name__ == '__main__':
unittest.main()
|
MarsZone/DreamLand
|
evennia/evennia/commands/default/cmdset_unloggedin.py
|
Python
|
bsd-3-clause
| 821 | 0 |
"""
This module describes the unlogged state of the default game.
The setting STATE_UNLOGGED should be set to the python path
of the state instance in this module.
"""
from evennia.commands.cmdset import CmdSet
from evennia.commands.default import unloggedin
class UnloggedinCmdSet(CmdSet):
"""
Sets up the unlogged cmdset.
"""
key = "DefaultUnl
|
oggedin"
priority = 0
def at_cmdset_creation(self):
"Populate the cmdset"
self.add(unloggedin.CmdUnconnectedConnect())
self.add(unloggedin.CmdUnconnectedCreate())
self.add(unloggedin.CmdUnconnectedQuit())
self.add(unloggedin.CmdUnconnectedLook())
        self.add(unloggedin.CmdUnconnectedHelp())
self.add(unloggedin.CmdUnconnectedEncoding())
self.add(unloggedin.CmdUnconnectedScreenreader())
|
mantarayforensics/mantaray
|
Tools/Python/get_system_version.py
|
Python
|
gpl-3.0
| 3,323 | 0.020163 |
#!/usr/bin/env python3
#This extracts data from xml plists
#
#########################COPYRIGHT INFORMATION############################
#Copyright (C) 2013 dougkoster@hotmail.com #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
#
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
#
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see http://www.gnu.org/licenses/. #
#########################COPYRIGHT INFORMATION############################
from parted import *
from mount import *
from mount_ewf import *
from done import *
from unix2dos import *
from mmls import *
from check_for_folder import *
from calculate_md5 import *
import os
from os.path import join
import re
import io
import sys
import string
import subprocess
import datetime
import shutil
import plistlib
import xml.parsers.expat as expat
def get_system_version(plist_info, abs_file_path, md5, export_file, outfile, key_name):
plist_type = type(plist_info)
print("The plist type is: " + str(plist_type))
if(type(plist_info) is dict):
		export_file.write('File Path: ' + "\t" + abs_file_path + "\n")
export_file.write('MD5: ' + "\t\t" + str(md5) + "\n\n")
print(abs_file_path + " has a plist attribute that is a dict")
process_dict(plist_info, outfile, export_file, key_name)
elif(str(type(plist_info)) == "<class 'plistlib._InternalDict'>"):
		export_file.write('File Path: ' + "\t" + abs_file_path + "\n")
export_file.write('MD5: ' + "\t\t" + str(md5) + "\n")
print(abs_file_path + " has a plist attribute that is an internal dict")
process_dict(plist_info, outfile, export_file, key_name)
def process_dict(dictionary_plist, outfile, export_file, key_name):
#loop through dict plist
for key,value in sorted(dictionary_plist.items()):
if(key_name == key):
print("The key is: " + key + " The key_name is: " + key_name)
export_file.write(key + "=> " + value)
#figure out cat type
if(re.search('10.9', value)):
export_file.write("(Mavericks)")
elif(re.search('10.8', value)):
export_file.write("(Mountain Lion)")
elif(re.search('10.7', value)):
export_file.write("(Lion)")
elif(re.search('10.6', value)):
export_file.write("(Snow Leopard)")
elif(re.search('10.5', value)):
export_file.write("(Leopard)")
elif(re.search('10.4', value)):
export_file.write("(Tiger)")
elif(re.search('10.3', value)):
export_file.write("(Panther)")
elif(re.search('10.2', value)):
export_file.write("(Jaguar)")
elif(re.search('10.1', value)):
export_file.write("(Puma)")
elif(re.search('10.0', value)):
export_file.write("(Kodiak)")
return key
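# Hedged usage sketch (file names and sample values below are placeholders,
# not this tool's real invocation):
#   import plistlib
#   with open("SystemVersion.plist", "rb") as f, \
#        open("report.txt", "w") as export_file:
#       plist_info = plistlib.load(f)
#       get_system_version(plist_info, "SystemVersion.plist", "<md5>",
#                          export_file, None, "ProductVersion")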
|
sonali0901/zulip
|
zerver/webhooks/hellosign/tests.py
|
Python
|
apache-2.0
| 884 | 0.005656 |
# -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class HelloSignHookTests(WebhookTestCase):
STREAM_NAME = 'hellosign'
URL_TEMPLATE = "/api/v1/external/hellosign?stream={stream}&api_key
|
={api_key}"
FIXTURE_DIR_NAME = 'hellosign'
def test_signatures_message(self):
# type: () -> None
expected_subject = "NDA with Acme Co."
expected_message = ("The NDA with Acme Co. is awaiting the signature of "
"Jack and was just signed by Jill.")
self.send_and_test_stream_message('signatures', expected_subject, expected_message,
|
content_type="application/x-www-form-urlencoded")
def get_body(self, fixture_name):
# type: (Text) -> Text
return self.fixture_data("hellosign", fixture_name, file_type="json")
|
to266/hyperspy
|
hyperspy/external/tifffile.py
|
Python
|
gpl-3.0
| 172,696 | 0.000029 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced a
|
t the Laboratory for Fluorescenc
|
e Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
import _tifffile
except ImportError:
warnings.warn(
"failed to import the optional _tifffile C extension module.\n"
"Loading of some compressed images will be slow.\n"
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = '{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, compress=6,
... extratags=[(270, 's', 0, description, True)])
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'writeshape' not in kwargs:
kwargs['writeshape'] = True
if 'bigtiff' not in tifargs and data.size * \
data.dtype.itemsize > 2000 * 2 ** 20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1
|
svost/bitcoin
|
qa/rpc-tests/bumpfee.py
|
Python
|
mit
| 14,241 | 0.002036 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
import time
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
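# (Context, stated with the usual caveats: BIP 125 treats a transaction as
# opt-in replaceable when an input's nSequence is below 0xfffffffe, and
# BIP 68 relative lock-times are disabled when the high bit (1 << 31) is
# set -- both conditions hold for 0xfffffffd.)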
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self, split=False):
extra_args = [["-debug", "-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
bitcoind_processes[1].wait()
self.nodes[1] = start_node(1, self.options.tmpdir, extra_args[1])
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split = False
self.sync_all()
def run_test(self):
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
# fund rbf node with 10 coins of 0.001 btc (100,000 satoshis)
print("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
print("Running tests")
dest_address = peer_node.getnewaddress()
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_locked_wallet_fails(rbf_node, dest_address)
print("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
version=0,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
get_change_address(rbf_node): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = create_fund_sign_send(peer_node, {dest_address: 0.00090000})
assert_raises_message(JSONRPCException, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_message(JSONRPCException, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = create_fund_sign_send(rbf_node, {rbf_node_address: 0.00050000})
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransaction(tx)
txid = rbf_node.sendrawtransaction(tx["hex"])
assert_raises_message(JSONRPCException, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node,
Decimal("0.00100000"),
{dest_address: 0.00080000,
get_change_address(rbf_node): Decimal("0.00010000")})
rbf_node.bumpfee(rbfid, {"totalFee": 20000})
rbfid = spend_one_input(rbf_node,
Decimal("0.00100000"),
{dest_address: 0.00080000,
get_change_addr
|
ess(rbf_node):
|
Decimal("0.00010000")})
assert_raises_message(JSONRPCException, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 20001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=9900, but it converts to 10,000
rbfid = spend_one_input(rbf_node,
Decimal("0.00100000"),
|
Superchicken1/DirtyDrive
|
DirtyDrive/apps.py
|
Python
|
apache-2.0
| 136 | 0 |
from __future__ import unicode_literals
f
|
rom django.apps import AppConfig
class DirtyDriveConfig(AppC
|
onfig):
name = 'DirtyDrive'
|
dabodev/dabodoc
|
api/epydoc/man.py
|
Python
|
mit
| 8,842 | 0.005881 |
# epydoc.py: manpage-style text output
# Edward Loper
#
# Created [01/30/01 05:18 PM]
# $Id: man.py,v 1.6 2003/07/18 15:46:19 edloper Exp $
#
"""
Documentation formatter that produces man-style documentation.
@note: This module is under development. It generates incomplete
documentation pages, and is not yet incorporated into epydoc's
command-line interface.
"""
__docformat__ = 'epytext en'
##################################################
## Imports
##################################################
# system imports
import sys, xml.dom.minidom
# epydoc imports
import epydoc
from epydoc.uid import UID, Link, findUID, make_uid
from epydoc.imports import import_module
from epydoc.objdoc import DocMap, ModuleDoc, FuncDoc
from epydoc.objdoc import ClassDoc, Var, Raise, ObjDoc
##################################################
## Documentation -> Text Conversion
##################################################
class ManFormatter:
def __init__(self, docmap, **kwargs):
self._docmap = docmap
#////////////////////////////////////////////////////////////
# Basic Doc Pages
#////////////////////////////////////////////////////////////
def documentation(self, uid):
if not self._docmap.has_key(uid):
print '**NO DOCS ON %s **' % uid
return
doc = self._docmap[uid]
if uid.is_module(): return self._modulepage(uid, doc)
elif uid.is_class(): return self._classpage(uid, doc)
elif uid.is_routine(): return self._routinepage(uid, doc)
elif uid.is_variable(): return self._varpage(uid, doc)
def _modulepage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
str += self._funclist(doc.functions(), doc, 'FUNCTIONS')
return str
def _classpage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
str += self._funclist(doc.methods(), doc, 'METHODS')
str += self._funclist(doc.staticmethods(), doc, 'STATIC METHODS')
str += self._funclist(doc.classmethods(), doc, 'CLASS METHODS')
return str
def _routinepage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
return str
def _varpage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
return str
#////////////////////////////////////////////////////////////
# Functions
#////////////////////////////////////////////////////////////
def _funclist(self, functions, cls, title='FUNCTIONS'):
str = self._title(title)
numfuncs = 0
for link in functions:
fname = link.name()
func = link.target()
if func.is_method():
container = func.cls()
inherit = (container != cls.uid())
else:
inherit = 0
try: container = func.module()
except TypeError: container = None
            # If we don't have documentation for the function, then we
            # can't say anything about it.
            if not self._docmap.has_key(func): continue
fdoc = self._docmap[func]
# What does this method override?
foverrides = fdoc.overrides()
# Try to find a documented ancestor.
inhdoc = self._docmap.documented_ancestor(func) or fdoc
inherit_docs = (inhdoc is not fdoc)
numfuncs += 1
str += ' %s\n' % self._func_signature(self._bold(fname), fdoc)
# Use the inherited docs for everything but the signature.
fdoc = inhdoc
fdescr=fdoc.descr()
fparam = fdoc.parameter_list()[:]
freturn = fdoc.returns()
fraises = fdoc.raises()
# Don't list parameters that don't have any extra info.
f = lambda p:p.descr() or p.type()
fparam = filter(f, fparam)
# Description
if fdescr:
fdescr_str = fdescr.to_plaintext(None, indent=8)
if fdescr_str.strip(): str += fdescr_str
# Parameters
if fparam:
str += ' Parameters:\n'
for param in fparam:
pname = param.name()
str += ' ' + pname
if param.descr():
pdescr = param.descr().to_plaintext(None, indent=12)
str += ' - %s' % pdescr.strip()
str += '\n'
if param.type():
                    ptype = param.type().to_plaintext(None, indent=16)
str += ' '*16+'(type=%s)\n' % ptype.strip()
# Returns
if freturn.descr():
fdescr = freturn.descr().to_plaintext(None, indent=12)
str += ' Returns:\n%s' % fdescr
if freturn.type():
ftype = freturn.type().to_plaintext(None, indent=12)
str += (" Return Type: %s" % ftype.lstrip())
## Raises
#if fraises:
# str += ' Raises:\n'
# for fraise in fraises:
# str += ' '
# str += ''+fraise.name()+' -\n'
# str += epytext.to_plaintext(fraise.descr(), 12)
## Overrides
#if foverrides:
# str += ' <dl><dt><b>Overrides:</b></dt>\n'
# str += ' <dd>'+self._uid_to_href(foverrides)
# if inherit_docs:
# str += ' <i>(inherited documentation)</i>\n'
# str += '</dd>\n </dl>\n'
if numfuncs == 0: return ''
return str
def _func_signature(self, fname, fdoc, show_defaults=1):
str = fname
str += '('
str += self._params_to_text(fdoc.parameters(), show_defaults)
if fdoc.vararg():
vararg_name = fdoc.vararg().name()
if vararg_name != '...': vararg_name = '*%s' % vararg_name
str += '%s, ' % vararg_name
if fdoc.kwarg():
str += '**%s, ' % fdoc.kwarg().name()
if str[-1] != '(': str = str[:-2]
return str + ')'
def _params_to_text(self, parameters, show_defaults):
str = ''
for param in parameters:
if type(param) in (type([]), type(())):
sublist = self._params_to_text(param,
show_defaults)
str += '(%s), ' % sublist[:-2]
else:
str += param.name()
if show_defaults and param.default() is not None:
default = param.default()
if len(default) > 60:
default = default[:57]+'...'
str += '=%s' % default
str += ', '
return str
#////////////////////////////////////////////////////////////
|
# Helpers
#////////////////////////////////////////////////////////////
def _bold(self, text):
"""
|
Format a string in bold by overstriking."""
return ''.join([ch+'\b'+ch for ch in text])
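        # Hedged illustration: _bold("hi") produces 'h\bh' 'i\bi'
        # (character, backspace, character), which classic pagers such as
        # less(1) render as bold -- the traditional man-page overstrike.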
def _title(self, text):
return '%s\n' % self._bold(text)
def _kind(self, uid):
if uid.is_package(): return 'package'
elif uid.is_module(): return 'module'
elif uid.is_class(): return 'class'
elif uid.is_method() or uid.is_builtin_method(): return 'method'
elif uid.is_routine(): return 'function'
elif uid.is_variable(): return 'variable'
else: raise AssertionError, 'Bad UID type for _name'
def _name(self, uid):
if uid.parent():
parent = uid.parent()
name = '%s %s in %s %s' % (self._kind(uid),
self._bold(uid.shortname()),
self._kind(parent),
self._bold(paren
|
googleapis/python-aiplatform
|
google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py
|
Python
|
apache-2.0
| 21,535 | 0.002136 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import index_endpoint
from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1.types import index_endpoint_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexEndpointServiceGrpcTransport
class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport):
"""gRPC AsyncIO backend transport for IndexEndpointService.
A service for managing Vertex AI's IndexEndpoints.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
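    # Hedged usage sketch (the host below is this transport's documented
    # default; credential resolution falls back to the environment as
    # described above):
    #
    #   channel = IndexEndpointServiceGrpcAsyncIOTransport.create_channel(
    #       host="aiplatform.googleapis.com")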
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
|
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
|
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecat
|
yorzh86/Step1
|
scripts/post.py
|
Python
|
gpl-2.0
| 1,575 | 0.040635 |
#!/usr/bin/env python
from pylab import *
t,x,y,u,v,ax,ay = loadtxt('trajectory.dat',unpack=True)
r = sqrt(x**2+y**2)
k = u**2+v**2
s = '.' if t.size < 100 else ''
figure('Trajectory',figsize=(5,4))
subplot(111,aspect=1)
plot(x,y,'b%s-'%s,lw=1)
xl,xh = (x.min(),x.max())
xb = 0.1*(xh-xl)
xlim(xl-xb,xh+xb)
|
yl,yh = (y.min(),y.max())
yb = 0.1*(yh-yl)
ylim(yl-yb,yh+yb)
xlabel(r'$x$-coordinate [m]')
ylabel(r'$y$-coordinate [m]')
tight_layout()
figure('',figsize=(8,8))
subplot(221)
#~ figure('Decay',figsize=(5,4))
plot(t,r,'r%s-'%s)
yl,yh = ylim()
yb = 0.1*(yh-0)
ylim(0-yb,yh+5*yb)
xlabel(r'Time $t$ [s]')
ylabel(r'Radius $r$ [m]')
#~ tight_layout()
subplot(222)
#~ figure('Kinetic Energy',figsize=(5,4))
plot(t,k,'r%s-'%s
|
)
yl,yh = ylim()
yb = 0.1*(yh-0)
ylim(0-yb,yh+5*yb)
xlabel(r'Time $t$ [s]')
ylabel(r'Kinetic Energy $KE$ [J]')
#~ tight_layout()
subplot(223)
#~ figure('Velocities',figsize=(5,4))
plot(t,u,'r%s-'%s,label=r'$\vec{v}\cdot\hat{e}_x$')
plot(t,v,'b%s-'%s,label=r'$\vec{v}\cdot\hat{e}_y$')
yl,yh = ylim()
yb = 0.1*(yh-yl)
ylim(yl-yb,yh+5*yb)
xlabel(r'Time $t$ [s]')
ylabel(r'Velocity $\vec{v}\cdot\hat{e}_n$ [m/s]')
legend(loc='best',fancybox=True,ncol=2)
#~ tight_layout()
subplot(224)
#~ figure('Acceleration',figsize=(5,4))
plot(t,ax,'r%s-'%s,label=r'$\vec{a}\cdot\hat{e}_x$')
plot(t,ay,'b%s-'%s,label=r'$\vec{a}\cdot\hat{e}_y$')
yl,yh = ylim()
yb = 0.1*(yh-yl)
ylim(yl-yb,yh+5*yb)
xlabel(r'Time $t$ [s]')
ylabel(r'Acceleration $\vec{a}\cdot\hat{e}_n$ [m/s$^2$]')
legend(loc='best',fancybox=True,ncol=2)
#~ tight_layout()
tight_layout()
show()
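# Hedged note: loadtxt() above expects 'trajectory.dat' to hold seven
# whitespace-separated columns per row, in the order t x y u v ax ay,
# e.g. a line such as:  0.0  1.0  0.0  0.0  1.0  -1.0  0.0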
|
ryansb/workstation
|
roles/unbound/files/ddns.py
|
Python
|
mit
| 1,890 | 0.005291 |
#!/usr/bin/env python3
import os
import sys
import json
import boto3
import platform
import traceback
import subprocess
from datetime import datetime
config = {}
with open(os.path.expanduser('~/.ddns.conf')) as conf:
config.update(json.load(conf))
ZONE_ID = config['zone_id']
ROOT = config['root']
HOST = config.get('host', platform.uname().node.split('.')[0])
TTL = config.get('ttl', 300)
session = boto3.Session(profile_name='personal')
r53 = session.client('route53')
def dig_ip(hostname):
cmd = f'dig +short {hostname} @resolver1.opendns.com
|
'.split(' ')
try
|
:
return subprocess.check_output(cmd).decode('utf-8').strip()
except Exception as exc:
print(f'{datetime.utcnow().isoformat()}+UTC Failed to read DNS name - bailing out')
traceback.print_exc()
sys.exit(1)
def my_ip():
return dig_ip('myip.opendns.com')
def change_recordset(current_ip):
resp = r53.change_resource_record_sets(
HostedZoneId=ZONE_ID,
ChangeBatch={
'Comment': f'Automatic DDNS change {datetime.utcnow().isoformat()}+UTC',
'Changes': [{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': '.'.join((HOST, ROOT)),
'Type': 'A',
'TTL': TTL,
'ResourceRecords': [{'Value': current_ip}]
}
}]
}
)
print(f'{datetime.utcnow().isoformat()}+UTC Submitted change request: {resp}')
def main():
current_ip = my_ip()
r53_ip = dig_ip('.'.join((HOST, ROOT)))
if current_ip != r53_ip:
print(f'{datetime.utcnow().isoformat()}+UTC Mismatch alert, {r53_ip} does not match {current_ip}')
change_recordset(current_ip)
else:
print(f'{datetime.utcnow().isoformat()}+UTC All good - IP is updated in R53')
if __name__ == '__main__':
main()
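# Hedged example of ~/.ddns.conf (keys taken from the config reads above;
# every value here is a placeholder):
# {"zone_id": "Z3EXAMPLE", "root": "example.com", "host": "laptop", "ttl": 300}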
|
darkshark007/PoGoCollection
|
Data/moves.py
|
Python
|
gpl-3.0
| 11,221 | 0.111755 |
class BASIC_MOVE:
ID = 0
Name = 1
Type = 2
PW = 3
Duration = 4
NRG = 5
NRGPS = 6
DPS = 7
# Adapted from the GAME_MASTER_FILE Json Output at:
# https://github.com/pokemongo-dev-contrib/pokemongo-game-master/
# https://raw.githubusercontent.com/pokemongo-dev-contrib/pokemongo-game-master/master/versions/latest/GAME_MASTER.json
BASIC_MOVE_DATA = [
# ID, Name,Type, PW, Duration (ms), NRG, NRGPS, DPS
[200,"Fury Cutter","Bug",3,400,6,15,7.5],
[201,"Bug Bite","Bug",5,500,6,12,10],
[202,"Bite","Dark",6,500,4,8,12],
[203,"Sucker Punch","Dark",7,700,8,11.428571428571429,10],
[204,"Dragon Breath","Dragon",6,500,4,8,12],
[205,"Thunder Shock","Electric",5,600,8,13.333333333333334,8.333333333333334],
[206,"Spark","Electric",6,700,9,12.857142857142858,8.571428571428571],
[207,"Low Kick","Fighting",6,600,6,10,10],
[208,"Karate Chop","Fighting",8,800,10,12.5,10],
[209,"Ember","Fire",10,1000,10,10,10],
[210,"Wing Attack","Flying",8,800,9,11.25,10],
[211,"Peck","Flying",10,1000,10,10,10],
[212,"Lick","Ghost",5,500,6,12,10],
[213,"Shadow Claw","Ghost",9,700,6,8.571428571428571,12.857142857142858],
[214,"Vine Whip","Grass",7,600,6,10,11.666666666666668],
[215,"Razor Leaf","Grass",13,1000,7,7,13],
[216,"Mud Shot","Ground",5,600,7,11.666666666666668,8.333333333333334],
[217,"Ice Shard","Ice",12,1200,12,10,10],
[218,"Frost Breath","Ice",10,900,8,8.
|
88888888888889,11.11111111111111],
[219,"Quick Attack","Normal",8,800,10,12.5,10],
[220,"Scratch","Normal",6,500,4,8,12],
[221,"Tackle","Normal",5,500,5,10,10],
[222,"Pound","Normal",7,600,6,10,11.666666666666668],
[223,"Cut","Normal",5,500,5,10,10],
[224,"Poison Jab","Poison",10,800,7,8.75,12.5],
[225,"Acid","P
|
oison",9,800,8,10,11.25],
[226,"Psycho Cut","Psychic",5,600,8,13.333333333333334,8.333333333333334],
[227,"Rock Throw","Rock",12,900,7,7.777777777777779,13.333333333333334],
[228,"Metal Claw","Steel",8,700,7,10,11.428571428571429],
[229,"Bullet Punch","Steel",9,900,10,11.11111111111111,10],
[230,"Water Gun","Water",5,500,5,10,10],
[231,"Splash","Water",0,1730,20,11.560693641618498,0],
[232,"Water Gun Blastoise","Water",10,1000,6,6,10],
[233,"Mud Slap","Ground",15,1400,12,8.571428571428571,10.714285714285715],
[234,"Zen Headbutt","Psychic",12,1100,10,9.09090909090909,10.909090909090908],
[235,"Confusion","Psychic",20,1600,15,9.375,12.5],
[236,"Poison Sting","Poison",5,600,7,11.666666666666668,8.333333333333334],
[237,"Bubble","Water",12,1200,14,11.666666666666668,10],
[238,"Feint Attack","Dark",10,900,9,10,11.11111111111111],
[239,"Steel Wing","Steel",11,800,6,7.5,13.75],
[240,"Fire Fang","Fire",11,900,8,8.88888888888889,12.222222222222223],
[241,"Rock Smash","Fighting",15,1300,10,7.692307692307692,11.538461538461537],
[242,"Transform","Normal",0,2230,0,0,0],
[243,"Counter","Fighting",12,900,8,8.88888888888889,13.333333333333334],
[244,"Powder Snow","Ice",6,1000,15,15,6],
[249,"Charge Beam","Electric",8,1100,15,13.636363636363637,7.2727272727272725],
[250,"Volt Switch","Electric",20,2300,25,10.869565217391305,8.695652173913045],
[253,"Dragon Tail","Dragon",15,1100,9,8.181818181818182,13.636363636363637],
[255,"Air Slash","Flying",14,1200,10,8.333333333333334,11.666666666666668],
[260,"Infestation","Bug",10,1100,14,12.727272727272727,9.09090909090909],
[261,"Struggle Bug","Bug",15,1500,15,10,10],
[263,"Astonish","Ghost",8,1100,14,12.727272727272727,7.2727272727272725],
[264,"Hex","Ghost",10,1200,15,12.5,8.333333333333334],
[266,"Iron Tail","Steel",15,1100,7,6.363636363636363,13.636363636363637],
[269,"Fire Spin","Fire",14,1100,10,9.09090909090909,12.727272727272727],
[271,"Bullet Seed","Grass",8,1100,14,12.727272727272727,7.2727272727272725],
[274,"Extrasensory","Psychic",12,1100,12,10.909090909090908,10.909090909090908],
[278,"Snarl","Dark",12,1100,12,10.909090909090908,10.909090909090908],
[281,"Hidden Power","Normal",15,1500,15,10,10],
[282,"Take Down","Normal",8,1200,10,8.333333333333334,6.666666666666667],
[283,"Waterfall","Water",16,1200,8,6.666666666666667,13.333333333333334],
[287,"Yawn","Normal",0,1700,15,8.823529411764707,0],
[291,"Present","Normal",5,1300,20,15.384615384615383,3.846153846153846],
[297,"Smack Down","Rock",16,1200,8,6.666666666666667,13.333333333333334],
]
def _get_basic_move_by_name(name):
for mv in BASIC_MOVE_DATA:
if name == mv[BASIC_MOVE.Name]:
return mv
return None
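# Hedged usage sketch (data and indices defined above):
#   mv = _get_basic_move_by_name("Water Gun")
#   if mv is not None:
#       print(mv[BASIC_MOVE.Type], mv[BASIC_MOVE.DPS])  # -> Water 10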
class CHARGE_MOVE:
ID = 0
Name = 1
Type = 2
PW = 3
Duration = 4
Crit = 5
NRG = 6
# Adapted from the GAME_MASTER_FILE Json Output at:
# https://github.com/pokemongo-dev-contrib/pokemongo-game-master/
# https://raw.githubusercontent.com/pokemongo-dev-contrib/pokemongo-game-master/master/versions/latest/GAME_MASTER.json
CHARGE_MOVE_DATA = [
# ID Name Type PW Duration (ms) Crit% NRG Cost
[13,"Wrap","Normal",60,2900,5,33],
[14,"Hyper Beam","Normal",150,3800,5,100],
[16,"Dark Pulse","Dark",80,3000,5,50],
[18,"Sludge","Poison",50,2100,5,33],
[20,"Vice Grip","Normal",35,1900,5,33],
[21,"Flame Wheel","Fire",60,2700,5,50],
[22,"Megahorn","Bug",90,2200,5,100],
[24,"Flamethrower","Fire",70,2200,5,50],
[26,"Dig","Ground",100,4700,5,50],
[28,"Cross Chop","Fighting",50,1500,5,50],
[30,"Psybeam","Psychic",70,3200,5,50],
[31,"Earthquake","Ground",120,3600,5,100],
[32,"Stone Edge","Rock",100,2300,5,100],
[33,"Ice Punch","Ice",50,1900,5,33],
[34,"Heart Stamp","Psychic",40,1900,5,33],
[35,"Discharge","Electric",65,2500,5,33],
[36,"Flash Cannon","Steel",100,2700,5,100],
[38,"Drill Peck","Flying",60,2300,5,33],
[39,"Ice Beam","Ice",90,3300,5,50],
[40,"Blizzard","Ice",130,3100,5,100],
[42,"Heat Wave","Fire",95,3000,5,100],
[45,"Aerial Ace","Flying",55,2400,5,33],
[46,"Drill Run","Ground",80,2800,5,50],
[47,"Petal Blizzard","Grass",110,2600,5,100],
[48,"Mega Drain","Grass",25,2600,5,50],
[49,"Bug Buzz","Bug",90,3700,5,50],
[50,"Poison Fang","Poison",35,1700,5,33],
[51,"Night Slash","Dark",50,2200,5,33],
[53,"Bubble Beam","Water",45,1900,5,33],
[54,"Submission","Fighting",60,2200,5,50],
[56,"Low Sweep","Fighting",40,1900,5,33],
[57,"Aqua Jet","Water",45,2600,5,33],
[58,"Aqua Tail","Water",50,1900,5,33],
[59,"Seed Bomb","Grass",55,2100,5,33],
[60,"Psyshock","Psychic",65,2700,5,33],
[62,"Ancient Power","Rock",70,3500,5,33],
[63,"Rock Tomb","Rock",70,3200,5,50],
[64,"Rock Slide","Rock",80,2700,5,50],
[65,"Power Gem","Rock",80,2900,5,50],
[66,"Shadow Sneak","Ghost",50,2900,5,33],
[67,"Shadow Punch","Ghost",40,1700,5,33],
[69,"Ominous Wind","Ghost",50,2300,5,33],
[70,"Shadow Ball","Ghost",100,3000,5,50],
[72,"Magnet Bomb","Steel",70,2800,5,33],
[74,"Iron Head","Steel",60,1900,5,50],
[75,"Parabolic Charge","Electric",25,2800,5,50],
[77,"Thunder Punch","Electric",45,1800,5,33],
[78,"Thunder","Electric",100,2400,5,100],
[79,"Thunderbolt","Electric",80,2500,5,50],
[80,"Twister","Dragon",45,2800,5,33],
[82,"Dragon Pulse","Dragon",90,3600,5,50],
[83,"Dragon Claw","Dragon",50,1700,5,33],
[84,"Disarming Voice","Fairy",70,3900,5,33],
[85,"Draining Kiss","Fairy",60,2600,5,50],
[86,"Dazzling Gleam","Fairy",100,3500,5,50],
[87,"Moonblast","Fairy",130,3900,5,100],
[88,"Play Rough","Fairy",90,2900,5,50],
[89,"Cross Poison","Poison",40,1500,5,33],
[90,"Sludge Bomb","Poison",80,2300,5,50],
[91,"Sludge Wave","Poison",110,3200,5,100],
[92,"Gunk Shot","Poison",130,3100,5,100],
[94,"Bone Club","Ground",40,1600,5,33],
[95,"Bulldoze","Ground",80,3500,5,50],
[96,"Mud Bomb","Ground",55,2300,5,33],
[99,"Signal Beam","Bug",75,2900,5,50],
[100,"X-Scissor","Bug",45,1600,5,33],
[101,"Flame Charge","Fire",70,3800,5,33],
[102,"Flame Burst","Fire",70,2600,5,50],
[103,"Fire Blast","Fire",140,4200,5,100],
[104,"Brine","Water",60,
|
ua-snap/downscale
|
snap_scripts/epscor_sc/move_raw_cmip5_tas_pr.py
|
Python
|
mit
| 1,039 | 0.052936 |
# # # # #
# MOVE THE NEWLY DOWNLOADED TAS / PR CMIP5 data from work desktop to /Shared
# # # # #
def move_new_dir( fn, output_dir ):
dirname, basename = os.path.split( fn )
elems = basename.split('.')[0].split( '_' )
variable, cmor_table, model,
|
scenario, experiment, years = elems
new_dir = os.path.join( output_dir, model, scenario, variable )
try:
if not os.path.exists( new_dir ):
os.makedirs( new_dir )
except:
pass
return shutil.copy( fn, new_dir )
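# Hedged example (the filename below is a typical CMIP5 pattern, not taken
# from this repo): 'tas_Amon_GFDL-CM3_rcp60_r1i1p1_200601-210012.nc' splits
# into (variable, cmor_table, model, scenario, experiment, years), so it is
# copied to <output_dir>/GFDL-CM3/rcp60/tas/.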
if __name__ == '__main__':
import os, glob, shutil
path = '/srv/synda/sdt/data'
output_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/raw_cmip5_tas_pr'
filel
|
ist = []
for root, subs, files in os.walk( path ):
if len( files ) > 0:
filelist = filelist + [ os.path.join( root, i ) for i in files if i.endswith( '.nc' ) ]
out = [ move_new_dir( fn, output_dir ) for fn in filelist ]
# # # # # # # #
# # CHECK FOR DUPLICATES and remove by hand. this is tedious.
# GFDL - OK
# CCSM4 - FIXED OK
# GISS-E2-R - OK
# IPSL - OK
# MRI - OK
|
GuessWhoSamFoo/pandas
|
pandas/tests/indexes/timedeltas/test_astype.py
|
Python
|
bsd-3-clause
| 4,066 | 0 |
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Float64Index, Index, Int64Index, NaT, Timedelta, TimedeltaIndex,
timedelta_range)
import pandas.util.testing as tm
class TestTimedeltaIndex(object):
def test_astype_object(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT,
Timede
|
lta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3,
dtype=object)
|
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_uint(self):
arr = timedelta_range('1H', periods=2)
expected = pd.UInt64Index(
np.array([3600000000000, 90000000000000], dtype="uint64")
)
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
@pytest.mark.parametrize('dtype', [
float, 'datetime64', 'datetime64[ns]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
msg = 'Cannot cast TimedeltaArray to dtype'
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_category(self):
obj = pd.timedelta_range("1H", periods=2, freq='H')
result = obj.astype('category')
expected = pd.CategoricalIndex([pd.Timedelta('1H'),
pd.Timedelta('2H')])
tm.assert_index_equal(result, expected)
result = obj._data.astype('category')
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = pd.timedelta_range("1H", periods=2)
result = obj.astype(bool)
expected = pd.Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
|
meisterkleister/erpnext
|
erpnext/hr/doctype/salary_slip/salary_slip.py
|
Python
|
agpl-3.0
| 7,265 | 0.025327 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, cint, cstr, flt, getdate, nowdate, rounded
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.setup.utils import get_company_currency
from erpnext.hr.utils import set_employee_name
from erpnext.utilities.transaction_base import TransactionBase
class SalarySlip(TransactionBase):
def autoname(self):
self.name = make_autoname('Sal Slip/' +self.employee + '/.#####')
def get_emp_and_leave_details(self):
if self.employee:
self.get_leave_details()
struct = self.check_sal_struct()
if struct:
self.pull_sal_struct(struct)
def check_sal_struct(self):
struct = frappe.db.sql("""select name from `tabSalary Structure`
where employee=%s and is_active = 'Yes'""", self.employee)
if not struct:
msgprint(_("Please create Salary Structure for employee {0}").format(self.employee))
self.employee = None
return struct and struct[0][0] or ''
def pull_sal_struct(self, struct):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
self.update(make_salary_slip(struct, self).as_dict())
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee,
["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, lwp=None):
if not self.fiscal_year:
self.fiscal_year = frappe.db.get_default("fiscal_year")
if not self.month:
self.month = "%02d" % getdate(nowdate()).month
m = frappe.get_doc('Process Payroll').get_month_details(self.fiscal_year, self.month)
holidays = self.get_holidays_for_employee(m)
if not cint(frappe.db.get_value("HR Settings", "HR Settings",
"include_
|
holidays_in_total_working_days")):
m["month_days"] -= len(holidays)
if m["month_days"] < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = self.calculate_lwp(holidays, m)
self.total_days_in_month = m['month_days']
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(m)) - flt(lwp)
self.pay
|
ment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, m):
payment_days = m['month_days']
emp = frappe.db.sql("select date_of_joining, relieving_date from `tabEmployee` \
where name = %s", self.employee, as_dict=1)[0]
if emp['relieving_date']:
if getdate(emp['relieving_date']) > m['month_start_date'] and \
getdate(emp['relieving_date']) < m['month_end_date']:
payment_days = getdate(emp['relieving_date']).day
elif getdate(emp['relieving_date']) < m['month_start_date']:
frappe.throw(_("Employee relieved on {0} must be set as 'Left'").format(emp["relieving_date"]))
if emp['date_of_joining']:
if getdate(emp['date_of_joining']) > m['month_start_date'] and \
getdate(emp['date_of_joining']) < m['month_end_date']:
payment_days = payment_days - getdate(emp['date_of_joining']).day + 1
elif getdate(emp['date_of_joining']) > m['month_end_date']:
payment_days = 0
return payment_days
def get_holidays_for_employee(self, m):
holidays = frappe.db.sql("""select t1.holiday_date
from `tabHoliday` t1, tabEmployee t2
where t1.parent = t2.holiday_list and t2.name = %s
and t1.holiday_date between %s and %s""",
(self.employee, m['month_start_date'], m['month_end_date']))
if not holidays:
holidays = frappe.db.sql("""select t1.holiday_date
from `tabHoliday` t1, `tabHoliday List` t2
where t1.parent = t2.name and ifnull(t2.is_default, 0) = 1
and t2.fiscal_year = %s
and t1.holiday_date between %s and %s""", (self.fiscal_year,
m['month_start_date'], m['month_end_date']))
holidays = [cstr(i[0]) for i in holidays]
return holidays
def calculate_lwp(self, holidays, m):
lwp = 0
for d in range(m['month_days']):
dt = add_days(cstr(m['month_start_date']), d)
if dt not in holidays:
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and ifnull(t2.is_lwp, 0) = 1
and t1.docstatus = 1
and t1.employee = %s
and %s between from_date and to_date
""", (self.employee, dt))
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where month = %s and fiscal_year = %s and docstatus != 2
and employee = %s and name != %s""",
(self.month, self.fiscal_year, self.employee, self.name))
if ret_exist:
			emp = self.employee
			self.employee = ''
			frappe.throw(_("Salary Slip of employee {0} already created for this month").format(emp))
def validate(self):
from frappe.utils import money_in_words
self.check_existing()
if not (len(self.get("earnings")) or
len(self.get("deductions"))):
self.get_emp_and_leave_details()
else:
self.get_leave_details(self.leave_without_pay)
if not self.net_pay:
self.calculate_net_pay()
company_currency = get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
set_employee_name(self)
def calculate_earning_total(self):
self.gross_pay = flt(self.arrear_amount) + flt(self.leave_encashment_amount)
for d in self.get("earnings"):
if cint(d.e_depends_on_lwp) == 1:
d.e_modified_amount = rounded((flt(d.e_amount) * flt(self.payment_days)
/ cint(self.total_days_in_month)), self.precision("e_modified_amount", "earnings"))
elif not self.payment_days:
d.e_modified_amount = 0
elif not d.e_modified_amount:
d.e_modified_amount = d.e_amount
self.gross_pay += flt(d.e_modified_amount)
def calculate_ded_total(self):
self.total_deduction = 0
for d in self.get('deductions'):
if cint(d.d_depends_on_lwp) == 1:
d.d_modified_amount = rounded((flt(d.d_amount) * flt(self.payment_days)
/ cint(self.total_days_in_month)), self.precision("d_modified_amount", "deductions"))
elif not self.payment_days:
d.d_modified_amount = 0
elif not d.d_modified_amount:
d.d_modified_amount = d.d_amount
self.total_deduction += flt(d.d_modified_amount)
def calculate_net_pay(self):
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
self.calculate_earning_total()
self.calculate_ded_total()
self.net_pay = flt(self.gross_pay) - flt(self.total_deduction)
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
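	# Hedged worked example: with gross_pay 10000 and total_deduction 1500,
	# net_pay is 8500; rounded_total rounds to whole currency units unless
	# "disable_rounded_total" is set in Global Defaults, in which case the
	# net_pay field's own precision is used.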
def on_submit(self):
if(self.email_check == 1):
self.send_mail_funct()
def send_mail_funct(self):
receiver = frappe.db.get_value("Employee", self.employee, "company_email")
if receiver:
subj = 'Salary Slip - ' + cstr(self.month) +'/'+cstr(self.fiscal_year)
frappe.sendmail([receiver], subject=subj, message = _("Please see attachment"),
attachments=[frappe.attach_print(self.doctype, self.name, file_name=self.name)])
else:
msgprint(_("Company Email ID not found, hence mail not sent"))
|
Fl0r14n/django_googleapi
|
gdrive/urls.py
|
Python
|
mit
| 560 | 0 |
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.auth.views import login, logout
import views
urlpatterns = patterns(
'gauth',
    url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),
    url(r'^logout/$', logout, {'template_name': 'logout.html'}, name='logout'),
url(r'^oauth2_begin/$', views.oauth2_begin, name='oauth2_begin'),
url(r'^' + settings.OAUTH2_CALLBACK + '/$', views.oauth2_callback),
url(r'^oauth2_complete/$
|
', views.oauth2_complete, name='oauth2_complete'),
|
)
|
openstack/heat
|
heat/common/policy.py
|
Python
|
apache-2.0
| 7,615 | 0 |
#
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not us
|
e this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distribu
|
ted under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Based on glance/api/policy.py
"""Policy Engine For Heat."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import opts
from oslo_policy import policy
from oslo_utils import excutils
from heat.common import exception
from heat.common.i18n import _
from heat import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_RULES = policy.Rules.from_dict({'default': '!'})
DEFAULT_RESOURCE_RULES = policy.Rules.from_dict({'default': '@'})
# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(CONF, DEFAULT_POLICY_FILE)
ENFORCER = None
class Enforcer(object):
"""Responsible for loading and enforcing rules."""
def __init__(self, scope='heat', exc=exception.Forbidden,
default_rule=DEFAULT_RULES['default'], policy_file=None):
self.scope = scope
self.exc = exc
self.default_rule = default_rule
self.enforcer = policy.Enforcer(
CONF, default_rule=default_rule, policy_file=policy_file)
self.log_not_registered = True
# TODO(ramishra) Remove this once remove the deprecated rules.
self.enforcer.suppress_deprecation_warnings = True
# register rules
self.enforcer.register_defaults(policies.list_rules())
self.file_rules = self.enforcer.file_rules
self.registered_rules = self.enforcer.registered_rules
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules."""
rules_obj = policy.Rules(rules, self.default_rule)
self.enforcer.set_rules(rules_obj, overwrite)
def load_rules(self, force_reload=False):
"""Set the rules found in the json file on disk."""
self.enforcer.load_rules(force_reload)
def _check(self, context, rule, target, exc,
is_registered_policy=False, *args, **kwargs):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param rule: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
do_raise = False if not exc else True
credentials = context.to_policy_values()
if is_registered_policy:
try:
return self.enforcer.authorize(rule, target, credentials,
do_raise=do_raise,
exc=exc, action=rule)
except policy.PolicyNotRegistered:
if self.log_not_registered:
with excutils.save_and_reraise_exception():
LOG.exception(_('Policy not registered.'))
else:
raise
else:
return self.enforcer.enforce(rule, target, credentials,
do_raise, exc=exc, *args, **kwargs)
def enforce(self, context, action, scope=None, target=None,
is_registered_policy=False):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
_action = '%s:%s' % (scope or self.scope, action)
_target = target or {}
return self._check(context, _action, _target, self.exc, action=action,
is_registered_policy=is_registered_policy)
def check_is_admin(self, context):
"""Whether or not is admin according to policy.
By default the rule will check whether or not roles contains
'admin' role and is admin project.
:param context: Heat request context
:returns: A non-False value if the user is admin according to policy
"""
return self._check(context, 'context_is_admin', target={}, exc=None,
is_registered_policy=True)
def get_policy_enforcer():
# This method is used by oslopolicy CLI scripts to generate policy
# files from overrides on disk and defaults in code.
CONF([], project='heat')
return get_enforcer()
def get_enforcer():
global ENFORCER
if ENFORCER is None:
ENFORCER = Enforcer()
return ENFORCER
class ResourceEnforcer(Enforcer):
def __init__(self, default_rule=DEFAULT_RESOURCE_RULES['default'],
**kwargs):
super(ResourceEnforcer, self).__init__(
default_rule=default_rule, **kwargs)
self.log_not_registered = False
def _enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
try:
result = super(ResourceEnforcer, self).enforce(
context, res_type,
scope=scope or 'resource_types',
target=target, is_registered_policy=is_registered_policy)
except policy.PolicyNotRegistered:
result = True
except self.exc as ex:
LOG.info(str(ex))
raise
if not result:
if self.exc:
raise self.exc(action=res_type)
return result
def enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
# NOTE(pas-ha): try/except just to log the exception
result = self._enforce(context, res_type, scope, target,
is_registered_policy=is_registered_policy)
if result:
# check for wildcard resource types
subparts = res_type.split("::")[:-1]
subparts.append('*')
res_type_wc = "::".join(subparts)
try:
return self._enforce(context, res_type_wc, scope, target,
is_registered_policy=is_registered_policy)
except self.exc:
raise self.exc(action=res_type)
return result
def enforce_stack(self, stack, scope=None, target=None,
is_registered_policy=False):
for res in stack.resources.values():
self.enforce(stack.context, res.type(), scope=scope, target=target,
is_registered_policy=is_registered_policy)
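# Illustrative sketch (assumption, not part of the original module): a
# minimal example of driving the enforcer above; the action name
# 'stacks:global_index' and the context object are assumed for demonstration.
def _example_enforce(context):
    enforcer = get_enforcer()
    # Raises exception.Forbidden when policy denies access; returns a
    # non-False value otherwise.
    return enforcer.enforce(context, 'stacks:global_index',
                            is_registered_policy=True)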
|
sdgdsffdsfff/pymesos
|
pymesos/scheduler.py
|
Python
|
bsd-3-clause
| 8,630 | 0.003708 |
import time
import logging
import struct
import socket
from mesos.interface.mesos_pb2 import TASK_LOST, MasterInfo
from .messages_pb2 import (
RegisterFrameworkMessage, ReregisterFrameworkMessage,
DeactivateFrameworkMessage, UnregisterFrameworkMessage,
ResourceRequestMessage, ReviveOffersMessage, LaunchTasksMessage, KillTaskMessage,
StatusUpdate, StatusUpdateAcknowledgementMessage, FrameworkToExecutorMessage,
ReconcileTasksMessage
)
from .process import UPID, Process, async
logger = logging.getLogger(__name__)
class MesosSchedulerDriver(Process):
def __init__(self, sched, framework, master_uri):
Process.__init__(self, 'scheduler')
self.sched = sched
#self.executor_info = executor_info
self.master_uri = master_uri
self.framework = framework
self.framework.failover_timeout = 100
self.framework_id = framework.id
self.master = None
self.detector = None
self.connected = False
self.savedOffers = {}
self.savedSlavePids = {}
@async # called by detector
def onNewMasterDetectedMessage(self, data):
try:
info = MasterInfo()
info.ParseFromString(data)
ip = socket.inet_ntoa(struct.pack('<I', info.ip))
master = UPID('master@%s:%s' % (ip, info.port))
except:
master = UPID(data)
self.connected = False
self.register(master)
@async # called by detector
def onNoMasterDetectedMessage(self):
self.connected = False
self.master = None
def register(self, master):
if self.connected or self.aborted:
return
if master:
if not self.framework_id.value:
msg = RegisterFrameworkMessage()
msg.framework.MergeFrom(self.framework)
else:
msg = ReregisterFrameworkMessage()
msg.framework.MergeFrom(self.framework)
msg.failover = True
self.send(master, msg)
self.delay(2, lambda:self.register(master))
def onFrameworkRegisteredMessage(self, framework_id, master_info):
self.framework_id = framework_id
self.framework.id.MergeFrom(framework_id)
self.connected = True
self.master = UPID('master@%s:%s' % (socket.inet_ntoa(struct.pack('<I', master_info.ip)), master_info.port))
self.link(self.master, self.onDisconnected)
self.sched.registered(self, framework_id, master_info)
def onFrameworkReregisteredMessage(self, framework_id, master_info):
assert self.framework_id == framework_id
self.connected = True
self.master = UPID('master@%s:%s' % (socket.inet_ntoa(struct.pack('<I', master_info.ip)), master_info.port))
self.link(self.master, self.onDisconnected)
self.sched.reregistered(self, master_info)
def onDisconnected(self):
self.connected = False
logger.warning("disconnected from master")
self.delay(5, lambda:self.register(self.master))
def onResourceOffersMessage(self, offers, pids):
for offer, pid in zip(offers, pids):
self.savedOffers.setdefault(offer.id.value, {})[offer.slave_id.value] = UPID(pid)
self.sched.resourceOffers(self, list(offers))
def onRescindResourceOfferMessage(self, offer_id):
self.savedOffers.pop(offer_id.value, None)
self.sched.offerRescinded(self, offer_id)
def onStatusUpdateMessage(self, update, pid=''):
if self.sender.addr != self.master.addr:
logger.warning("ignore status update message from %s instead of leader %s", self.sender, self.master)
return
assert self.framework_id == update.framework_id
self.sched.statusUpdate(self, update.status)
if not self.aborted and self.sender.addr and pid:
reply = StatusUpdateAcknowledgementMessage()
reply.framework_id.MergeFrom(self.framework_id)
reply.slave_id.MergeFrom(update.slave_id)
reply.task_id.MergeFrom(update.status.task_id)
reply.uuid = update.uuid
try: self.send(self.master, reply)
except IOError: pass
def onLostSlaveMessage(self, slave_id):
self.sched.slaveLost(self, slave_id)
def onExecutorToFrameworkMessage(self, slave_id, framework_id, executor_id, data):
self.sched.frameworkMessage(self, executor_id, slave_id, data)
def onFrameworkErrorMessage(self, message, code=0):
self.sched.error(self, message)
def start(self):
Process.start(self)
uri = self.master_uri
if uri.startswith('zk://') or uri.startswith('zoo://'):
from .detector import MasterDetector
self.detector = MasterDetector(uri[uri.index('://') + 3:], self)
self.detector.start()
else:
if not ':' in uri:
uri += ':5050'
self.onNewMasterDetectedMessage('master@%s' % uri)
def abort(self):
if self.connected:
msg = DeactivateFrameworkMessage()
msg.framework_id.MergeFrom(self.framework_id)
self.send(self.master, msg)
Process.abort(self)
def stop(self, failover=False):
if self.connected and not failover:
msg = UnregisterFrameworkMessage()
msg.framework_id.MergeFrom(self.framework_id)
self.send(self.master, msg)
if self.detector:
self.detector.stop()
Process.stop(self)
@async
def requestResources(self, requests):
if not self.connected:
return
msg = ResourceRequestMessage()
msg.framework_id.MergeFrom(self.framework_id)
for req in requests:
msg.requests.add().MergeFrom(req)
self.send(self.master, msg)
@async
def reviveOffers(self):
if not self.connected:
return
msg = ReviveOffersMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        self.send(self.master, msg)
@async
def reconcileTasks(self, statuses=None):
if not self.connected:
return
msg = ReconcileTasksMessage()
msg.framework_id.MergeFrom(self.framework_id)
if statuses is not None:
msg.statuses = statuses
self.send(self.master, msg)
def launchTasks(self, offer_id, tasks, filters):
if not self.connected or offer_id.value not in self.savedOffers:
for task in tasks:
update = StatusUpdate()
update.framework_id.MergeFrom(self.framework_id)
update.status.task_id.MergeFrom(task.task_id)
update.status.state = TASK_LOST
update.status.message = 'Master disconnected' if not self.connected else "invalid offer_id"
update.timestamp = time.time()
update.uuid = ''
self.onStatusUpdateMessage(update)
return
msg = LaunchTasksMessage()
msg.framework_id.MergeFrom(self.framework_id)
msg.offer_ids.add().MergeFrom(offer_id)
msg.filters.MergeFrom(filters)
for task in tasks:
msg.tasks.add().MergeFrom(task)
pid = self.savedOffers.get(offer_id.value, {}).get(task.slave_id.value)
if pid and task.slave_id.value not in self.savedSlavePids:
self.savedSlavePids[task.slave_id.value] = pid
self.savedOffers.pop(offer_id.value)
self.send(self.master, msg)
def declineOffer(self, offer_id, filters=None):
if not self.connected:
return
msg = LaunchTasksMessage()
msg.framework_id.MergeFrom(self.framework_id)
msg.offer_ids.add().MergeFrom(offer_id)
if filters:
msg.filters.MergeFrom(filters)
self.send(self.master, msg)
@async
def killTask(self, task_id):
if not self.connected:
return
msg = KillTaskMessage()
msg.framework_id.MergeFrom(self.framework_id)
msg.task_id.MergeFrom(task_id)
self.send(self.master, msg)
@async
def
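# Illustrative sketch (assumption, not from the source): minimal wiring of
# the driver above with a user-supplied scheduler object implementing the
# callbacks it invokes (registered, resourceOffers, statusUpdate, ...):
#
#   framework = FrameworkInfo()
#   framework.user = 'root'
#   framework.name = 'example-framework'
#   driver = MesosSchedulerDriver(MyScheduler(), framework,
#                                 'zk://127.0.0.1:2181/mesos')
#   driver.start()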
|
xapple/pyrotrfid
|
doc/conf.py
|
Python
|
gpl-3.0
| 7,241 | 0.005662 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import pyrotrfid
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyrotrfid'
copyright = u'GPL3'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pyrotrfid.__version__
# The full version, including alpha/beta/rc tags.
release = pyrotrfid.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = project + " v" + release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'**': 'links.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrotrfiddoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'pyrotrfid.tex', u'pyrotrfid Documentation', u'', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'pyrotrfid', u'pyrotrfid Documentation', [u''], 1)]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
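# Illustrative note (assumption): with this conf.py in doc/, the HTML docs
# would typically be built from that directory with:
#   sphinx-build -b html . _build/html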
|
seecloud/ceagle
|
ceagle/api/v1/regions.py
|
Python
|
apache-2.0
| 1,652 | 0 |
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS
|
IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from oss_lib import config
from ceagle.api import client
from ceagle.api_fake_data import fake_regions
CONF = config.CONF
bp = flask.Blueprint("regions", __name__)
@bp.route("", defaults={"detailed": False})
@bp.route("/detailed", defaults={"detailed": True})
@fake_regions.get_regions
def get_regions(detailed):
regions = {}
for service_name in CONF["services"].keys():
if service_name == "infra":
continue # TODO(boris-42): This should not be checked here.
service_client = client.get_client(service_name)
resp, code = service_client.get("/api/v1/regions")
if code != 200:
# FIXME ADD LOGS HERE
continue
for r in resp:
regions.setdefault(r, {"services": []})
regions[r]["services"].append(service_name)
if not detailed:
return flask.jsonify({"regions": list(regions.keys())})
return flask.jsonify({"regions": regions})
def get_blueprints():
return [["/regions", bp]]
|
nugget/home-assistant
|
homeassistant/components/bmw_connected_drive/lock.py
|
Python
|
apache-2.0
| 3,775 | 0 |
"""Support for BMW car locks with BMW ConnectedDrive."""
import logging
from homeassistant.components.bmw_connected_drive import DOMAIN as BMW_DOMAIN
from homeassistant.components.lock import LockDevice
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
DEPENDENCIES = ['bmw_connected_drive']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BMW Connected Drive lock."""
accounts = hass.data[BMW_DOMAIN]
_LOGGER.debug('Found BMW accounts: %s',
', '.join([a.name for a in accounts]))
devices = []
for account in accounts:
if not account.read_only:
for vehicle in account.account.vehicles:
device = BMWLock(account, vehicle, 'lock', 'BMW lock')
devices.append(device)
add_entities(devices, True)
class BMWLock(LockDevice):
"""Representation of a BMW vehicle lock."""
def __init__(self, account, vehicle, attribute: str, sensor_name):
"""Initialize the lock."""
self._account = account
self._vehicle = vehicle
self._attribute = attribute
self._name = '{} {}'.format(self._vehicle.name, self._attribute)
self._unique_id = '{}-{}'.format(self._vehicle.vin, self._attribute)
self._sensor_name = sensor_name
self._state = None
@property
def should_poll(self):
"""Do not poll this class.
Updates are triggered from BMWConnectedDriveAccount.
"""
return False
@property
def unique_id(self):
"""Return the unique ID of the lock."""
return self._unique_id
@property
def name(self):
"""Return the name of the lock."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the lock."""
vehicle_state = self._vehicle.state
return {
'car': self._vehicle.name,
'door_lock_state': vehicle_state.door_lock_state.value
}
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state == STATE_LOCKED
def lock(self, **kwargs):
"""Lock the car."""
_LOGGER.debug("%s: locking doors", self._vehicle.name)
        # Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_LOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_lock()
    def unlock(self, **kwargs):
"""Unlock the car."""
_LOGGER.debug("%s: unlocking doors", self._vehicle.name)
# Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_UNLOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_unlock()
def update(self):
"""Update state of the lock."""
from bimmer_connected.state import LockState
_LOGGER.debug("%s: updating data for %s", self._vehicle.name,
self._attribute)
vehicle_state = self._vehicle.state
# Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
self._state = STATE_LOCKED \
if vehicle_state.door_lock_state \
in [LockState.LOCKED, LockState.SECURED] \
else STATE_UNLOCKED
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
|
MSusik/invenio
|
invenio/legacy/websearch_external_collections/__init__.py
|
Python
|
gpl-2.0
| 21,464 | 0.00601 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""External collection 'core' file.
Perform search, database access."""
__revision__ = "$Id$"
import cgi
import sys
from copy import copy
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.config import CFG_SITE_LANG
from invenio.legacy.dbquery import run_sql, OperationalError, ProgrammingError
from invenio.base.i18n import gettext_set_language
from .config import CFG_EXTERNAL_COLLECTION_TIMEOUT
from .searcher import external_collections_dictionary
from .getter import HTTPAsyncPageGetter, async_download
from .templates import print_results, print_timeout
from .utils import get_collection_id, get_collection_descendants, \
warning, get_verbose_print
import invenio.legacy.template
# Global variables
template = invenio.legacy.template.load('websearch_external_collections')
external_collections_state = None
dico_collection_external_searches = None
dico_collection_seealso = None
#dico_collection_external_searches = {}
#dico_collection_seealso = {}
def print_external_results_overview(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG, print_overview=True):
"""Print the external collection overview box. Return the selected external collections and parsed query"""
from invenio.legacy.search_engine import create_basic_search_units
assert req
vprint = get_verbose_print(req, 'External collection (print_external_results_overview): ', verbosity_level)
pattern = bind_patterns(pattern_list)
vprint(3, 'pattern = %s' % cgi.escape(pattern))
if not pattern:
return (None, None, None, None)
basic_search_units = create_basic_search_units(None, pattern, field)
vprint(3, 'basic_search_units = %s' % cgi.escape(repr(basic_search_units)))
(search_engines, seealso_engines) = select_external_engines(current_collection, external_collection)
vprint(3, 'search_engines = ' + str(search_engines))
vprint(3, 'seealso_engines = ' + str(seealso_engines))
search_engines_list = external_collection_sort_engine_by_name(search_engines)
vprint(3, 'search_engines_list (sorted) : ' + str(search_engines_list))
if print_overview:
html = template.external_collection_overview(lang, search_engines_list)
req.write(html)
return (search_engines, seealso_engines, pattern, basic_search_units)
def perform_external_collection_search(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, print_overview=True,
print_search_info=True, print_see_also_box=True, print_body=True):
"""Search external collection and print the seealso box."""
vprint = get_verbose_print(req, 'External collection: ', verbosity_level)
if selected_external_collections_infos:
(search_engines, seealso_engines, pattern, basic_search_units) = selected_external_collections_infos
else:
(search_engines, seealso_engines, pattern, basic_search_units) = print_external_results_overview(req,
current_collection, pattern_list, field, external_collection, verbosity_level, lang, print_overview=print_overview)
if not pattern:
return
do_external_search(req, lang, vprint, basic_search_units, search_engines, print_search_info, print_body)
if print_see_also_box:
create_seealso_box(req, lang, vprint, basic_search_units, seealso_engines, pattern)
vprint(3, 'end')
def bind_patterns(pattern_list):
"""Combine a list of patterns in an unique pattern.
pattern_list[0] should be the standart search pattern,
pattern_list[1:] are advanced search patterns."""
# just in case an empty list is fed to this function
try:
if pattern_list[0]:
return pattern_list[0]
except IndexError:
return None
pattern = ""
for pattern_part in pattern_list[1:]:
if pattern_part:
pattern += " " + pattern_part
return pattern.strip()
# See also box
def create_seealso_box(req, lang, vprint, basic_search_units=None, seealso_engines=None, query=''):
"Create the box that proposes links to other useful search engines like Google."
vprint(3, 'Create seealso box')
seealso_engines_list = external_collection_sort_engine_by_name(seealso_engines)
vprint(3, 'seealso_engines_list = ' + str(seealso_engines_list))
links = build_seealso_links(basic_search_units, seealso_engines_list, req, lang, query)
html = template.external_collection_seealso_box(lang, links)
req.write(html)
def build_seealso_links(basic_search_units, seealso_engines, req, lang, query):
"""Build the links for the see also box."""
_ = gettext_set_language(lang)
links = []
for engine in seealso_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
url = user_url or url
if url:
links.append('<a class="google" href="%(url)s">%(query)s %(text_in)s %(name)s</a>' % \
{'url': cgi.escape(url),
'query': cgi.escape(query),
'text_in': _('in'),
'name': _(engine.name)})
return links
# Selection
def select_external_engines(collection_name, selected_external_searches):
"""Build a tuple of two sets. The first one is the list of engine to use for an external search and the
second one is for the seealso box."""
collection_id = get_collection_id(collection_name)
if not collection_id:
return (None, None)
if not type(selected_external_searches) is list:
selected_external_searches = [selected_external_searches]
seealso_engines = set()
search_engines = set()
if collection_id in dico_collection_seealso:
seealso_engines = copy(dico_collection_seealso[collection_id])
if collection_id in dico_collection_external_searches:
seealso_engines = seealso_engines.union(dico_collection_external_searches[collection_id])
for ext_search_name in selected_external_searches:
        if ext_search_name in external_collections_dictionary:
engine = external_collections_dictionary[ext_search_name]
if engine.parser:
search_engines.add(engine)
else:
warning('select_external_engines: %(ext_search_name)s unknown.' % locals())
seealso_engines = seealso_engines.difference(search_engines)
return (search_engines, seealso_engines)
# Search
def do_external_search(req, lang, vprint, basic_search_units, search_engines, print_search_info=True, print_body=True):
"""Make the external search."""
_ = gettext_set_language(lang)
vprint(3, 'beginning external search')
engines_list = []
for engine in search_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
if url:
engines_list.append([url, engine, user_url])
pagegetters_list = [HTTPAsyncPageGetter(engine[0]) for engine in engines_list]
def finished(pagegetter, data, current_time, print_search_info=True, print_body=True):
"""Fu
|
manishpatell/erpcustomizationssaiimpex123qwe
|
addons/product_stone_search_ept/py/product/product_category.py
|
Python
|
agpl-3.0
| 912 | 0.024123 |
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class product_category(osv.osv):
_inherit='product.category'
_columns = {
'sale_price' : fields.float('Sale Price',digits_compute=dp.get_precision('Product Price')),
'shape_id':fields.many2one('product.shape',string="Shape"),
        'weight_from':fields.float('Weight From',digits_compute=dp.get_precision('Stock Weight')),
'weight_to':fields.float('Weight To',digits_compute=dp.get_precision('Stock Weight')),
'color_id':fields.many2one('product.color',string='Color'),
'clarity_id':fields.many2one('product.clarity',string='Clarity', ondelete='restrict'),
'shape_line':fields.one2many('shape.line','categ_id','Shape Lines'),
}
|
VictorThompson/ActivityTracker
|
py/geepeeex.py
|
Python
|
gpl-3.0
| 5,539 | 0.017873 |
#!/usr/bin/python3
import gpxpy
import datetime
import time
import os
import gpxpy.gpx
import sqlite3
import pl
import re
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
filebase = os.environ["XDG_DATA_HOME"]+"/"+os.environ["APP_ID"].split('_')[0]
def create_gpx():
# Creating a new file:
# --------------------
gpx = gpxpy.gpx.GPX()
# Create first track in our GPX:
gpx_track = gpxpy.gpx.GPXTrack()
gpx.tracks.append(gpx_track)
# Create first segment in our GPX track:
gpx_segment = gpxpy.gpx.GPXTrackSegment()
gpx_track.segments.append(gpx_segment)
# Create points:
return gpx
def write_gpx(gpx,name,act_type):
# You can add routes and waypoints, too...
tzname=None
npoints=None
# polyline encoder default values
numLevels = 18;
zoomFactor = 2;
epsilon = 0.0;
forceEndpoints = True;
##print('Created GPX:', gpx.to_xml())
ts = int(time.time())
filename = "%s/%i.gpx" % (filebase,ts)
a = open(filename, 'w')
a.write(gpx.to_xml())
a.close()
gpx.simplify()
#gpx.reduce_points(1000)
trk = pl.read_gpx_trk(gpx.to_xml(),tzname,npoints,2,None)
try:
polyline=pl.print_gpx_google_polyline(trk,numLevels,zoomFactor,epsilon,forceEndpoints)
except UnboundLocalError as er:
print(er)
print("Not enough points to create a polyline")
polyline=""
#polyline="polyline"
add_run(gpx,name,act_type,filename,polyline)
def add_point(gpx,lat,lng,elev):
gpx.tracks[0].segments[0].points.append(gpxpy.gpx.GPXTrackPoint(lat, lng, elevation=elev,time=datetime.datetime.now()))
def add_run(gpx, name,act_type,filename,polyline):
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE if not exists activities
(id INTEGER PRIMARY KEY AUTOINCREMENT,name text, act_date text, distance text,
speed text, act_type text,filename text,polyline text)""")
sql = "INSERT INTO activities VALUES (?,?,?,?,?,?,?,?)"
start_time, end_time = gpx.get_time_bounds()
l2d='{:.3f}'.format(gpx.length_2d() / 1000.)
moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx.get_moving_data()
print(max_speed)
#print('%sStopped distance: %sm' % stopped_distance)
maxspeed = 'Max speed: {:.2f}km/h'.format(max_speed * 60. ** 2 / 1000. if max_speed else 0)
duration = 'Duration: {:.2f}min'.format(gpx.get_duration() / 60)
print("-------------------------")
print(name)
print(start_time)
print(l2d)
print(maxspeed)
print("-------------------------")
try:
cursor.execute(sql, [None, name,start_time,l2d,duration,act_type,filename,polyline])
conn.commit()
except sqlite3.Error as er:
print(er)
conn.close()
def get_runs():
#add_run("1", "2", "3", "4")
os.makedirs(filebase, exist_ok=True)
conn = sqlite3.connect('%s/activities.db' % filebase)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("""CREATE TABLE if not exists activities
    (id INTEGER PRIMARY KEY AUTOINCREMENT,name text, act_date text, distance text,
speed text, act_type text,filename text,polyline text)""")
ret_data=[]
sql = "SELECT * FROM activities LIMIT 30"
for i in cursor.execute(sql):
ret_data.append(dict(i))
conn.close()
return ret_data
def get_units():
os.makedirs(filebase, exist_ok=True)
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE if not exists settings
(units text)""")
ret_data=[]
sql = "SELECT units FROM settings"
cursor.execute(sql)
data=cursor.fetchone()
if data is None:
print("NONESIES")
cursor.execute("INSERT INTO settings VALUES ('kilometers')")
conn.commit()
conn.close()
return "kilometers"
return data
def set_units(label):
os.makedirs(filebase, exist_ok=True)
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
cursor.execute("UPDATE settings SET units=? WHERE 1", (label,))
conn.commit()
conn.close()
def onetime_db_fix():
os.makedirs(filebase, exist_ok=True)
filename = "%s/%s" % (filebase,".dbfixed")
if not os.path.exists(filename):
print("Fixing db")
conn = sqlite3.connect('%s/activities.db' % filebase)
numonly = re.compile("(\d*\.\d*)")
cursor = conn.cursor()
a=get_runs()
sql="UPDATE activities SET distance=? WHERE id=?"
for i in a:
print(i["distance"])
b=numonly.search(i["distance"])
print(b.group(0))
print(b)
cursor.execute(sql, (b.group(0), i["id"]))
conn.commit()
conn.close()
dotfile=open(filename, "w")
dotfile.write("db fixed")
        dotfile.close()
else:
print("db already fixed")
def rm_run(run):
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
sql = "DELETE from activities WHERE id=?"
try:
cursor.execute(sql, [run])
conn.commit()
except sqlite3.Error as er:
print("-------------______---_____---___----____--____---___-----")
print(er)
conn.close()
def km_to_mi(km):
return km * 0.62137
def get_data():
moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx.get_moving_data()
return moving_distance, moving_time
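# Illustrative sketch (assumption, not part of the original module): recording
# a two-point track with the helpers above; coordinates and names are made up.
def _example_track():
    gpx = create_gpx()
    add_point(gpx, 60.1699, 24.9384, 10.0)
    add_point(gpx, 60.1700, 24.9390, 11.0)
    # Simplifies the track, writes the .gpx file under filebase and logs
    # the activity in the database.
    write_gpx(gpx, 'Morning run', 'running')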
|
mozaik-association/mozaik
|
mozaik_website_event_track/__manifest__.py
|
Python
|
agpl-3.0
| 525 | 0 |
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Mozaik Website Event Track",
"summary": """
This module allows to see the event menu configuration
even without activated debug mode""",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV",
"website": "https://github.com/OCA/mozaik",
"depends": [
# Odoo
"website_event_track",
],
"d
|
ata": [
"views/event_event.xml",
],
}
|
ebattenberg/crbm-drum-patterns
|
crbm.py
|
Python
|
gpl-3.0
| 63,541 | 0.013047 |
import cPickle as pkl
import pdb
import datetime
import time
import numpy as np
import pylab as pl
import scipy.stats
import scipy.special
from scipy.special import gamma
from scipy.misc import factorial
import gnumpy as gp
import data_helper
class RBM(object):
'''
Restricted Boltzmann Machine (RBM) using numpy
'''
def __init__(self, params={}):
'''
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing
|
CD updates.
input:
-----------------
Nv: number of visible units
Nh: number of hidden units
vis_unit: type of visible unit {'binary','linear'}
('linear' = rectified linear unit)
vis_scale: maximum output value for linear visible units
(average std_dev is ~= 1 at this scale,
so pre-scale training data with this in mind)
bv: visible bias
other params:
-----------------
W: weight between current hidden and visible units (undirected)
[Nv x Nh]
bh: hidden bias
'''
dtype = 'float32'
Nv = params['Nv']
Nh = params['Nh']
vis_unit = params.get('vis_unit','binary')
vis_scale = params.get('vis_scale')
bv = params.get('bv')
Th = params.get('Th',0)
if vis_unit not in ['binary','linear']:
raise ValueError, 'Unknown visible unit type %s' % vis_unit
if vis_unit == 'linear':
if vis_scale is None:
raise ValueError, 'Must set vis_scale for linear visible units'
elif vis_unit == 'binary':
vis_scale = 1.
# W is initialized with `initial_W` which is uniformly sampled
# from -4.*sqrt(6./(Nv+Nh)) and 4.*sqrt(6./(Nh+Nv))
# the output of uniform if converted using asarray to dtype
W = np.asarray( np.random.uniform(
low = -4*np.sqrt(6./(Nv+Nh)),
high = 4*np.sqrt(6./(Nv+Nh)),
size = (Nv, Nh)),
dtype = dtype)
W = gp.garray(W)
bh = gp.zeros(Nh)
if bv is None :
bv = gp.zeros(Nv)
else:
bv = gp.garray(bv)
# params -------------------------------------------
self.dtype = 'float32'
self.Nv = Nv # num visible units
self.Nh = Nh # num hidden units
self.Th = Th # used for framing input
self.vis_unit = vis_unit # type of visible output unit
self.vis_scale = vis_scale # scale of linear output units
self.W = W # vis<->hid weights
self.bv = bv # vis bias
self.bh = bh # hid bias
self.W_update = gp.zeros((Nv,Nh))
self.bh_update = gp.zeros((Nh,))
self.bv_update = gp.zeros((Nv,))
self.params = [ 'dtype',
'vis_unit','vis_scale',
'Nv','Nh',
'W','bh','bv']
def save_params(self,filename=None):
'''
save parameters to file
'''
if filename is None:
fileid = np.random.randint(100000)
filename = 'RBM_%u.pkl' % fileid
params_out = {}
for p in self.params:
val = vars(self)[p]
if type(val) is gp.garray:
params_out[p] = val.as_numpy_array()
else:
params_out[p] = val
fp = open(filename,'wb')
pkl.dump(params_out,fp,protocol=-1)
fp.close()
print 'saved %s' % filename
def load_params(self,filename):
'''
load parameters from file
'''
fp = open(filename,'rb')
params_in = pkl.load(fp)
fp.close()
for key,value in params_in.iteritems():
vars(self)[key] = value
Nv,Nh = self.Nv,self.Nh
dtype = self.dtype
self.W_update = gp.zeros((Nv,Nh))
self.bh_update = gp.zeros((Nh,))
self.bv_update = gp.zeros((Nv,))
self.W = gp.garray(self.W)
self.bh = gp.garray(self.bh)
self.bv = gp.garray(self.bv)
def return_params(self):
'''
return a formatted string containing scalar parameters
'''
output = 'Nv=%u, Nh=%u, vis_unit=%s, vis_scale=%0.2f' \
% (self.Nv,self.Nh,self.vis_unit,self.vis_scale)
return output
def mean_field_h_given_v(self,v):
'''
compute mean-field reconstruction of P(h=1|v)
'''
prob = sigmoid(self.bh + gp.dot(v, self.W))
return prob
def mean_field_v_given_h(self,h):
'''
compute mean-field reconstruction of P(v|h)
'''
x = self.bv + gp.dot(h, self.W.T)
if self.vis_unit == 'binary':
return sigmoid(x)
elif self.vis_unit == 'linear':
return log_1_plus_exp(x) - log_1_plus_exp(x-self.vis_scale)
return prob
def sample_h_given_v(self,v):
'''
compute samples from P(h|v)
'''
prob = self.mean_field_h_given_v(v)
samples = prob.rand() < prob
return samples, prob
def sample_v_given_h(self,h):
'''
compute samples from P(v|h)
'''
if self.vis_unit == 'binary':
mean = self.mean_field_v_given_h(h)
samples = mean.rand() < mean
return samples, mean
elif self.vis_unit == 'linear':
x = self.bv + gp.dot(h, self.W.T)
# variance of noise is sigmoid(x) - sigmoid(x - vis_scale)
stddev = gp.sqrt(sigmoid(x) - sigmoid(x - self.vis_scale))
mean = log_1_plus_exp(x) - log_1_plus_exp(x-self.vis_scale)
noise = stddev * gp.randn(x.shape)
samples = mean + noise
samples[samples < 0] = 0
samples[samples > self.vis_scale] = self.vis_scale
return samples, mean
def cdk(self,K,v0_data,rate=0.001,momentum=0.0,weight_decay=0.001,noisy=0):
'''
compute K-step contrastive divergence update
input:
K - number of gibbs iterations (for cd-K)
v0_data - training data [N x (Nv+Nl)]
rate - learning rate
momentum - learning momentum
weight_decay - L2 regularizer
noisy - 0 = use h0_mean, use visible means everywhere
1 = use h0_samp, use visible means everywhere
2 = use samples everywhere
'''
# collect gradient statistics
h0_samp,h0_mean = self.sample_h_given_v(v0_data)
hk_samp = h0_samp
if noisy == 0:
for k in xrange(K): # vk_mean <--> hk_samp
vk_mean = self.mean_field_v_given_h(hk_samp)
hk_samp, hk_mean = self.sample_h_given_v(vk_mean)
h0 = h0_mean
vk = vk_mean
hk = hk_mean
elif noisy == 1:
for k in xrange(K): # vk_mean <--> hk_samp
vk_mean = self.mean_field_v_given_h(hk_samp)
hk_samp, hk_mean = self.sample_h_given_v(vk_mean)
h0 = h0_samp # <--
vk = vk_mean
hk = hk_mean
elif noisy == 2:
for k in xrange(K): # vk_samp <--> hk_samp
vk_samp, vk_mean = self.sample_v_given_h(hk_samp)
hk_samp, hk_mean = self.sample_h_given_v(vk_samp)
h0 = h0_samp
vk = vk_samp # <--
hk = hk_samp # <--
W_grad,bv_grad,bh_grad = self.compute_gradients(v0_data,h0,vk,hk)
if weight_decay > 0.0:
W_grad += weight_decay * self.W
rate = float(rate)
if momentum > 0.0:
momentum = float(momentum)
self.W_update = momentum * self.W_update - rate*W_grad
self.bh_update = momentum * self.bh_update - rate*bh_grad
self.bv_update = momentum * self.bv_update - rate*bv_grad
else:
self.W_update = -rate*W_grad
self.bh_update = -rate*bh_grad
|
dhrone/pydKeg
|
displays/lcd_curses.py
|
Python
|
mit
| 1,770 | 0.039542 |
#!/usr/bin/python
# coding: UTF-8
# Driver for testing pydPiper display
# Uses the curses system to emulate a display
# Written by: Ron Ritchey
import time, curses
import lcd_display_driver
class lcd_curses(lcd_display_driver.lcd_display_driver):
def __init__(self, rows=2, cols=16 ):
self.FONTS_SUPPORTED = False
self.rows = rows
self.cols = cols
self.stdscr = curses.initscr()
self.curx = 0
self.cury = 0
# Set up parent class. Note. This must occur after display has been
# initialized as the parent class may attempt to load custom fonts
super(lcd_curses, self).__init__(rows,cols)
def clear(self):
self.stdscr.clear()
self.stdscr.refresh()
self.curx = 0
self.cury = 0
def setCursor(self, row, col):
self.curx = col
self.cury = row
def loadcustomchars(self, char, fontdata):
# Load custom characters
        raise RuntimeError('Command loadcustomchars not supported')
def cleanup(self):
curses.endwin()
def message(self, text, row=0, col=0):
''' Send string to LCD. Newline wraps to second line'''
self.setCursor(row, col)
        self.stdscr.addstr(self.cury, self.curx, text.encode('utf-8'))
self.stdscr.refresh()
def msgtest(self, text, wait=1.5):
self.clear()
lcd.message(text)
time.sleep(wait)
if __name__ == '__main__':
try:
print "Curses Display Test"
lcd = lcd_curses(2,16)
lcd.msgtest("Curses\nPi Powered",2)
        lcd.msgtest("This is a driver\nused for testing",2)
accent_min = u"àáâãäçèéëêìíî \nïòóôöøùúûüþÿ"
#for char in accent_min: print char, ord(char)
lcd.msgtest(accent_min,2)
lcd.clear()
except KeyboardInterrupt:
pass
finally:
lcd.clear()
lcd.message("Goodbye!")
time.sleep(2)
lcd.clear()
curses.endwin()
print "Curses Display Test Complete"
|
espressif/esp-idf
|
docs/zh_CN/conf.py
|
Python
|
apache-2.0
| 789 | 0 |
# -*- coding: utf-8 -*-
#
# Simplified Chinese (zh_CN) RTD & Sphinx config file
#
# Uses ../conf_common.py for most non-language-specific settings.
# Importing conf_common adds all the non-language-specific
# parts to this conf module
try:
from conf_common import * # noqa: F403,F401
except ImportError:
import os
import sys
    sys.path.insert(0, os.path.abspath('..'))
from conf_common import * # noqa: F403,F401
import datetime
current_year = datetime.datetime.now().year
# General information about the project.
project = u'ESP-IDF 编程指南'
copyright = u'2016 - {} 乐鑫信息科技(上海)股份有限公司'.format(current_year)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'zh_CN'
|
nick-huang-cc/GraffitiSpaceTT
|
UnderstandStudyPython/IO_coroutine_stu1.py
|
Python
|
agpl-3.0
| 1,188 | 0.011752 |
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
#Copyright (c) 1986 Nick Wong.
#Copyright (c) 2016-2026 TP-NEW Corp.
# License: TP-NEW (www.tp-new.com)
__author__ = "Nick Wong"
"""
用asyncio提供的@asyncio.coroutine可以把一个generator标记为coroutine类型,然后在coroutine内部用yield from调用另一个coroutine实现异步操作
从Python 3.5开始引入了新的语法async和await,可以让coroutine的代码更简洁易读
#generator(生成器)
#coroutine(协程)
async和await是针对coroutine的新语法,要使用新的语法,只需要做两步简单的替换:
1.把@asyncio.coroutine替换为async;
2.把yield from替换为
|
await。
"""
import asyncio
######### old code #########
@asyncio.coroutine
def hello():
print('Hello World!')
r = yield from asyncio.sleep(2)
print('Hello again!')
######### new code #########
async def hello1():  # Note: 'async' and the function definition must stay on one line, otherwise it is a syntax error
print('Hello World! 1')
r = await asyncio.sleep(2)
print('Hello again! 1')
# Get the EventLoop:
loop = asyncio.get_event_loop()
# Run the coroutines:
loop.run_until_complete(hello())
loop.run_until_complete(hello1())
loop.close()
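# Illustrative note: on Python 3.7+ the explicit event-loop management above
# can be replaced by a single call, e.g.:
#   asyncio.run(hello1())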
|
quijot/agrimpy-package
|
agrimpy/test.py
|
Python
|
mit
| 1,621 | 0.001858 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Steps to transform projected coordinates to geodetic/projected/GK and export to DXF
from geod_proy import *
from toDXF import *
#
# 1) Configure the projections used
#
# 1.1) An arbitrary Transverse Mercator projection.
lat_orig = gms2gyf(-33,52)
merid_c = gms2gyf(-61,14)
pserapio = config_proy(lat_orig, merid_c)
# 1.2) Gauss-Kruger Zone 5
gk_faja5 = proyGK(5)
#
# 2) Transform between geodetic and projected coordinates
#
# 2.1) The process is proy -> geod -> proy -> geod. If the output files are
# compared, both "proy" files and both "geod" files should be identical.
# proy -> proy.geod
proy2geod('coord/proy', pserapio)
# proy.geod -> proy.geod.proy
geod2proy('coord/proy.geod', pserapio)
# proy.geod.proy -> proy.geod.proy.geod
proy2geod('coord/proy.geod.proy', pserapio)
#
# 3) Transform between geodetic and projected coordinates (GK Zone 5)
#
# 3.1) The process is geod -> gk5 -> geod -> gk5. If the output files are
# compared, both "gk5" files and both "geod" files should be identical.
# proy.geod -> proy.geod.gk5
geod2proy('coord/proy.geod', gk_faja5, 'gk5')
# proy.geod.gk5 -> proy.geod.gk5.geod
proy2geod('coord/proy.geod.gk5', gk_faja5)
# proy.geod.gk5.geod -> proy.geod.gk5.geod.gk5
geod2proy('coord/proy.geod.gk5.geod', gk_faja5, 'gk5')
# proy.geod.gk5.geod.gk5 -> proy.geod.gk5.geod.gk5.geod
proy2geod('coord/proy.geod.gk5.geod.gk5', gk_faja5)
#
# 4) Export to DXF
#
# Only projected coordinates make sense to send to DXF.
coord2dxf('coord/proy')
coord2dxf('coord/proy.geod.proy')
coord2dxf('coord/proy.geod.gk5')
coord2dxf('coord/proy.geod.gk5.geod.gk5')
|
johncosta/django-like-button
|
setup.py
|
Python
|
bsd-3-clause
| 4,721 | 0.003601 |
import os
import sys
import codecs
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, '__init__.py'))
and not prefix):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
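# Illustrative note (assumption): with where='.', a package ``pkg``
# containing templates/page.html (and no exclusions) would yield
#   {'pkg': ['templates/page.html']}
# which is exactly the shape expected by setuptools' package_data.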
README = read('README.rst')
PACKAGE = "like_button"
VERSION = __import__(PACKAGE).__version__
setup(
name='django-like-button',
version=VERSION,
description='Django App for adding a Facebook like button',
maintainer='John Costa',
maintainer_email='john.costa@gmil.com',
url='https://github.com/johncosta/django-like-button',
classifiers=[
'Programming Language :: Python',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
],
package_data = find_package_data(PACKAGE, only_in_packages=False),
packages=find_packages(),
long_description = README,
setup_requires = [
'versiontools >= 1.8.2',
],
)
|
MD-Studio/MDStudio
|
mdstudio/mdstudio/logging/impl/printing_observer.py
|
Python
|
apache-2.0
| 805 | 0.003727 |
from datetime import datetime
import os
import pytz
class PrintingLogObserver(object):
def __init__(self, fp):
self.fp = fp
def __call__(self, event):
if event.get('log_format', None):
message = event['log_format'].format(**event)
else:
            message = event.get('message', '')
pid = str(event.get('pid', os.getpid()))
log_struct = {
'time': datetime.fromtimestamp(event['log_time'], pytz.utc).time().replace(microsecond=0).isoformat(),
'pid': pid,
            'source': event.get('cb_namespace', event['log_namespace']).split('.')[-1],
'message': message,
'ws': max(0, 35 - len(pid))
}
self.fp.write('{time} [{source:<{ws}} {pid}] {message}\n'.format(**log_struct))
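if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): route one
    # synthetic Twisted-style log event through the observer above.
    import sys
    import time
    observer = PrintingLogObserver(sys.stdout)
    observer({'log_time': time.time(),
              'log_namespace': 'mdstudio.example',
              'message': 'hello observer'})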
|
amohanta/thug
|
src/Logging/modules/HPFeeds.py
|
Python
|
gpl-2.0
| 6,225 | 0.008193 |
#!/usr/bin/env python
#
# HPFeeds.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import sys
import os
import struct
import socket
import hashlib
import logging
import json
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
log = logging.getLogger("Thug")
class FeedUnpack(object):
def __init__(self):
self.buf = bytearray()
def __iter__(self):
return self
def next(self):
return self.unpack()
def feed(self, data):
self.buf.extend(data)
def unpack(self):
if len(self.buf) < 5:
raise StopIteration('No message')
ml, opcode = struct.unpack('!iB', buffer(self.buf, 0, 5))
if len(self.buf) < ml:
raise StopIteration('No message')
data = bytearray(buffer(self.buf, 5, ml - 5))
del self.buf[:ml]
return opcode, data
class HPFeeds(object):
formats = ('maec11', )
OP_ERROR = 0
OP_INFO = 1
OP_AUTH = 2
OP_PUBLISH = 3
OP_SUBSCRIBE = 4
def __init__(self, thug_version):
self.unpacker = FeedUnpack()
self.opts = dict()
self.url = ""
self.__init_config()
def __init_config(self):
config = ConfigParser.ConfigParser()
conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, 'logging.conf')
config.read(conf_file)
for option in config.options('hpfeeds'):
self.opts[option] = str(config.get('hpfeeds', option))
def set_url(self, url):
self.url = url
def msg_hdr(self, op, data):
return struct.pack('!iB', 5 + len(data), op) + data
def msg_publish(self, chan, data):
#if isinstance(data, str):
# data = data.encode('latin1')
return self.msg_hdr(self.OP_PUBLISH,
struct.pack('!B', len(self.opts['ident'])) +
self.opts['ident'] +
struct.pack('!B', len(chan)) +
chan +
data)
def msg_auth(self, rand):
hash = hashlib.sha1(rand + self.opts['secret']).digest()
return self.msg_hdr(self.OP_AUTH,
struct.pack('!B', len(self.opts['ident'])) +
|
self.opts['ident'] +
hash)
def msg_send(self, msg):
self.sockfd.send(msg)
def get_data(self, host, port):
self.sockfd.settimeout(3)
try:
self.sockfd.connect((host, port))
except:
log.warning('[HPFeeds] Unable to connect to broker')
return None
try:
d = self.sockfd.recv(1024)
except socket.timeout:
log.warning('[HPFeeds] Timeout on banner')
return None
self.sockfd.settimeout(None)
return d
def publish_data(self, d, chan, pubdata):
published = False
while d and not published:
self.unpacker.feed(d)
for opcode, data in self.unpacker:
if opcode == self.OP_INFO:
rest = buffer(data, 0)
name, rest = rest[1:1 + ord(rest[0])], buffer(rest, 1 + ord(rest[0]))
rand = str(rest)
self.msg_send(self.msg_auth(rand))
self.msg_send(self.msg_publish(chan, pubdata))
published = True
self.sockfd.settimeout(0.1)
if opcode == self.OP_ERROR:
log.warning('[HPFeeds] Error message from server: {0}'.format(data))
try:
d = self.sockfd.recv(1024)
except socket.timeout:
break
def __log_event(self, pubdata):
self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
data = self.get_data(self.opts['host'], int(self.opts['port']))
if data is None:
return
self.publish_data(data, 'thug.events', pubdata)
self.sockfd.close()
def log_event(self, basedir):
if log.ThugOpts.local:
return
m = None
for module in self.formats:
if module in log.ThugLogging.modules:
p = log.ThugLogging.modules[module]
m = getattr(p, 'get_%s_data' % (module, ), None)
if m:
break
if m is None:
return
data = m(basedir)
self.__log_event(data)
def log_file(self, pubdata, url = None, params = None):
if log.ThugOpts.local:
return
self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
data = self.get_data(self.opts['host'], int(self.opts['port']))
if data is None:
return
self.publish_data(data, 'thug.files', json.dumps(pubdata))
self.sockfd.close()
def log_warning(self, pubdata):
if log.ThugOpts.local:
return
self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
data = self.get_data(self.opts['host'], int(self.opts['port']))
if data is None:
return
self.publish_data(data, 'thug.warnings', json.dumps({'url': self.url, 'warning': pubdata}))
self.sockfd.close()
if __name__ == '__main__':
    # HPFeeds requires a thug_version argument; a placeholder is used for
    # this smoke test.
    hpfeeds = HPFeeds('test')
hpfeeds.log_event('Test foobar!')
|
dhazel/buck
|
bucking/logvolume_2.py
|
Python
|
gpl-2.0
| 2,031 | 0.031019 |
#This is a function containing an algorithmic model of the Scribner log rule,
# board-foot log volume tables. It outputs the Scribner log volume for an
# input log length and top diameter.
#
# Annotation: [v]=logvolume(L,TD)
# v = Scribner log volume
# L = log length
# TD = top diameter
import sys
volume_table_1 = [1.07,4.9,6.043,7.14,8.88,10.0,11.528,13.29,14.99,17.499,18.99,20.88,23.51,25.218,28.677,31.249,34.22,36.376,38.04,41.06,44.376,45.975]
volume_table_2 = [1.160,1.400,1.501,2.084,3.126,3.749 , 1.249,1.608,1.854,2.410,3.542,4.167 , 1.57,1.8,2.2,2.9,3.815,4.499]
def logvolume_2(L,TD):
L = L - (0.8333) #Account for 10 inch over cut
if TD < 5:
L = 0 # makes v = 0 in the output
print "Top diameter reached:", TD
TD = 11 # handles out-of-bounds errors
print "Error! Top diameter minimum limit of 5 inches."
elif TD >= 32:
print 'Error! %3.1f inch top diameter exceeds the current 32.0 inch program capability.\n' %TD
L = 0
TD = 11
elif L > 40:
print "Log length reached:", L
L = 0
print 'Error! Maximum log length is 40 feet.'
elif L < 1:
print "Log length reached:", L
L = 0
print 'Error! Minimum log length is 16 feet.'
if (TD >= 6) & (TD <= 11):
TD = TD - 6 # normalize TD with 6 for array indexing
if L < 16:
v = 10 * round((L * volume_table_2[TD]) / 10.0)
        elif L < 31:
v = 10 * round((L * volume_table_2[TD + 6]) / 10.0)
elif L < 41:
v = 10 * round((L * volume_table_2[TD + 12]) / 10.0)
else:
v = 0
else:
if TD == 5:
v = 10 * round((L * volume_table_1[0]) / 10.0)
else:
v = 10 * round((L * volume_table_1[TD - 11]) / 10.0)
return v
def debug_logvolume():
print
v = logvolume_2(input("length: "),input("topdia: "))
print "volume is:", v
|
Cjsheaf/Variation-Discovery-Pipeline
|
Pipeline_Core/Results_Writer.py
|
Python
|
gpl-2.0
| 3,199 | 0.003439 |
__author__ = 'Cjsheaf'
import csv
from threading import Lock
class ResultsWriter:
""" This class is designed to take out-of-order result data from multiple threads and write
them to an organized csv-format file.
All data is written to disk at the very end via the "write_results" method, since there
is no way to know how many results there will be ahead of time, and they will not arrive
in any particular order.
"""
def __init__(self, csv_filename):
self.csv_filename = csv_filename
self.entries = {}
def put_rmsd(self, entry_name, rmsd):
if self.entries.get(entry_name) is None:
self.entries[entry_name] = Entry(entry_name)
        self.entries[entry_name].rmsd = rmsd
def put_compound_scores(self, entry_name, scores):
""" Argument 'scores' should be a 9-item tuple or list. """
        if len(scores) != 9:
raise ValueError(
'Attempted to save results for a compound "{compound}" in entry "{entry}"'
'with {num_scores} number of results. Expected 9 results.'.format(
                    compound=scores[0],
entry=entry_name,
num_scores=len(scores)
)
)
if self.entries.get(entry_name) is None:
self.entries[entry_name] = Entry(entry_name)
        self.entries[entry_name].compounds.append(
            Compound(scores[0], scores[1], scores[2], scores[3], scores[4],
                     scores[5], scores[6], scores[7], scores[8])
        )
def _sanity_check_entry(self):
        for e in self.entries.values():
if e.rmsd is None:
raise RuntimeError('Entry "{entry}" has no RMSD!'.format(entry=e.name))
            if len(e.compounds) == 0:
raise RuntimeError('Entry "{entry}" has no compounds!'.format(entry=e.name))
for c in e.compounds:
if c.mseq is None:
raise NotImplementedError
def write_results(self):
        with open(self.csv_filename, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(['name', 'rmsd', 'compound', 'rseq', 'mseq', 'rmsd_refine',
                             'e_conf', 'e_place', 'e_score1', 'e_score2', 'e_refine'])
            for e in self.entries.values():
                writer.writerow([e.name, e.rmsd])
                for c in e.compounds:
                    writer.writerow(['', '', c.name, c.rseq, c.mseq, c.rmsd_refine,
                                     c.e_conf, c.e_place, c.e_score1, c.e_score2,
                                     c.e_refine])
class Entry:
def __init__(self, name):
self.name = name
self.rmsd = None
self.compounds = []
    def add_compound(self, compound_name, compound):
        self.compounds.append(compound)
class Compound:
def __init__(self, name, rseq, mseq, rmsd_refine, e_conf, e_place, e_score1, e_score2, e_refine):
self.name = name
self.rseq = rseq
self.mseq = mseq
self.rmsd_refine = rmsd_refine
self.e_conf = e_conf
self.e_place = e_place
self.e_score1 = e_score1
self.e_score2 = e_score2
self.e_refine = e_refine
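# A minimal single-threaded usage sketch; the entry name, compound name and
# score values below are made-up placeholders, not real pipeline output.
if __name__ == '__main__':
    results = ResultsWriter('results.csv')
    results.put_rmsd('entry1', 0.42)
    results.put_compound_scores(
        'entry1', ('cmpd1', 1, 2, 0.38, -5.1, -6.2, -7.3, -8.4, -9.5))
    results.write_results()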
|
MrColwell/PythonProfessionalLearning
|
PythonForTeachers/studentExercises/8_2_average.py
|
Python
|
mit
| 339 | 0.0059 |
total = 0
n = 0
stop = 0
nextMark = input('Type in a mark: ')
while stop == 0:
nextMark = eval(nextMark)
total = total+nextMark
n = n + 1
nextMark = input('Hit enter to stop, or type in a mark: ')
if nextMark == "":
stop = 1
print("You entered", n, 'marks
|
. The average is:',total/n)
| |
namhyung/uftrace
|
tests/t159_report_diff_policy2.py
|
Python
|
gpl-2.0
| 2,877 | 0.004866 |
#!/usr/bin/env python
import subprocess as sp
from runtest import TestBase
XDIR='xxx'
YDIR='yyy'
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'diff', """
#
# uftrace diff
# [0] base: xxx (from uftrace record -d yyy -F main tests/t-diff 1 )
# [1] diff: yyy (from uftrace record -d xxx -F main tests/t-diff 0 )
#
Total time (diff) Self time (diff) Calls (diff) Function
=================================== =================================== ================================ ================================================
1.075 us 1.048 us -0.027 us 1.075 us 1.048 us -0.027 us 1 1 +0 atoi
158.971 us 0.118 us -158.853 us 1.437 us 0.118 us -1.319 us 1 1 +0 bar
1.235 ms 0.645 us -1.235 ms 3.276 us 0.527 us -2.749 us 1 1 +0 foo
1.309 ms 3.975 us -1.305 ms 2.601 us 2.282 us -0.319 us 1 1 +0 main
1.300 ms - -1.300 ms 1.300 ms - -1.300 ms 3 0 -3 usleep
""")
def prerun(self, timeout):
self.subcmd = 'record'
self.option = '-d %s -F main' % XDIR
self.exearg = 't-' + self.name + ' 0'
record_cmd = self.runcmd()
self.pr_debug('prerun command: ' + record_cmd)
sp.call(record_cmd.split())
self.option = '-d %s -F main' % YDIR
self.exearg = 't-' + self.name + ' 1'
record_cmd = self.runcmd()
self.pr_debug('prerun command: ' + record_cmd)
sp.call(record_cmd.split())
return TestBase.TEST_SUCCESS
def setup(self):
self.subcmd = 'report'
self.option = '--diff-policy full,no-abs -s call,total'
self.exearg = '-d %s --diff %s' % (YDIR, XDIR)
def sort(self, output):
""" This function post-processes output of the test to be compared .
It ignores blank and comment (#) lines and remaining functions. """
result = []
for ln in output.split('\n'):
if ln.startswith('#') or ln.strip() == '':
continue
line = ln.split()
if line[0] == 'Total':
continue
if line[0].startswith('='):
continue
# A report line consists of following data
# [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] [14] [15]
# tT/0 unit tT/1 unit tT/d unit tS/0 unit tS/1 unit tS/d unit call/0 call/1 call/d function
if line[-1].startswith('__'):
continue
result.append('%s %s %s %s' % (line[-4], line[-3], line[-2], line[-1]))
return '\n'.join(result)
|
fontman/fontman-server
|
utility/Initializer.py
|
Python
|
gpl-3.0
| 428 | 0 |
""" Initializer
Initialize application data.
Created by Lahiru Pathirage @ Mooniak<lpsandaruwan@gmail.com> on 19/12/2016
"""
from session import Base
from session import mysql_con_string
from sqlalchemy import create_engine
from utility import DBManager
def initialize():
engine = create_engine(
        mysql_con_string
)
Base.metadata.create_all(engine, checkfirst=True)
    DBManager().update_font_cache()
|
zhaochl/python-utils
|
es/elasticsearch_util.py
|
Python
|
apache-2.0
| 4,771 | 0.032907 |
#!/usr/bin/env python
# coding=utf-8
import commands
import sys
from docopt import docopt
#from handler import LogFileClient
from sdutil.log_util import getLogger
from sdutil.date_util import *
reload(sys)
sys.setdefaultencoding('utf-8')
from elasticsearch import Elasticsearch
from pdb import *
import requests
import json
logger = getLogger(__name__, __file__)
"""
host like:"http://172.17.0.33:8081"
"""
def count_from_es(host,index,query_str,startTime,endTime,scroll=False):
logger.info('search_from_es startTime:%s,endTime:%s'%(startTime,endTime))
startTimeStamp = int(str2timestamp(startTime))*1000
endTimeStamp = int(str2timestamp(endTime))*1000+999
data_post_search = {"query":{"filtered":{"query":{"query_string":{"query":query_str,"analyze_wildcard":'true'}},"filter":{"bool":{"must":[{"range":{"@timestamp":{"gte":startTimeStamp,"lte":endTimeStamp,"format":"epoch_millis"}}}],"must_not":[]}}}}}
logger.info('search_from_es,post_data:%s'%(data_post_search))
es = Elasticsearch(host,timeout=120)
response = es.count(index=index, body=data_post_search)
return response
def do_search(host,index,query_str,startTimeStamp,endTimeStamp,scroll,_source,time_step):
es = Elasticsearch(host,timeout=120)
response ={}
data_post_search = {"query":{"filtered":{"query":{"query_string":{"query":query_str,"analyze_wildcard":'true'}},"filter":{"bool":{"must":[{"range":{"@timestamp":{"gte":startTimeStamp,"lte":endTimeStamp,"format":"epoch_millis"}}}],"must_not":[]}}}}}
logger.info('search_from_es,post_data:%s'%(data_post_search))
if not scroll:
if _source:
response = es.search(index=index, body=data_post_search,size=10000,_source=_source)
else:
response = es.search(index=index, body=data_post_search,size=10000)
else:
page_size=10000
scan_resp =None
if _source:
scan_resp = es.search(index=index, body=data_post_search,search_type="scan", scroll="5m",size=page_size,_source=_source)
else:
scan_resp = es.search(index=index, body=data_post_search,search_type="scan", scroll="5m",size=page_size)
scrollId= scan_resp['_scroll_id']
response={}
total = scan_resp['hits']['total']
response_list =[]
scrollId_list =[]
for page_num in range(total/page_size + 1):
response_tmp ={}
response_tmp = es.scroll(scroll_id=scrollId, scroll= "5m")
#es.clear_scroll([scrollId])
scrollId = response_tmp['_scroll_id']
scrollId_list.append(str(scrollId))
response_list.append(response_tmp)
if response.has_key('hits'):
_hits = response['hits']
_hits['hits']+=response_tmp['hits']['hits']
response['hits'] = _hits
else:
response['hits'] = response_tmp['hits']
return response
def search_from_es(host,index,query_str,startTime,endTime,scroll=False,_source=None,time_step=0):
logger.info('search_from_es startTime:%s,endTime:%s'%(startTime,endTime))
startTimeStamp = int(str2timestamp(startTime))*1000
endTimeStamp = int(str2timestamp(endTime))*1000+999
all_response={}
    timegap = endTimeStamp-startTimeStamp
if time_step>0:
_s1=startTimeStamp
_s2=startTimeStamp+time_step
run_time =0
all_response = {}
time_count = {}
while(_s2<=endTimeStamp):
response_tmp = do_search(host,index,query_str,_s1,_s2,scroll,_source,time_step)
#response_tmp = do_search(_s1,_s2)
if all_response.has_key('hits'):
_hits = all_response['hits']
_hits['hits']+=response_tmp['hits']['hits']
all_response['hits'] = _hits
else:
all_response['hits'] = response_tmp['hits']
run_time+=1
_s1=_s1+time_step
_s2 = _s2+time_step
if time_count.has_key(_s1):
time_count[_s1]+=1
else:
time_count[_s1]=1
if time_count.has_key(_s2):
time_count[_s2]+=1
else:
time_count[_s2]=1
print '----run_time:',run_time,'_s1:',_s1,',_s2:',_s2,',len:',len(all_response['hits']['hits'])
print '-s1--',time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_s1/1000))
print '-s2--',time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_s2/1000))
print time_count
time.sleep(2)
else:
all_response = do_search(host,index,query_str,startTimeStamp,endTimeStamp,scroll,_source,time_step)
return all_response
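# A minimal usage sketch; the host, index and query string are made up, and the
# time-string format is assumed to be what sdutil's str2timestamp expects.
if __name__ == '__main__':
    resp = count_from_es('http://127.0.0.1:9200', 'logstash-*', 'status:500',
                         '2016-01-01 00:00:00', '2016-01-01 23:59:59')
    print resp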
|
oregoncountryfair/ocfnet
|
ocfnet/migrations/env.py
|
Python
|
mit
| 2,236 | 0.001342 |
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from ocfnet.database import Model
from ocfnet.media.models import *
from ocfnet.user.models import *
try:
from config import DATABASE_URL
except:
from configdist import DATABASE_URL
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
    alembic_config = config.get_section(config.config_ini_section)
alembic_config['sqlalchemy.url'] = DATABASE_URL
engine = engine_from_config(alembic_config, poolclass=pool.NullPool)
connection = engine.connect()
context.configure(connection=connection,
target_metadata=target_metadata)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
FireFry/online-judge-solutions
|
acm.timus.ru/1385.py
|
Python
|
gpl-2.0
| 166 | 0.024096 |
from sys import stdin, stdout
n = int(stdin.read())
if n == 1:
res = "14"
elif n == 2:
res = "155"
else:
res = "1575" + ("0" * (n - 3))
stdout.write(res + "\n")
|
andsens/ansible-modules-extras
|
messaging/rabbitmq_user.py
|
Python
|
gpl-3.0
| 7,797 | 0.001411 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rabbitmq_user
short_description: Adds or removes users to RabbitMQ
description:
- Add or remove users to RabbitMQ and assign permissions
version_added: "1.1"
author: Chris Hoffman
options:
user:
description:
- Name of user to add
required: true
default: null
aliases: [username, name]
password:
description:
- Password of user to add.
- To change the password of an existing user, you must also specify
C(force=yes).
required: false
default: null
tags:
description:
- User tags specified as comma delimited
required: false
default: null
vhost:
description:
- vhost to apply access privileges.
required: false
default: /
node:
description:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
version_added: "1.2"
configure_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
required: false
default: ^$
write_priv:
description:
      - Regular expression to restrict write actions on a resource
for the specified vhost.
- By default all actions are restricted.
required: false
default: ^$
read_priv:
description:
      - Regular expression to restrict read actions on a resource
for the specified vhost.
- By default all actions are restricted.
required: false
default: ^$
force:
description:
- Deletes and recreates the user.
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if user is to be added or removed
required: false
default: present
    choices: [present, absent]
'''
EXAMPLES = '''
# Add user to server and assign full access control
- rabbitmq_user: user=joe
password=changeme
vhost=/
configure_priv=.*
read_priv=.*
write_priv=.*
state=present
'''
class RabbitMqUser(object):
def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node):
self.module = module
self.username = username
self.password = password
self.node = node
if tags is None:
self.tags = list()
else:
self.tags = tags.split(',')
permissions = dict(
vhost=vhost,
configure_priv=configure_priv,
write_priv=write_priv,
read_priv=read_priv
)
self.permissions = permissions
self._tags = None
self._permissions = None
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = [self._rabbitmqctl, '-q', '-n', self.node]
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get(self):
users = self._exec(['list_users'], True)
for user_tag in users:
user, tags = user_tag.split('\t')
if user == self.username:
for c in ['[',']',' ']:
tags = tags.replace(c, '')
if tags != '':
self._tags = tags.split(',')
else:
self._tags = list()
self._permissions = self._get_permissions()
return True
return False
def _get_permissions(self):
perms_out = self._exec(['list_user_permissions', self.username], True)
for perm in perms_out:
vhost, configure_priv, write_priv, read_priv = perm.split('\t')
if vhost == self.permissions['vhost']:
return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv)
return dict()
def add(self):
if self.password is not None:
self._exec(['add_user', self.username, self.password])
        else:
self._exec(['add_user', self.username, ''])
self._exec(['clear_password', self.username])
def delete(self):
self._exec(['delete_user', self.username])
def set_tags(self):
self._exec(['set_user_tags', self.username] + self.tags)
def set_permissions(self):
cmd = ['set_permissions']
cmd.append('-p')
cmd.append(self.permissions['vhost'])
cmd.append(self.username)
cmd.append(self.permissions['configure_priv'])
cmd.append(self.permissions['write_priv'])
cmd.append(self.permissions['read_priv'])
self._exec(cmd)
def has_tags_modifications(self):
return set(self.tags) != set(self._tags)
def has_permissions_modifications(self):
return self._permissions != self.permissions
def main():
arg_spec = dict(
user=dict(required=True, aliases=['username', 'name']),
password=dict(default=None),
tags=dict(default=None),
vhost=dict(default='/'),
configure_priv=dict(default='^$'),
write_priv=dict(default='^$'),
read_priv=dict(default='^$'),
force=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
node=dict(default='rabbit')
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
username = module.params['user']
password = module.params['password']
tags = module.params['tags']
vhost = module.params['vhost']
configure_priv = module.params['configure_priv']
write_priv = module.params['write_priv']
read_priv = module.params['read_priv']
force = module.params['force']
state = module.params['state']
node = module.params['node']
rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node)
changed = False
if rabbitmq_user.get():
if state == 'absent':
rabbitmq_user.delete()
changed = True
else:
if force:
rabbitmq_user.delete()
rabbitmq_user.add()
rabbitmq_user.get()
changed = True
if rabbitmq_user.has_tags_modifications():
rabbitmq_user.set_tags()
changed = True
if rabbitmq_user.has_permissions_modifications():
rabbitmq_user.set_permissions()
changed = True
elif state == 'present':
rabbitmq_user.add()
rabbitmq_user.set_tags()
rabbitmq_user.set_permissions()
changed = True
module.exit_json(changed=changed, user=username, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
|
anurag03/integration_tests
|
artifactor/plugins/test.py
|
Python
|
gpl-2.0
| 705 | 0 |
""" Test plugin for Artifactor """
import time
from artifactor import ArtifactorBasePlugin
class Test(ArtifactorBasePlugin):
def plugin_initialize(self):
self.register_plugin_hook("start_test", self.start_test)
        self.register_plugin_hook("finish_test", self.finish_test)
def start_test(self, test_name, test_location, artifact_path):
filename = artifact_path + "-" + self.ident + ".log"
with open(filename, "a+") as f:
f.write(test_name + "\n")
f.write(str(time.time()) + "\n")
for i in range(2):
time.sleep(2)
print("houh")
def finish_test(self, test_name, artifact_path):
print("finished")
|
moepman/acertmgr
|
acertmgr/tools.py
|
Python
|
isc
| 18,467 | 0.003466 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# acertmgr - various support functions
# Copyright (c) Markus Hauschild & David Klaftenegger, 2016.
# Copyright (c) Rudolf Mayerhofer, 2019.
# available under the ISC license, see LICENSE
import base64
import datetime
import io
import os
import re
import stat
import sys
import traceback
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, ec, padding
from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature
from cryptography.utils import int_to_bytes
from cryptography.x509.oid import NameOID, ExtensionOID
try:
from cryptography.x509 import ocsp
except ImportError:
pass
try:
from cryptography.hazmat.primitives.asymmetric import ed25519, ed448
except ImportError:
pass
try:
from urllib.request import urlopen, Request # Python 3
except ImportError:
from urllib2 import urlopen, Request # Python 2
LOG_REPLACEMENTS = {}
class InvalidCertificateError(Exception):
pass
# @brief a simple, portable indent function
def indent(text, spaces=0):
ind = ' ' * spaces
return os.linesep.join(ind + line for line in text.splitlines())
# @brief wrapper for log output
def log(msg, exc=None, error=False, warning=False):
if error:
prefix = "Error: "
elif warning:
prefix = "Warning: "
else:
prefix = ""
output = prefix + msg
for k, v in LOG_REPLACEMENTS.items():
output = output.replace(k, v)
if exc:
_, exc_value, _ = sys.exc_info()
if not getattr(exc, '__traceback__', None) and exc == exc_value:
# Traceback handling on Python 2 is ugly, so we only output it if the exception is the current sys one
formatted_exc = traceback.format_exc()
else:
formatted_exc = traceback.format_exception(type(exc), exc, getattr(exc, '__traceback__', None))
exc_string = ''.join(formatted_exc) if isinstance(formatted_exc, list) else str(formatted_exc)
output += os.linesep + indent(exc_string, len(prefix))
if error or warning:
sys.stderr.write(output + os.linesep)
sys.stderr.flush() # force flush buffers after message was written for immediate display
else:
sys.stdout.write(output + os.linesep)
sys.stdout.flush() # force flush buffers after message was written for immediate display
# @brief wrapper for downloading an url
def get_url(url, data=None, headers=None):
return urlopen(Request(url, data=data, headers={} if headers is None else headers))
# @brief check whether existing certificate is still valid or expiring soon
# @param crt_file string containing the path to the certificate file
# @param ttl_days the minimum amount of days for which the certificate must be valid
# @return True if certificate is still valid for at least ttl_days, False otherwise
def is_cert_valid(cert, ttl_days):
now = datetime.datetime.now()
if cert.not_valid_before > now:
raise InvalidCertificateError("Certificate seems to be from the future")
expiry_limit = now + datetime.timedelta(days=ttl_days)
if cert.not_valid_after < expiry_limit:
return False
return True
# @brief create a certificate signing request
# @param names list of domain names the certificate should be valid for
# @param key the key to use with the certificate in pyopenssl format
# @param must_staple whether or not the certificate should include the OCSP must-staple flag
# @return the CSR in pyopenssl format
def new_cert_request(names, key, must_staple=False):
primary_name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME,
names[0].decode('utf-8') if getattr(names[0], 'decode', None) else
names[0])])
all_names = x509.SubjectAlternativeName(
[x509.DNSName(name.decode('utf-8') if getattr(name, 'decode', None) else name) for name in names])
req = x509.CertificateSigningRequestBuilder()
req = req.subject_name(primary_name)
req = req.add_extension(all_names, critical=False)
if must_staple:
if getattr(x509, 'TLSFeature', None):
req = req.add_extension(x509.TLSFeature(features=[x509.TLSFeatureType.status_request]), critical=False)
else:
log('OCSP must-staple ignored as current version of cryptography does not support the flag.', warning=True)
req = req.sign(key, hashes.SHA256(), default_backend())
return req
# @brief generate a new account key
# @param path path where the new key file should be written in PEM format (optional)
def new_account_key(path=None, key_algo=None, key_size=None):
return new_ssl_key(path, key_algo, key_size)
# @brief generate a new ssl key
# @param path path where the new key file should be written in PEM format (optional)
def new_ssl_key(path=None, key_algo=None, key_size=None):
if not key_algo or key_algo.lower() == 'rsa':
if not key_size:
key_size = 4096
key_format = serialization.PrivateFormat.TraditionalOpenSSL
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=key_size,
backend=default_backend()
)
elif key_algo.lower() == 'ec':
if not key_size or key_size == 256:
key_curve = ec.SECP256R1
elif key_size == 384:
key_curve = ec.SECP384R1
elif key_size == 521:
key_curve = ec.SECP521R1
else:
raise ValueError("Unsupported EC curve size parameter: {}".format(key_size))
key_format = serialization.PrivateFormat.PKCS8
private_key = ec.generate_private_key(curve=key_curve, backend=default_backend())
    elif key_algo.lower() == 'ed25519' and "cryptography.hazmat.primitives.asymmetric.ed25519" in sys.modules:
key_format = serialization.PrivateFormat.PKCS8
private_key = ed25519.Ed25519PrivateKey.generate()
    elif key_algo.lower() == 'ed448' and "cryptography.hazmat.primitives.asymmetric.ed448" in sys.modules:
key_format = serialization.PrivateFormat.PKCS8
private_key = ed448.Ed448PrivateKey.generate()
else:
raise ValueError("Unsupported key algorithm: {}".format(key_algo))
if path is not None:
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=key_format,
encryption_algorithm=serialization.NoEncryption(),
)
with io.open(path, 'wb') as pem_out:
pem_out.write(pem)
if hasattr(os, 'chmod'):
try:
os.chmod(path, int("0400", 8))
except OSError:
log('Could not set file permissions on {0}!'.format(path), warning=True)
else:
log('Keyfile permission handling unavailable on this platform', warning=True)
return private_key
# @brief read a key from file
# @param path path to file
# @param key indicate whether we are loading a key
# @param csr indicate whether we are loading a csr
# @return the key in pyopenssl format
def read_pem_file(path, key=False, csr=False):
with io.open(path, 'r') as f:
if key:
return serialization.load_pem_private_key(f.read().encode('utf-8'), None, default_backend())
elif csr:
return x509.load_pem_x509_csr(f.read().encode('utf8'), default_backend())
else:
return convert_pem_str_to_cert(f.read())
# @brief write cert data to PEM formatted file
def write_pem_file(crt, path, perms=None):
if hasattr(os, 'chmod') and os.path.exists(path):
try:
os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
except OSError:
log('Could not make file ({0}) writable'.format(path), warning=True)
with io.open(path, "w") as f:
f.write(convert_cert_to_pem_str(crt))
if perms:
if hasattr(os, 'chmod'):
try:
os.chmod(path, perms)
except OSError:
                log('Could not set file permissions on {0}!'.format(path), warning=True)
|
wikimedia/integration-zuul
|
tests/test_daemon.py
|
Python
|
apache-2.0
| 2,085 | 0 |
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daemon
import logging
import os
import sys
import extras
import fixtures
import testtools
from tests.base import iterate_timeout
# as of python-daemon 1.6 it doesn't bundle pidlockfile anymore
# instead it depends on lockfile-0.9.1 which uses pidfile.
pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile'])
def daemon_test(pidfile, flagfile):
pid = pid_file_module.TimeoutPIDLockFile(pidfile, 10)
with daemon.DaemonContext(pidfile=pid):
for x in iterate_timeout(30, "flagfile to be removed"):
if not os.path.exists(flagfile):
break
sys.exit(0)
class TestDaemon(testtools.TestCase):
log = logging.getLogger("zuul.test.daemon")
def setUp(self):
super(TestDaemon, self).setUp()
self.test_root = self.useFixture(fixtures.TempDir(
rootdir=os.environ.get("ZUUL_TEST_ROOT"))).path
def test_daemon(self):
pidfile = os.path.join(self.test_root, "daemon.pid")
flagfile = os.path.join(self.test_root, "daemon.flag")
open(flagfile, 'w').close()
if not os.fork():
self._cleanups = []
daemon_test(pidfile, flagfile)
for x in iterate_timeout(30, "daemon to start"):
if os.path.exists(pidfile):
break
os.unlink(flagfile)
for x in iterate_timeout(30, "daemon to stop"):
if not os.path.exists(pidfile):
break
|
trustedanalytics/spark-tk
|
python/sparktk/frame/pyframe.py
|
Python
|
apache-2.0
| 926 | 0.001179 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class PythonFrame(object):
"""frame backend using a Python objects: pyspark.rdd.RDD, [(str, dtype), (str, dtype), ...]"""
    def __init__(self, rdd, schema=None):
self.rdd = rdd
self.schema = schema
|
quiqueporta/django-admin-dialog
|
django_admin_dialog/__init__.py
|
Python
|
gpl-2.0
| 502 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
VERSION = (1, 0, 8, 'final')
__version__ = VERSION
def get_version():
version = '{}.{}'.format(VERSION[0], VERSION[1])
if VERSION[2]:
version = '{}.{}'.format(version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '{} pre-alpha'.format(version)
else:
if VERSION[3] != 'final':
version = '{} {}'.format(version, VERSION[3])
return version
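# Illustrative inputs and outputs (assumed for this sketch, not taken from the
# project's release history):
#   VERSION = (1, 0, 8, 'final')    -> get_version() == '1.0.8'
#   VERSION = (1, 1, 2, 'beta')     -> get_version() == '1.1.2 beta'
#   VERSION = (2, 0, 0, 'alpha', 0) -> get_version() == '2.0 pre-alpha'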
|
miqueiaspenha/gerenciadordeprovas
|
quiz/__init__.py
|
Python
|
gpl-2.0
| 60 | 0.05 |
from flask import Flask
app = Flask(__name__)
import views
|
sergiopasra/django-megaraetc
|
etc/models.py
|
Python
|
gpl-3.0
| 1,005 | 0.002985 |
from django.db import models
class SpectralTemplate(models.Model):
name = models.CharField(max_length=10)
path = models.CharField(max_length=100)
def __str__(self):
return self.name
class PhotometricFilter(models.Model):
name = models.CharField(max_length=10)
path = models.CharField(max_length=100)
cwl = models.FloatField()
width = models.FloatField()
lambda_b = models.FloatField()
    lambda_e = models.FloatField()
mvega = models.FloatField()
fvega = models.FloatField()
def __str__(self):
return self.name
class VPHSetup(models.Model):
name = models.CharField(max_length=10)
    fwhm = models.FloatField()
dispersion = models.FloatField()
deltab = models.FloatField()
lambdac = models.FloatField()
relatedband = models.CharField(max_length=10)
lambda_b = models.FloatField()
lambda_e = models.FloatField()
specconf = models.CharField(max_length=10)
def __str__(self):
return self.name
|
stryder199/RyarkAssignments
|
Assignment2/ttt/archive/_old/KnR/KnR_1-10.py
|
Python
|
mit
| 1,389 | 0.009359 |
game_type = 'input_output'
parameter_list = [['$x1','string'], ['$y0','string']]
tuple_list = [
['KnR_1-10_',[None,None]]
]
global_code_template = '''\
d #include <stdio.h>
x #include <stdio.h>
dx #define MAXLINE 1000 /* maximum input line length */
dx
dx int max; /* maximum length seen so far */
dx char line[MAXLINE]; /* current input line */
dx char longest[MAXLINE]; /* longest line saved here */
dx
dx int my_getline(void);
dx void copy(void);
dx
dx /* my_getline: specialized version */
dx int my_getline(void)
dx {
dx int c, i;
dx extern char line[];
dx
dx for (i = 0; i < MAXLINE - 1
dx && (c=getchar()) != EOF && c != '\\n'; ++i)
dx line[i] = c;
dx if (c == '\\n') {
dx line[i] = c;
dx ++i;
dx }
dx     line[i] = '\\0';
dx return i;
dx }
dx
dx /* copy: specialized version */
dx void copy(void)
dx {
dx int i;
dx extern char line[], longest[];
dx
dx i = 0;
dx while ((longest[i] = line[i]) != '\\0')
dx ++i;
dx }
dx
dx /* print longest input line; specialized version */
'''
main_code_template = '''\
dx int len;
dx extern int max;
dx extern char longest[];
dx
dx max = 0;
dx while ((len = my_getline()) > 0)
dx if (len > max) {
dx             max = len;
dx copy();
dx }
dx if (max > 0) /* there was a line */
dx printf("%s", longest);
'''
argv_template = ''
stdin_template = '''
a
$x1
abc
'''
stdout_template = '''\
$y0
'''
|
Yelp/beans
|
api/tests/logic/secret_test.py
|
Python
|
mit
| 573 | 0 |
import json
import pytest
from yelp_beans.logic.secret import get_secret
def test_get_secret_file(tmpdir, database):
with tmpdir.as_cwd():
expected = 'password'
with open(tmpdir.join('client_secrets.json').strpath, 'w') as secrets:
secret = {'secret': expected}
secrets.write(json.dumps(secret))
actual = get_secret('secret')
assert expected == actual
def test_get_secret_file_no_exist(tmpdir, database):
with tmpdir.as_cwd():
with pytest.raises(IOError):
assert get_secret('secret')
|
ikekonglp/TweeboParser
|
scripts/AugumentBrownClusteringFeature46.py
|
Python
|
gpl-3.0
| 2,802 | 0.003212 |
# Copyright (c) 2013-2014 Lingpeng Kong
# All Rights Reserved.
#
# This file is part of TweeboParser 1.0.
#
# TweeboParser 1.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TweeboParser 1.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TweeboParser 1.0. If not, see <http://www.gnu.org/licenses/>.
# Lingpeng Kong, lingpenk@cs.cmu.edu
# Oct 12, 2013
# The Brown Clustering usage for Dependency Parsing can be read from Koo et al (ACL 08)
# http://people.csail.mit.edu/maestro/papers/koo08acl.pdf
# Oct 27, 2013
# Add case-sensitive choice
# Jan 4, 2014
# Add 4 bits, 6 bits and all bits.
# May 24, 2014
# Add codecs to support utf-8
import sys
import codecs
def usage():
print("Usage: AugumentBrownClusteringFeature.py [Brown_Clustering_Dictionary] " \
"[Input_Conll_File] [Y/N(case-sensitive)] > [Output_file]")
print("Example: AugumentBrownClusteringFeature.py paths input.txt > output.txt")
print("The program will add two kind of Strings at the end, the first one is the first 4 " \
"bit of the Brown Cluster label and the second one is the whole Brown Cluster label.")
if __name__ == "__main__":
if len(sys.argv) != 4:
usage()
sys.exit(2)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stderr = codecs.getwriter('utf-8')(sys.stderr)
brown_dict = dict()
brown_file = open(sys.argv[1].strip(), "r")
for line in brown_file:
line = line.strip()
if line == "":
continue
bl = line.split("\t")
brown_dict[bl[1]] = bl[0]
#print brown_dict['upstage/downstage']
inputf = sys.argv[2].strip()
for line in codecs.open(inputf, "r", "utf-8"):
line = line.strip()
if line == "":
sys.stdout.write("\n")
continue
cvlist = line.split("\t")
if sys.argv[3] == "N":
brown = brown_dict.get(cvlist[1].lower().strip(), 'OOV')
else:
brown = brown_dict.get(cvlist[1].strip(), 'OOV')
b4 = brown[:4] if len(brown) >= 4 else brown
b6 = brown[:6] if len(brown) >= 6 else brown
cvlist.append(b4)
cvlist.append(b6)
cvlist.append(brown)
tline = ""
for ele in cvlist:
tline = tline + ele + "\t"
tline = tline[:len(tline) - 1]
print(tline)
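# A hedged example of the transformation (the cluster bit-string is made up):
# given a paths line "110101011110\tupstage\t3" and a CoNLL token line whose
# second column is "upstage", the script appends three tab-separated columns:
# "1101" (4-bit prefix), "110101" (6-bit prefix) and "110101011110" (full path).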
|
ministryofjustice/postcodeinfo
|
postcodeinfo/apps/postcode_api/migrations/0011_auto_20150702_1812.py
|
Python
|
mit
| 394 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('postcode_api', '0010_auto_20150601_1513'),
]
operations = [
migrations.AlterIndexTogether(
name='address',
index_together=set([('postcode_index', 'uprn')]),
),
]
| |
kennknowles/python-rightarrow
|
rightarrow/parser.py
|
Python
|
apache-2.0
| 6,194 | 0.008072 |
import sys
import os.path
import logging
import ply.yacc
from rightarrow.annotations import *
from rightarrow.lexer import Lexer
logger = logging.getLogger(__name__)
class Parser(object):
tokens = Lexer.tokens
def __init__(self, debug=False, lexer_class=None):
self.debug = debug
self.lexer_class = lexer_class or Lexer # Crufty but works around statefulness in PLY
def parse(self, string, lexer = None):
lexer = lexer or self.lexer_class()
return self.parse_token_stream(lexer.tokenize(string))
def parse_token_stream(self, token_iterator, start_symbol='ty'):
# Since PLY has some crufty aspects and dumps files, we try to keep them local
# However, we need to derive the name of the output Python file :-/
output_directory = os.path.dirname(__file__)
try:
module_name = os.path.splitext(os.path.split(__file__)[1])[0]
except:
module_name = __name__
parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab'])
# And we regenerate the parse table every time; it doesn't actually take that long!
new_parser = ply.yacc.yacc(module=self,
debug=self.debug,
tabmodule = parsing_table_module,
outputdir = output_directory,
write_tables=0,
start = start_symbol,
errorlog = logger)
return new_parser.parse(lexer = IteratorToTokenStream(token_iterator))
# ===================== PLY Parser specification =====================
precedence = [
('right', 'ARROW'),
('left', '|'),
]
def p_error(self, t):
raise Exception('Parse error at %s:%s near token %s (%s)' % (t.lineno, t.col, t.value, t.type))
def p_empty(self, p):
'empty :'
pass
def p_ty_parens(self, p):
"ty : '(' ty ')'"
p[0] = p[2]
def p_ty_var(self, p):
"ty : TYVAR"
p[0] = Variable(p[1])
def p_ty_union(self, p):
"ty : ty '|' ty"
        p[0] = Union([p[1], p[3]])
def p_ty_bare(self, p):
"ty : bare_arg_ty"
p[0] = p[1]
def p_ty_funty_bare(self, p):
"ty : ty ARROW ty"
p[0] = Function(arg_types=[p[1]], return_type=p[3])
def p_ty_funty_complex(self, p):
"ty : '(' maybe_arg_types ')' ARROW ty"
argument_types=p[2]
return_type=p[5]
# Check here whether too many kwarg or vararg types are present
# Each item in the list uses the dictionary encoding of tagged variants
arg_types = [argty['arg_type'] for argty in argument_types if 'arg_type' in argty]
vararg_types = [argty['vararg_type'] for argty in argument_types if 'vararg_type' in argty]
kwarg_types = [argty['kwarg_type'] for argty in argument_types if 'kwarg_type' in argty]
if len(vararg_types) > 1:
raise Exception('Argument list with multiple vararg types: %s' % argument_types)
if len(kwarg_types) > 1:
raise Exception('Argument list with multiple kwarg types: %s' % argument_types)
# All the arguments that are not special
p[0] = Function(arg_types=arg_types,
vararg_type=vararg_types[0] if len(vararg_types) > 0 else None,
kwarg_type=kwarg_types[0] if len(kwarg_types) > 0 else None,
kwonly_arg_types=None,
return_type=return_type)
# Because a bare function type is equivalent to a single argument in parens, it is not
# parsed by this rule
def p_maybe_arg_types(self, p):
'''
maybe_arg_types : arg_types ',' arg_ty
| empty
'''
p[0] = [] if len(p) == 2 else p[1] + [p[3]]
# Executive decision is this: kwargs and varargs get to be elements of this list ANYWHERE
# and we check later, to avoid any parsing issues with commas
def p_arg_types_single(self, p):
'''
arg_types : arg_types ',' arg_ty
| arg_ty
'''
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[3]]
def p_arg_ty_normal(self, p):
"arg_ty : ty"
p[0] = { 'arg_type' : p[1] }
def p_arg_ty_vararg(self, p):
"arg_ty : '*' ty"
p[0] = { 'vararg_type' : p[2] }
def p_arg_ty_kwarg(self, p):
"arg_ty : KWARG ty"
p[0] = { 'kwarg_type' : p[2] }
# Special types that never require parenthesis
def p_bare_arg_ty(self, p):
"""
bare_arg_ty : identifier_ty
| dict_ty
| list_ty
| object_ty
| any_ty
"""
p[0] = p[1]
def p_identifier_ty(self, p):
"identifier_ty : ID"
p[0] = NamedType(p[1])
def p_list_ty(self, p):
"list_ty : '[' ty ']'"
p[0] = List(elem_ty=p[2])
def p_dict_ty(self, p):
"dict_ty : '{' ty ':' ty '}'"
p[0] = Dict(key_ty=p[2], value_ty=p[4])
def p_any_ty(self, p):
"any_ty : ANY"
p[0] = Any()
def p_object_ty(self, p):
"""
object_ty : OBJECT '(' ID ')'
| OBJECT '(' ID ',' obj_fields ')'
"""
field_types = {} if len(p) == 5 else p[5]
p[0] = Object(p[3], **field_types)
def p_obj_fields(self, p):
"""
obj_fields : obj_fields ',' obj_field
| obj_field
"""
p[0] = dict([p[1]] if len(p) == 2 else p[1] + [p[3]]) # Note: no checking for dupe fields at the moment
def p_obj_field(self, p):
"obj_field : ID ':' ty"
p[0] = (p[1], p[3])
class IteratorToTokenStream(object):
def __init__(self, iterator):
self.iterator = iterator
def token(self):
try:
return self.iterator.next()
except StopIteration:
return None
if __name__ == '__main__':
logging.basicConfig()
parser = Parser(debug=True)
print parser.parse(sys.stdin.read())
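# A hedged usage sketch; the type expressions below follow the grammar defined
# above, but the exact annotation objects returned are not shown here:
#
#   parser = Parser()
#   parser.parse('int -> bool')            # one-argument function type
#   parser.parse('(int, str) -> [int]')    # two arguments, list return type
#   parser.parse('{str: int}')             # dictionary type
#   parser.parse('object(Foo, bar: int)')  # object type with one field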
|