repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
---|---|---|---|---|---|---|---|---|
kubeflow/pipelines | components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_pig_job.py | Python | apache-2.0 | 1,768 | 0.007353 |
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_pig_job
MODULE = 'kfp_component.google.dataproc._submit_pig_job'
@mock.patch(MODULE + '.submit_job')
class TestSubmitPigJob(unittest.TestCase):
def test_submit_pig_job_with_expected_payload(self, mock_submit_job):
submit_pig_job('mock-project', 'mock-region', 'mock-cluster',
job_id_output_path='/tmp/kfp/output/dataproc/job_id.txt',
queries=['select * from mock_table'],
script_variables={'var-1': 'value1'},
pig_job={ 'continueOnFailure': True },
job={ 'labels': {'key1': 'value1'}})
mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
{
'pigJob': {
'queryList': { 'queries': [
'select * from mock_table'
]},
'scriptVariables': {'var-1': 'value1'},
'continueOnFailure': True
},
'labels': {
'key1': 'value1'
}
}, 30, job_id_output_path='/tmp/kfp/output/dataproc/job_id.txt')
|
samsath/skeleton | src/website/calendar/management/commands/import_weather.py | Python | gpl-3.0 | 1,499 | 0.006004 |
import json
import requests
from django.conf import settings
from website.calendar.models import WeatherTypes, Calendar
from datetime import datetime
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Import the weather information"
def handle(self, **options):
params = {
'key':settings.APIXU_KEY,
'q':settings.APIXU_LOCATION,
'days':settings.APIXU_DAYS
}
r = requests.get('https://{0}'.format(settings.APIXU_URL), params=params)
if r.status_code == 200:
data = r.json()
for day in data['forecast']['forecastday']:
try:
date = datetime.strptime(day['date'], '%Y-%m-%d').timestamp()
sun_rise = datetime.strptime(day['astro']['sunrise'], '%I:%M %p').time()
sun_set = datetime.strptime(day['astro']['sunset'], '%I:%M %p').time()
temp = day['day']['maxtemp_c']
weath, creat = WeatherTypes.objects.get_or_create(
title=day['day']['condition']['text'],
class_code=day['day']['condition']['code'],
icon=day['day']['condition']['icon'],)
Calendar.data.addWeather(int(date),sun_rise, sun_set, temp, weath)
except Exception as exp:
print(exp)
else:
print(r.status_code)
|
chakki-works/elephant_sense | scripts/features/charactor_extractor.py | Python | apache-2.0 | 3,901 | 0.002072 |
import re
from bs4 import BeautifulSoup
from scripts.features.feature_extractor import FeatureExtractor
from scripts.data.cleaning import clean_code
def clean_html_tags(html_text):
soup = BeautifulSoup(html_text, 'html.parser')
if soup.find("h") is not None:
soup.find("h").extract()
cleaned_text = soup.get_text()
cleaned_text = ''.join(cleaned_text.splitlines())
return cleaned_text
def clean_code(html_text):
"""Qiitaのコードを取り除きます
:param html_text:
:return:
"""
soup = BeautifulSoup(html_text, 'html.parser')
[x.extract() for x in soup.findAll(class_="code-frame")]
[x.extract() for x in soup.findAll("code")]
cleaned_text = soup.get_text()
cleaned_text = ''.join(cleaned_text.splitlines())
return cleaned_text
def cleaning(text):
replaced_text = clean_code(html_text=text) # remove source code
replaced_text = clean_html_tags(html_text=replaced_text) # remove html tag
replaced_text = re.sub(r'\$.*?\$+', '', replaced_text) # remove math equation
replaced_text = re.sub(r'[@@]\w+', '', replaced_text) # remove @mention
replaced_text = re.sub(r'https?:\/\/.*?([\r\n ]|$)', '', replaced_text) # remove URL
replaced_text = re.sub(r' ', '', replaced_text) # remove zenkaku space
return replaced_text
class RenderedBodyPreprocessor():
def clean_rendered_body(self, rendered_body):
cleaned_rendered_body = cleaning(rendered_body)
return cleaned_rendered_body
class CharacterRatio():
def __init__(self, regex_text, text):
self.regex = regex_text
self.text = text
def character_ratio(self):
pattern = re.compile(self.regex)
count = len(re.findall(pattern, self.text))
ratio = 0 if len(self.text) == 0 else count / len(self.text)
return ratio
class KanjiRatioExtractor(FeatureExtractor):
def __init__(self, cleaned_rendered_body):
self.regex_text = '[一-龥]'
self.character_ratio = CharacterRatio(self.regex_text, cleaned_rendered_body)
def extract(self, post, extracted=None):
ratio = self.character_ratio.character_ratio()
return ratio
class HiraganaRatioExtractor(FeatureExtractor):
def __init__(self, cleaned_rendered_body):
self.regex_text = '[ぁ-ん]'
self.character_ratio = CharacterRatio(self.regex_text, cleaned_rendered_body)
def extract(self, post, extracted=None):
ratio = self.character_ratio.character_ratio()
return ratio
class KatakanaRatioExtractor(FeatureExtractor):
def __init__(self, cleaned_rendered_body):
self.regex_text = '[ァ-ン]'
self.character_ratio = CharacterRatio(self.regex_text, cleaned_rendered_body)
def extract(self, post, extracted=None):
ratio = self.character_ratio.character_ratio()
return ratio
class AlphabetRatioExtractor(FeatureExtractor):
def __init__(self, cleaned_rendered_body):
self.regex_text = '[a-zA-Z]'
self.character_ratio = CharacterRatio(self.regex_text, cleaned_rendered_body)
def extract(self, post, extracted=None):
ratio = self.character_ratio.character_ratio()
return ratio
class NumberRatioExtractor(FeatureExtractor):
def __init__(self, cleaned_rendered_body):
self.regex_text = '[0-9]'
self.character_ratio = CharacterRatio(self.regex_text, cleaned_rendered_body)
def extract(self, post, extracted=None):
ratio = self.character_ratio.character_ratio()
return ratio
class PunctuationRatioExtractor(FeatureExtractor):
def __init__(self, cleaned_rendered_body):
self.regex_text = '[、]'
self.character_ratio = CharacterRatio(self.regex_text, cleaned_rendered_body)
def extract(self, post, extracted=None):
ratio = self.character_ratio.character_ratio()
return ratio
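# --- Illustrative usage sketch (not part of the original module) ---
# Each extractor above delegates to CharacterRatio: count regex matches in the
# cleaned body and divide by its length. The sample text below is made up.
if __name__ == "__main__":
    sample = "漢字とひらがなとカタカナとABCと123、"
    print(KanjiRatioExtractor(sample).extract(post=None))     # share of kanji
    print(HiraganaRatioExtractor(sample).extract(post=None))  # share of hiragana
    print(NumberRatioExtractor(sample).extract(post=None))    # share of digits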
|
akatsoulas/mozillians | lib/jinjautils.py | Python | bsd-3-clause | 1,577 | 0 |
# TODO: let's see if we can get rid of this, it's garbage
from django.contrib.admin import options, actions, sites
from django.template import loader
import jingo
def django_to_jinja(template_name, context, **kw):
"""
We monkeypatch Django admin's render_to_response to work in our Jinja
environment. We have an admin/base_site.html template that Django's
templates inherit, but instead of rendering html, it renders the Django
pieces into a Jinja template. We get all of Django's html, but wrapped in
our normal site structure.
"""
context_instance = kw.pop('context_instance')
source = loader.render_to_string(template_name, context, context_instance)
request = context_instance['request']
return jingo.render(request, jingo.env.from_string(source))
actions.render_to_response = django_to_jinja
options.render_to_response = django_to_jinja
sites.render_to_response = django_to_jinja
def jinja_for_django(template_name, context=None, **kw):
"""
If you want to use some built in logic (or a contrib app) but need to
override the templates to work with Jinja, replace the object's
render_to_response function with this one. That will render a Jinja
template through Django's functions. An example can be found in the users
app.
"""
if context is None:
context = {}
context_instance = kw.pop('context_instance')
request = context_instance['request']
for d in context_instance.dicts:
context.update(d)
return jingo.render(request, template_name, context, **kw)
|
MoebiuZ/OpcodeOne | oldfiles/oldcode/tools/assemblerold/functions.py | Python | apache-2.0 | 5,835 | 0.048517 |
import re
import sys
import struct
import codecs
class REMatcher(object):
def __init__(self, matchstring):
self.matchstring = matchstring
def match(self,regexp):
self.rematch = re.match(regexp, self.matchstring, re.IGNORECASE)
return bool(self.rematch)
def group(self,i):
return self.rematch.group(i)
def groups(self):
return self.rematch.groups()
class Assembler:
def checkInCode(self):
if not self.in_code:
print "Error in line " + str(self.line_count) + ": Instruction not in code section"
exit()
def checkInData(self):
if self.in_code:
print "Error in line " + str(self.line_count) + " Instruction not in data section"
exit()
def writefile(self, file):
with open(file, "wb") as output:
for byte in self.code:
output.write(byte)
output.close()
def push24(self, num):
b = bytearray(struct.pack('>I', num))
self.code.append(chr(b[1]))
self.code.append(chr(b[2]))
self.code.append(chr(b[3]))
def push8(self, num):
self.code.append(chr(num))
def newlabel(self, label):
if label != None:
if label in self.labels:
self.printerror("Label " + m.group(2) + ": is duplicated")
else:
self.labels[label] = self.inst_addr
def enqueuelabel(self, group, position):
# if relative:
# self.labelpass.append({ "label": group.strip("\(\)"), "position": self.inst_addr+1, "inst_address": self.inst_addr })
# else:
self.labelpass.append({ "label": group.strip("\(\)"), "position": position })
def printerror(self, text):
print "Error in line " + str(self.line_count) + ": " + text
exit()
def parse_line(self, line):
line = line.strip(" \t\n\r")
# Remove comments if not inside string
if re.match(".*?#.*", line):
if not re.match("[^']*'[^#]*#[^']*", line):
line = re.sub("\s*#.*", "", line)
m = REMatcher(line.strip(" \t\n\r"))
if line == '': # Empty line
pass
elif m.match("(\w+)\:\Z"): # Labels
if m.group(1) in self.labels:
self.printerror("Label \'" + m.group(1) + ":\'' is duplicated")
else:
self.labels[m.group(1)] = self.inst_addr
elif m.match("\.code\Z"): # Section.code
self.in_code = True
elif m.match("\.data\Z"): # Section .data
self.in_code = False
elif m.match(self.LABEL + "\.DS\s+(?:\'|\")(.+)(?:\'|\")\Z"): # Data String
self.checkInData()
self.newlabel(m.group(1))
i = 0
for char in m.group(2):
self.push8( ord(char.encode('latin-1')) )
i += 1
if i % 3 == 0:
self.inst_addr += 1
self.push8(0x00) # String terminator
i += 1
# Fix word alignment
while i % 3 != 0:
self.push8(0x00)
i += 1
self.inst_addr += 1
elif m.match(self.LABEL + "\.DW\s+(" + self.HEX + ")\Z"): # Data Word
self.checkInData()
self.newlabel(m.group(1))
self.push24(int(m.group(2), 0))
self.inst_addr += 1
elif m.match(self.LABEL + "CALL\Z" + self.sep + "(" + self.HEX + "|\(" + self.ALPHANUMERIC + "\))"):
self.newlabel(m.group(1))
self.inst_CALL(m)
elif m.match(self.LABEL + "LD" + self.spc +
"(" + self.REG + ")" + self.sep +
"(" + self.REG + "|" + self.HEX + "|" + self.INT + "|\(" + self.ALPHANUMERIC + "\))"
):
self.newlabel(m.group(1))
self.inst_LD(m)
elif m.match(self.LABEL + "DBG" + self.spc + "(" + self.REG + ")"):
self.newlabel(m.group(1))
self.inst_DBG(m)
elif m.match(self.LABEL + "HALT\Z"):
self.newlabel(m.group(1))
self.inst_HALT(m)
elif m.match(self.LABEL + "MR" + self.spc +
"(" + self.REG + ")" + self.sep +
"(\[" + self.REG + "\]|" + self.HEX + "|\(" + self.ALPHANUMERIC + "\))" +
self.OFFSET + "\Z"
):
self.newlabel(m.group(1))
self.inst_MR(m)
elif m.match(self.LABEL + "MW" + self.spc + "(\[" + self.REG + "\]|" + self.HEX + "|\(" + self.ALPHANUMERIC + "\))" + self.OFFSET + self.sep + "(" + self.REG + ")\Z"):
self.newlabel(m.group(1))
self.inst_MW(m)
elif m.match(self.LABEL + "NOP\Z"):
self.newlabel(m.group(1))
self.inst_NOP(m)
elif m.match(self.LABEL + "POP\Z" + self.sep + "(" + self.REG + ")"):
self.newlabel(m.group(1))
self.inst_POP(m)
elif m.match(self.LABEL + "PUSH\Z" + self.sep + "(" + self.REG + ")"):
self.newlabel(m.group(1))
self.inst_PUSH(m)
elif m.match(self.LABEL + "RET\Z"):
self.newlabel(m.group(1))
self.inst_RET(m)
elif m.match(self.LABEL + "VR" + self.spc + "(" + self.REG + ")" + self.sep + "(\[" + self.REG + "\]|" + self.HEX + ")" + self.OFFSET + "\Z"):
self.newlabel(m.group(1))
self.inst_VR(m)
elif m.match(self.LABEL + "VW" + self.spc + "(\[" + self.REG + "\]|" + self.HEX + ")" + self.OFFSET + self.sep + "(" + self.REG + ")\Z"):
self.newlabel(m.group(1))
self.inst_VW(m)
else:
self.printerror("Syntax error")
self.line_count += 1
if self.inst_addr > 0xffffff:
print "Error: The assembled binary will excess the maximum size of 0xffffff words"
exit()
def second_pass(self):
for item in self.labelpass:
if item['label'] not in self.labels:
print "Label '" + item['label'] + "' doesn't exist"
exit()
#if item['inst_address'] != None:
# b = bytearray(struct.pack('>I', self.labels[item['label']] - item['inst_address']))
#else:
b = bytearray(struct.pack('>I', self.labels[item['label']]))
addr = item['position']*3
self.code[addr] = chr(b[1])
self.code[addr+1] = chr(b[2])
self.code[addr+2] = chr(b[3])
def assemble(self, file):
self.line_count = 1;
self.instructions = []
self.code = []
self.labelpass = []
self.labels = dict()
self.inst_addr = 0
self.in_code = True
with codecs.open(file, 'r', encoding='utf-8') as source_file:
for line in source_file:
self.parse_line(line)
source_file.close()
self.second_pass()
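# --- Illustrative sketch (not part of the original file) ---
# REMatcher, defined above, wraps re.match so that a pattern test and its groups
# can be used inside an if/elif chain; the sample line below is made up.
m = REMatcher("LD R1, 0x1F")
if m.match(r"(\w+)\s+(\w+)\s*,\s*(\S+)"):
    print m.groups()  # ('LD', 'R1', '0x1F')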
|
tartley/pyweek11-cube | source/view/modelview.py | Python | bsd-3-clause | 682 | 0.001466 |
from __future__ import division
from pyglet.gl import gl, glu
class ModelView(object):
'''
Manage modelview matrix, performing the MVC's 'view' parts of the 'camera'
'''
def __init__(self, camera):
self.camera = camera
def set_identity(self):
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
def set_world(self):
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
position = self.camera.position
look_at = self.camera.look_at
glu.gluLookAt(
position.x, position.y, position.z,
look_at.x, look_at.y, look_at.z,
0, 1, -1)
|
vinicius-ronconi/WeatherForecast | WeatherForecast/wsgi.py | Python | mit | 408 | 0 |
"""
WSGI config for WeatherForecast project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WeatherForecast.settings")
application = get_wsgi_application()
|
Parallel-in-Time/pySDC | pySDC/projects/AllenCahn_Bayreuth/run_temp_forcing_benchmark.py | Python | bsd-2-clause | 5,134 | 0.002922 |
from argparse import ArgumentParser
import numpy as np
from mpi4py import MPI
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_Temp_MPIFFT import allencahn_temp_imex
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
from pySDC.projects.AllenCahn_Bayreuth.AllenCahn_dump import dump
def run_simulation(name=None, nprocs_space=None):
"""
A simple test program to do PFASST runs for the AC equation
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_size = space_comm.Get_size()
space_rank = space_comm.Get_rank()
# split world communicator to create time-communicators
if nprocs_space is not None:
color = int(world_rank % nprocs_space)
else:
color = int(world_rank / world_size)
time_comm = comm.Split(color=color)
time_size = time_comm.Get_size()
time_rank = time_comm.Get_rank()
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-08
level_params['dt'] = 1E-03
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 16.0
problem_params['nvars'] = [(48 * 48, 48 * 48), (8 * 48, 8 * 48)]
problem_params['eps'] = [0.04]
problem_params['radius'] = 0.25
problem_params['TM'] = 1.0
problem_params['D'] = 1.0
problem_params['dw'] = [300.0]
problem_params['comm'] = space_comm
problem_params['name'] = name
problem_params['init_type'] = 'circle_rand'
problem_params['spectral'] = True
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20 if space_rank == 0 else 99 # set level depending on rank
controller_params['hook_class'] = dump
controller_params['predict_type'] = 'fine_only'
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = fft_to_fft
description['problem_class'] = allencahn_temp_imex
# set time parameters
t0 = 0.0
Tend = 100 * 0.001
if space_rank == 0 and time_rank == 0:
out = f'---------> Running {name} with {time_size} process(es) in time and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
if space_rank == 0:
print()
# convert filtered statistics to list of iterations count, sorted by time
iter_counts = sort_stats(filter_stats(stats, type='niter'), sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = f'Mean number of iterations on rank {time_rank}: {np.mean(niters):.4f}'
print(out)
timing = sort_stats(filter_stats(stats, type='timing_setup'), sortby='time')
out = f'Setup time on rank {time_rank}: {timing[0][1]:.4f} sec.'
print(out)
timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')
out = f'Time to solution on rank {time_rank}: {timing[0][1]:.4f} sec.'
print(out)
if __name__ == "__main__":
# Add parser to get number of processors in space and setup (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
name = 'AC-bench-tempforce'
run_simulation(name=name, nprocs_space=args.nprocs_space)
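# Illustrative invocation (rank counts are assumptions, not from the source):
# split 4 MPI ranks into 2 processes in space times 2 in time.
#
#   mpirun -np 4 python run_temp_forcing_benchmark.py -n 2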
|
bayespy/bayespy | bayespy/inference/vmp/nodes/gate.py | Python | mit | 7,980 | 0.001504 |
################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
"""
import numpy as np
from bayespy.utils import misc
from .node import Node, Moments
from .deterministic import Deterministic
from .categorical import CategoricalMoments
from .concatenate import Concatenate
class Gate(Deterministic):
"""
Deterministic gating of one node.
Gating is performed over one plate axis.
Note: You should not use gating for several variables which parents of a
same node if the gates use the same gate assignments. In such case, the
results will be wrong. The reason is a general one: A stochastic node may
not be a parent of another node via several paths unless at most one path
has no other stochastic nodes between them.
"""
def __init__(self, Z, X, gated_plate=-1, moments=None, **kwargs):
"""
Constructor for the gating node.
Parameters
----------
Z : Categorical-like node
A variable which chooses the index along the gated plate axis
X : node
The node whose plate axis is gated
gated_plate : int (optional)
The index of the plate axis to be gated (by default, -1, that is,
the last axis).
"""
if gated_plate >= 0:
raise ValueError("Cluster plate must be negative integer")
self.gated_plate = gated_plate
if moments is not None:
X = self._ensure_moments(
X,
moments.__class__,
**moments.get_instance_conversion_kwargs()
)
if not isinstance(X, Node):
raise ValueError("X must be a node or moments should be provided")
X_moments = X._moments
self._moments = X_moments
dims = X.dims
if len(X.plates) < abs(gated_plate):
raise ValueError("The gated node does not have a plate axis is "
"gated")
K = X.plates[gated_plate]
Z = self._ensure_moments(Z, CategoricalMoments, categories=K)
self._parent_moments = (Z._moments, X_moments)
if Z.dims != ( (K,), ):
raise ValueError("Inconsistent number of clusters")
self.K = K
super().__init__(Z, X, dims=dims, **kwargs)
def _compute_moments(self, u_Z, u_X):
"""
"""
u = []
for i in range(len(u_X)):
# Make the moments of Z and X broadcastable and move the gated plate
# to be the last axis in the moments, then sum-product over that
# axis
ndim = len(self.dims[i])
z = misc.add_trailing_axes(u_Z[0], ndim)
z = misc.moveaxis(z, -ndim-1, -1)
gated_axis = self.gated_plate - ndim
if np.ndim(u_X[i]) < abs(gated_axis):
x = misc.add_trailing_axes(u_X[i], 1)
else:
x = misc.moveaxis(u_X[i], gated_axis, -1)
ui = misc.sum_product(z, x, axes_to_sum=-1)
u.append(ui)
return u
def _compute_message_to_parent(self, index, m_child, u_Z, u_X):
"""
"""
if index == 0:
m0 = 0
# Compute Child * X, sum over variable axes and move the gated axis
# to be the last. Need to do some shape changing in order to make
# Child and X to broadcast properly.
for i in range(len(m_child)):
ndim = len(self.dims[i])
c = m_child[i][...,None]
c = misc.moveaxis(c, -1, -ndim-1)
gated_axis = self.gated_plate - ndim
x = u_X[i]
if np.ndim(x) < abs(gated_axis):
x = np.expand_dims(x, -ndim-1)
else:
x = misc.moveaxis(x, gated_axis, -ndim-1)
axes = tuple(range(-ndim, 0))
m0 = m0 + misc.sum_product(c, x, axes_to_sum=axes)
# Make sure the variable axis does not use broadcasting
m0 = m0 * np.ones(self.K)
# Send the message
m = [m0]
return m
elif index == 1:
m = []
for i in range(len(m_child)):
# Make the moments of Z and the message from children
# broadcastable. The gated plate is handled as the last axis in
# the arrays and moved to the correct position at the end.
# Add variable axes to Z moments
ndim = len(self.dims[i])
z = misc.add_trailing_axes(u_Z[0], ndim)
z = misc.moveaxis(z, -ndim-1, -1)
# Axis index of the gated plate
gated_axis = self.gated_plate - ndim
# Add the gate axis to the message from the children
c = misc.add_trailing_axes(m_child[i], 1)
# Compute the message to parent
mi = z * c
# Add extra axes if necessary
if np.ndim(mi) < abs(gated_axis):
mi = misc.add_leading_axes(mi,
abs(gated_axis) - np.ndim(mi))
# Move the axis to the correct position
mi = misc.moveaxis(mi, -1, gated_axis)
m.append(mi)
return m
else:
raise ValueError("Invalid parent index")
def _compute_weights_to_parent(self, index, weights):
"""
"""
if index == 0:
return weights
elif index == 1:
if self.gated_plate >= 0:
raise ValueError("Gated plate axis must be negative")
return (
np.expand_dims(weights, axis=self.gated_plate)
if np.ndim(weights) >= abs(self.gated_plate) else
weights
)
else:
raise ValueError("Invalid parent index")
def _compute_plates_to_parent(self, index, plates):
"""
"""
if index == 0:
return plates
elif index == 1:
plates = list(plates)
# Add the cluster plate axis
if self.gated_plate < 0:
knd = len(plates) + self.gated_plate + 1
else:
raise RuntimeError("Cluster plate axis must be negative")
plates.insert(knd, self.K)
return tuple(plates)
else:
raise ValueError("Invalid parent index")
def _compute_plates_from_parent(self, index, plates):
"""
"""
if index == 0:
return plates
elif index == 1:
plates = list(plates)
# Remove the cluster plate, if the parent has it
if len(plates) >= abs(self.gated_plate):
plates.pop(self.gated_plate)
return tuple(plates)
else:
raise ValueError("Invalid parent index")
def Choose(z, *nodes):
"""Choose plate elements from nodes based on a categorical variable.
For instance:
.. testsetup::
from bayespy.nodes import *
.. code-block:: python
>>> import bayespy as bp
>>> z = [0, 0, 2, 1]
>>> x0 = bp.nodes.GaussianARD(0, 1)
>>> x1 = bp.nodes.GaussianARD(10, 1)
>>> x2 = bp.nodes.GaussianARD(20, 1)
>>> x = bp.nodes.Choose(z, x0, x1, x2)
>>> print(x.get_moments()[0])
[ 0. 0. 20. 10.]
This is basically just a thin wrapper over applying Gate node over the
concatenation of the nodes.
"""
categories = len(nodes)
z = Deterministic._ensure_moments(
z,
CategoricalMoments,
categories=categories
)
nodes = [node[...,None] for node in nodes]
combined = Concatenate(*nodes)
return Gate(z, combined)
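# --- Hedged sketch (not part of the original module) ---
# Spelling out what Choose does internally: convert the assignments to
# categorical moments, concatenate the candidates along a new plate axis, and
# gate that axis. Mirrors the docstring example above.
import bayespy as bp
x0 = bp.nodes.GaussianARD(0, 1)
x1 = bp.nodes.GaussianARD(10, 1)
x2 = bp.nodes.GaussianARD(20, 1)
z = Deterministic._ensure_moments([0, 0, 2, 1], CategoricalMoments, categories=3)
x = Gate(z, Concatenate(x0[..., None], x1[..., None], x2[..., None]))
print(x.get_moments()[0])  # [ 0.  0. 20. 10.]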
|
kostyll/Cryptully | cryptully/qt/qNickInputWidget.py | Python | gpl-3.0 | 2,500 | 0.0024 |
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QLineEdit
from PyQt4.QtGui import QMessageBox
from PyQt4.QtGui import QPixmap
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QWidget
import qtUtils
from utils import constants
from utils import errors
from utils import utils
class QNickInputWidget(QWidget):
def __init__(self, image, imageWidth, connectClickedSlot, nick='', parent=None):
QWidget.__init__(self, parent)
self.connectClickedSlot = connectClickedSlot
# Image
self.image = QLabel(self)
self.image.setPixmap(QPixmap(qtUtils.getAbsoluteImagePath(image)).scaledToWidth(imageWidth, Qt.SmoothTransformation))
# Nick field
self.nickLabel = QLabel("Nickname:", self)
self.nickEdit = QLineEdit(nick, self)
self.nickEdit.setMaxLength(constants.NICK_MAX_LEN)
self.nickEdit.returnPressed.connect(self.__connectClicked)
# Connect button
self.connectButton = QPushButton("Connect", self)
self.connectButton.resize(self.connectButton.sizeHint())
self.connectButton.setAutoDefault(False)
self.connectButton.clicked.connect(self.__connectClicked)
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.nickLabel)
hbox.addWidget(self.nickEdit)
hbox.addStretch(1)
vbox = QVBoxLayout()
vbox.addStretch(1)
vbox.addLayout(hbox)
vbox.addWidget(self.connectButton)
vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.image)
hbox.addSpacing(10)
hbox.addLayout(vbox)
hbox.addStretch(1)
self.setLayout(hbox)
def __connectClicked(self):
nick = str(self.nickEdit.text()).lower()
# Validate the given nick
nickStatus = utils.isValidNick(nick)
if nickStatus == errors.VALID_NICK:
self.connectClickedSlot(nick)
elif nickStatus == errors.INVALID_NICK_CONTENT:
QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_CONTENT)
elif nickStatus == errors.INVALID_NICK_LENGTH:
QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_LENGTH)
elif nickStatus == errors.INVALID_EMPTY_NICK:
QMessageBox.warning(self, errors.TITLE_EMPTY_NICK, errors.EMPTY_NICK)
|
cloudbase/nova | nova/tests/unit/policy_fixture.py | Python | apache-2.0 | 4,908 | 0.000204 |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import six
import nova.conf
from nova.conf import paths
from nova import policies
import nova.policy
from nova.tests.unit import fake_policy
CONF = nova.conf.CONF
class RealPolicyFixture(fixtures.Fixture):
"""Load the live policy for tests.
A base policy fixture that starts with the assumption that you'd
like to load and enforce the shipped default policy in tests.
Provides interfaces to tinker with both the contents and location
of the policy file before loading to allow overrides. To do this
implement ``_prepare_policy`` in the subclass, and adjust the
``policy_file`` accordingly.
"""
def _prepare_policy(self):
"""Allow changing of the policy before we get started"""
pass
def setUp(self):
super(RealPolicyFixture, self).setUp()
# policy_file can be overridden by subclasses
self.policy_file = paths.state_path_def('etc/nova/policy.json')
self._prepare_policy()
CONF.set_override('policy_file', self.policy_file, group='oslo_policy')
nova.policy.reset()
nova.policy.init()
self.addCleanup(nova.policy.reset)
def set_rules(self, rules):
policy = nova.policy._ENFORCER
policy.set_rules(oslo_policy.Rules.from_dict(rules))
def add_missing_default_rules(self, rules):
"""Adds default rules and their values to the given rules dict.
The given rules dict may have an incomplete set of policy rules.
This method will add the default policy rules and their values to
the dict. It will not override the existing rules.
"""
for rule in policies.list_rules():
if rule.name not in rules:
rules[rule.name] = rule.check_str
class PolicyFixture(RealPolicyFixture):
"""Load a fake policy from nova.tests.unit.fake_policy
This overrides the policy with a completely fake and synthetic
policy file.
NOTE(sdague): the use of this is deprecated, and we should unwind
the tests so that they can function with the real policy. This is
mostly legacy because our default test instances and default test
contexts don't match up. It appears that in many cases fake_policy
was just modified to whatever makes tests pass, which makes it
dangerous to be used in tree. Long term a NullPolicy fixture might
be better in those cases.
"""
def _prepare_policy(self):
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file = os.path.join(self.policy_dir.path,
'policy.json')
# load the fake_policy data and add the missing default rules.
policy_rules = jsonutils.loads(fake_policy.policy_data)
self.add_missing_default_rules(policy_rules)
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy_rules, f)
CONF.set_override('policy_dirs', [], group='oslo_policy')
class RoleBasedPolicyFixture(RealPolicyFixture):
"""Load a modified policy which allows all actions only be a single roll.
This fixture can be used for testing role based permissions as it
provides a version of the policy which stomps over all previous
declaration and makes every action only available to a single
role.
NOTE(sdague): we could probably do this simpler by only loading a
single default rule.
"""
def __init__(self, role="admin", *args, **kwargs):
super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs)
self.role = role
def _prepare_policy(self):
with open(CONF.oslo_policy.policy_file) as fp:
policy = fp.read()
policy = jsonutils.loads(policy)
self.add_missing_default_rules(policy)
# Convert all actions to require specified role
for action, rule in six.iteritems(policy):
policy[action] = 'role:%s' % self.role
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy, f)
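# --- Hypothetical usage sketch (test class name is illustrative) ---
# These fixtures are consumed from a test case via useFixture(); afterwards
# nova.policy enforces the loaded (or role-restricted) rules for the test.
import testtools

class ExamplePolicyTest(testtools.TestCase):
    def setUp(self):
        super(ExamplePolicyTest, self).setUp()
        self.policy = self.useFixture(RoleBasedPolicyFixture(role="admin"))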
|
CognizantOneDevOps/Insights | PlatformAgents/com/cognizant/devops/platformagents/agents/alm/qtest/QtestAgent.py | Python | apache-2.0 | 18,777 | 0.002929 |
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
from ....core.BaseAgent import BaseAgent
import base64
import json
import math
import datetime
from dateutil import parser
class QtestAgent (BaseAgent):
@BaseAgent.timed
def process(self):
baseUrl = self.config.get('baseUrl', '')
userName = self.getCredential('userid')
password = self.getCredential('passwd')
startFrom = self.config.get('startFrom') + '+00:00'
startFromDate = parser.parse(startFrom, ignoretz=True)
pageSize = self.config.get('responsePageSize', 100)
dynamicTemplate = self.config.get("dynamicTemplate", {})
almEntities = dynamicTemplate.get('almEntities', {})
metadata = dynamicTemplate.get("almEntityMetaData", None)
isHistoryApi = self.config.get('isHistoryApi', False)
automationType = dict()
idChunkSize = 10
if isHistoryApi:
automationConfig = dynamicTemplate.get("automationType", {})
automationType = automationConfig.get("test-cases", automationType)
idChunkSize = self.config.get('historyIdChunkSize', idChunkSize)
testRunTypes = dynamicTemplate.get('testRunsType', {})
if 'test-runs' in almEntities:
for testRunType in testRunTypes:
almEntities[testRunType] = almEntities.get('test-runs', {})
almEntities.pop('test-runs', {})
payloadConfig = dict()
for entityType in almEntities:
payloadConfig[entityType] = dict()
payload = dict()
payload['fields'] = ['*']
entity = entityType
if entityType in testRunTypes:
testRunType = testRunTypes[entityType]
payload['query'] = testRunType.get('query') + " and " + "'Last Modified Date' >= '%s'"
entity = 'test-runs'
payloadConfig[entityType]['automation'] = testRunType.get('automation')
else:
payload['query'] = "'Last Modified Date' >= '%s'"
payload['object_type'] = entity
payloadConfig[entityType]['payload'] = json.dumps(payload)
payloadConfig[entityType]['entity'] = entity
encodeKey = 'InSightsAlmAgent:'
authKey = base64.b64encode(encodeKey.encode('utf-8'))
token = self.login(baseUrl, userName, password, authKey)
bearerToken = 'bearer ' + token if token else None
apiHeaders = {'Content-Type': 'application/json', 'accept': 'application/json', 'Authorization': bearerToken}
projectData = self.getResponse(baseUrl + "/api/v3/projects?assigned=false", 'GET', None, None, None, None, apiHeaders)
injectData = dict()
try:
for project in projectData:
projectId = project.get('id', -1)
projectIdStr = str(projectId)
projectName = project.get('name', '')
projectUrl = baseUrl + '/api/v3/projects/' + projectIdStr
searchUrl = projectUrl + '/search?page={0}&pageSize={1}'
historyUrl = projectUrl + "/histories?page={0}&pageSize={1}"
if projectIdStr not in self.tracking:
self.tracking[projectIdStr] = dict()
projectTrackingDetails = self.tracking[projectIdStr]
injectData['projectId'] = projectId
injectData['projectName'] = projectName
for entity in payloadConfig:
idList = list()
toolsData = list()
responseTemplate = almEntities[entity]
if entity not in projectTrackingDetails:
lastTracked = startFrom
lastTrackedDate = startFromDate
projectTrackingDetails[entity] = dict()
else:
lastTracked = projectTrackingDetails[entity].get('lastModificationDate', startFrom)
lastTrackedDate = parser.parse(lastTracked, ignoretz=True)
nextResponse, page = True, 1
pageSetFlag, totalPage = False, 0
entityConfig = payloadConfig[entity]
payload = entityConfig['payload'] % lastTracked
injectData['almType'] = entityConfig['entity']
if 'automation' in entityConfig:
injectData['automation'] = payloadConfig[entity]['automation']
while nextResponse:
response = dict()
try:
url = searchUrl.format(page, pageSize)
response = self.getResponse(url, 'POST', None, None, payload, None, apiHeaders)
if not pageSetFlag:
total = response.get('total', 0)
totalPage = int(math.ceil(float(total) / 100))
pageSetFlag = True
except Exception as err:
self.baseLogger.error(err)
responseData = response.get('items', None)
if responseData:
for response in responseData:
lastModified = response.get('last_modified_date', None)
lastModifiedDate = parser.parse(lastModified, ignoretz=True)
if lastModifiedDate > lastTrackedDate:
lastTrackedDate = lastModifiedDate
lastTracked = lastModified
responseId = response.get('id')
idList.append(responseId)
if injectData['almType'] == 'requirements':
injectData['jiraKey'] = response.get('name', '').split(' ')[0]
for entityProperty in response.get('properties', []):
if entityProperty.get('field_name', None):
injectData[str(entityProperty.get('field_name').lower()).replace(' ', '')] = entityProperty.get('field_value_name')
toolsData += self.parseResponse(responseTemplate, response, injectData)
if totalPage == page:
pageSetFlag, nextResponse = False, False
else:
pageSetFlag, nextResponse = False, False
page = page + 1
if isHistoryApi and entity == 'test-cases' and idList:
automationData = self.automationTypeHistory(historyUrl, projectId, entity, automationType, apiHeaders, idList, idChunkSize, pageSize)
if automationData:
toolsData += automationData
if toolsData:
self.publishToolsData(toolsData, metadata)
projectTrackingDetails[entity] = {'idList': idList, 'lastModificationDate': lastTracked}
self.updateTrackingJson(self.tracking)
except Exception as err:
self.baseLogger.error(err)
finally:
self.logout(token, baseUrl)
def scheduleExtensions(self):
extensions = self.config.get('dynamicTemplate', {})
|
Balandat/cont_no_regret | old_code/NLopt.py | Python | mit | 1,667 | 0.010198 |
'''
Nonlinear optimization by use of Affine DualAveraging
@author: Maximilian Balandat
@date: May 13, 2015
'''
import numpy as np
from .Domains import nBox
class NLoptProblem():
""" Basic class describing a Nonlinear Optimization problem. """
def __init__(self, domain, objective):
""" Constructor for the basic problem class. Here objective is a callable that
provides val and grad methods for computing value and gradient, respectively. """
if not isinstance(domain, nBox):
raise Exception('For now only nBoxes are supported!')
self.domain, self.objective = domain, objective
def run_minimization(self, etas, N, **kwargs):
""" Runs the minimization of the objective function based on interpreting
the value/gradient at the current iterate as an affine loss function
and applying dual averaging with the Exponential Potential. """
t, T, = 1, len(etas)
A = np.zeros((N, self.domain.n))
actions = [self.domain.sample_uniform(N)]
bounds = np.array(self.domain.bounds)
while t<T:
A += self.objective.grad(actions[-1])
actions.append(quicksample(bounds, A, etas[t]))
t += 1
return actions
def quicksample(bounds, A, eta):
""" Function returning actions sampled from the solution of the Dual Averaging
update on an Box with Affine losses, Exponential Potential. """
C1, C2 = np.exp(-eta*A*bounds[:,0]), np.exp(-eta*A*bounds[:,1])
Finv = lambda U: -np.log(C1 - (C1-C2)*U)/A/eta
return Finv(np.random.rand(*A.shape))
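# --- Illustrative check of quicksample (not part of the original module) ---
# For a fixed affine loss vector A over the unit box, samples from the
# exponential-potential closed form concentrate toward the low-loss end of
# each coordinate interval as eta grows.
bounds = np.array([[0.0, 1.0], [0.0, 1.0]])
A = np.tile(np.array([[0.5, -1.0]]), (1000, 1))
samples = quicksample(bounds, A, eta=5.0)
print(samples.mean(axis=0))  # first coordinate skews low, second skews high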
|
asimshankar/tensorflow | tensorflow/python/ops/rnn_cell_impl.py | Python | apache-2.0 | 61,366 | 0.004856 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import numbers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
# This can be used with self.assertRaisesRegexp for assert_like_rnncell.
ASSERT_LIKE_RNNCELL_ERROR_REGEXP = "is not an RNNCell"
def assert_like_rnncell(cell_name, cell):
"""Raises a
|
TypeError if cell is not like an RNNCell.
NOTE: Do not rely on the error message (in particular in tests) which can be
subject to change to increase readability. Use
ASSERT_LIKE_RNNCELL_ERROR_REGEXP.
Args:
cell_name: A string to give a meaningful error referencing to the name
of the function argument.
cell: The object which should behave like an RNNCell.
Raises:
TypeError: A human-friendly exception.
"""
conditions = [
hasattr(cell, "output_size"),
hasattr(cell, "state_size"),
hasattr(cell, "get_initial_state") or hasattr(cell, "zero_state"),
callable(cell),
]
errors = [
"'output_size' property is missing",
"'state_size' property is missing",
"either 'zero_state' or 'get_initial_state' method is required",
"is not callable"
]
if not all(conditions):
errors = [error for error, cond in zip(errors, conditions) if not cond]
raise TypeError("The argument {!r} ({}) is not an RNNCell: {}.".format(
cell_name, cell, ", ".join(errors)))
def _concat(prefix, suffix, static=False):
"""Concat that enables int, Tensor, or TensorShape values.
This function takes a size specification, which can be an integer, a
TensorShape, or a Tensor, and converts it into a concatenated Tensor
(if static = False) or a list of integers (if static = True).
Args:
prefix: The prefix; usually the batch size (and/or time step size).
(TensorShape, int, or Tensor.)
suffix: TensorShape, int, or Tensor.
static: If `True`, return a python list with possibly unknown dimensions.
Otherwise return a `Tensor`.
Returns:
shape: the concatenation of prefix and suffix.
Raises:
ValueError: if `suffix` is not a scalar or vector (or TensorShape).
ValueError: if prefix or suffix was `None` and asked for dynamic
Tensors out.
"""
if isinstance(prefix, ops.Tensor):
p = prefix
p_static = tensor_util.constant_value(prefix)
if p.shape.ndims == 0:
p = array_ops.expand_dims(p, 0)
elif p.shape.ndims != 1:
raise ValueError("prefix tensor must be either a scalar or vector, "
"but saw tensor: %s" % p)
else:
p = tensor_shape.as_shape(prefix)
p_static = p.as_list() if p.ndims is not None else None
p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
if p.is_fully_defined() else None)
if isinstance(suffix, ops.Tensor):
s = suffix
s_static = tensor_util.constant_value(suffix)
if s.shape.ndims == 0:
s = array_ops.expand_dims(s, 0)
elif s.shape.ndims != 1:
raise ValueError("suffix tensor must be either a scalar or vector, "
"but saw tensor: %s" % s)
else:
s = tensor_shape.as_shape(suffix)
s_static = s.as_list() if s.ndims is not None else None
s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
if s.is_fully_defined() else None)
if static:
shape = tensor_shape.as_shape(p_static).concatenate(s_static)
shape = shape.as_list() if shape.ndims is not None else None
else:
if p is None or s is None:
raise ValueError("Provided a prefix or suffix of None: %s and %s"
% (prefix, suffix))
shape = array_ops.concat((p, s), 0)
return shape
def _zero_state_tensors(state_size, batch_size, dtype):
"""Create tensors of zeros based on state_size, batch_size, and dtype."""
def get_state_shape(s):
"""Combine s with batch_size to get a proper tensor shape."""
c = _concat(batch_size, s)
size = array_ops.zeros(c, dtype=dtype)
if not context.executing_eagerly():
c_static = _concat(batch_size, s, static=True)
size.set_shape(c_static)
return size
return nest.map_structure(get_state_shape, state_size)
@tf_export("nn.rnn_cell.RNNCell")
class RNNCell(base_layer.Layer):
"""Abstract object representing an RNN cell.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`. The optional
third input argument, `scope`, is allowed for backwards compatibility
purposes; but should be left off for new subclasses.
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
for each `s` in `self.batch_size`.
"""
def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
super(RNNCell, self).__init__(
trainable=trainable, name=name, dtype=dtype, **kwargs)
# Attribute that indicates whether the cell is a TF RNN cell, due the slight
# difference between TF and Keras RNN cell.
self._is_tf_rnn_cell = True
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs,
|
simongibbons/numpy | numpy/polynomial/__init__.py | Python | bsd-3-clause | 6,788 | 0.000148 |
"""
A sub-package for efficiently dealing with polynomials.
Within the documentation for this sub-package, a "finite power series,"
i.e., a polynomial (also referred to simply as a "series") is represented
by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
order term to highest. For example, array([1,2,3]) represents
``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
applicable to the specific module in question, e.g., `polynomial` (which
"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
all operations on polynomials, including evaluation at an argument, are
implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
This package provides *convenience classes* for each of six different kinds
of polynomials:
======================== ================
**Name** **Provides**
======================== ================
`~polynomial.Polynomial` Power series
`~chebyshev.Chebyshev` Chebyshev series
`~legendre.Legendre` Legendre series
`~laguerre.Laguerre` Laguerre series
`~hermite.Hermite` Hermite series
`~hermite_e.HermiteE` HermiteE series
======================== ================
These *convenience classes* provide a consistent interface for creating,
manipulating, and fitting data with polynomials of different bases.
The convenience classes are the preferred interface for the `~numpy.polynomial`
package, and are available from the ``numpy.polynomial`` namespace.
This eliminates the need to navigate to the corresponding submodules, e.g.
``np.polynomial.Polynomial`` or ``np.polynomial.Chebyshev`` instead of
``np.polynomial.polynomial.Polynomial`` or
``np.polynomial.chebyshev.Chebyshev``, respectively.
The classes provide a more consistent and concise interface than the
type-specific functions defined in the submodules for each type of polynomial.
For example, to fit a Chebyshev polynomial with degree ``1`` to data given
by arrays ``xdata`` and ``ydata``, the
`~chebyshev.Chebyshev.fit` class method::
>>> from numpy.polynomial import Chebyshev
>>> c = Chebyshev.fit(xdata, ydata, deg=1)
is preferred over the `chebyshev.chebfit` function from the
``np.polynomial.chebyshev`` module::
>>> from numpy.polynomial.chebyshev import chebfit
>>> c = chebfit(xdata, ydata, deg=1)
See :doc:`routines.polynomials.classes` for more details.
Convenience Classes
===================
The following lists the various constants and methods common to all of
the classes representing the various kinds of polynomials. In the following,
the term ``Poly`` represents any one of the convenience classes (e.g.
`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.)
while the lowercase ``p`` represents an **instance** of a polynomial class.
Constants
---------
- ``Poly.domain`` -- Default domain
- ``Poly.window`` -- Default window
- ``Poly.basis_name`` -- String used to represent the basis
- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed
- ``Poly.nickname`` -- String used in printing
Creation
--------
Methods for creating polynomial instances.
- ``Poly.basis(degree)`` -- Basis polynomial of given degree
- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x``
- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients
determined by the least-squares fit to the data ``x``, ``y``
- ``Poly.fromroots(roots)`` -- ``p`` with specified roots
- ``p.copy()`` -- Create a copy of ``p``
Conversion
----------
Methods for converting a polynomial instance of one kind to another.
- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly``
- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map
between ``domain`` and ``window``
Calculus
--------
- ``p.deriv()`` -- Take the derivative of ``p``
- ``p.integ()`` -- Integrate ``p``
Validation
----------
- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match
- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match
- ``Poly.has_sametype(p1, p2)`` -- Check if types match
- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match
Misc
----
- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain``
- ``p.mapparms()`` -- Return the parameters for the linear mapping between
``domain`` and ``window``.
- ``p.roots()`` -- Return the roots of `p`.
- ``p.trim()`` -- Remove trailing coefficients.
- ``p.cutdeg(degree)`` -- Truncate p to given degree
- ``p.truncate(size)`` -- Truncate p to given size
"""
from .polynomial import Polynomial
from .chebyshev import Chebyshev
from .legendre import Legendre
from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
__all__ = [
"set_default_printstyle",
"polynomial", "Polynomial",
"chebyshev
|
", "Chebyshev",
"legendre", "Legendre",
"hermite", "Hermite",
"hermite_e", "HermiteE",
"laguerre", "Laguerre",
]
def set_default_printstyle(style):
"""
Set the default format for the string representation of polynomials.
Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii'
or 'unicode'.
Parameters
----------
style : str
Format string for default printing style. Must be either 'ascii' or
'unicode'.
Notes
-----
The default format depends on the platform: 'unicode' is used on
Unix-based systems and 'ascii' on Windows. This determination is based on
default font support for the unicode superscript and subscript ranges.
Examples
--------
>>> p = np.polynomial.Polynomial([1, 2, 3])
>>> c = np.polynomial.Chebyshev([1, 2, 3])
>>> np.polynomial.set_default_printstyle('unicode')
>>> print(p)
1.0 + 2.0·x¹ + 3.0·x²
>>> print(c)
1.0 + 2.0·T₁(x) + 3.0·T₂(x)
>>> np.polynomial.set_default_printstyle('ascii')
>>> print(p)
1.0 + 2.0 x**1 + 3.0 x**2
>>> print(c)
1.0 + 2.0 T_1(x) + 3.0 T_2(x)
>>> # Formatting supersedes all class/package-level defaults
>>> print(f"{p:unicode}")
1.0 + 2.0·x¹ + 3.0·x²
"""
if style not in ('unicode', 'ascii'):
raise ValueError(
f"Unsupported format string '{style}'. Valid options are 'ascii' "
f"and 'unicode'"
)
_use_unicode = True
if style == 'ascii':
_use_unicode = False
from ._polybase import ABCPolyBase
ABCPolyBase._use_unicode = _use_unicode
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
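# --- Runnable sketch of the fitting workflow described in the docstring above ---
# (synthetic data; illustrative only)
if __name__ == "__main__":
    import numpy as np
    xdata = np.linspace(-1, 1, 50)
    ydata = 1.0 + 2.0 * xdata + 0.05 * np.random.randn(50)
    c = Chebyshev.fit(xdata, ydata, deg=1)
    print(c.convert(kind=Polynomial).coef)  # close to [1.0, 2.0]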
|
berserkerbernhard/Lidskjalv | code/networkmonitor/modules/serviceutilities/rdp.py | Python | gpl-3.0 | 1,463 | 0 |
import subprocess
import os
import dialog
class RDP():
def __init__(self):
self.d = dialog.Dialog(dialog="dialog")
self.storage_path = os.path.expanduser("~/LidskjalvData")
def show_rdp_menu(self, site, host):
# """ """
# FIXME
# print("FIXME: rdp_menu")
# sys.exit(1)
while True:
choices = []
choices.append(["", " "])
choices.append(["Q", "Quit"])
sz = os.get_terminal_size()
# width = sz.columns
# height = sz.lines
code, tag = self.d.menu(
"Choose an action",
height=sz.lines - 5,
width=sz.columns - 8,
menu_height=sz.lines - 15,
choices=choices)
if code == self.d.OK:
if tag == "Q":
return None
if tag == "F":
subprocess.Popen(["rdesktop", host])
if tag == "90":
subprocess.Popen(["rdesktop", host])
if tag == "75":
subprocess.Popen(["rdesktop", host])
if tag == "50":
subprocess.Popen(["rdesktop", host])
if tag == "25":
subprocess.Popen(["rdesktop", host])
"""
rdesktop
-g 1824x1026
-k da
-u USER: adusername
-d DOMAIN: myad
-p PASSWORD: password
-T 'NetworkAdmin'
-a 15
192.168.7.31
"""
|
sparkslabs/kamaelia_ | Sketches/JT/Jam/library/trunk/Axon/SchedulingComponent.py | Python | apache-2.0 | 3,988 | 0.003761 |
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ThreadedComponent import threadedcomponent, threadedadaptivecommscomponent
import heapq
import time
class SchedulingComponentMixin(object):
"""
SchedulingComponent() -> new SchedulingComponent
Base class for a threadedcomponent with an inbuilt scheduler, allowing a
component to block until a scheduled event is ready or a message is received
on an inbox.
"""
Inboxes = {"inbox" : "Standard inbox for receiving data from other components",
"control" : "Standard inbox for receiving control messages from other components",
"event" : "Scheduled events which are ready to be processed"}
def __init__(self, **argd):
super(SchedulingComponentMixin, self).__init__(**argd)
self.eventQueue = []
def scheduleRel(self, message, delay, priority=1):
"""
Schedule an event to wake the component and send a message to the
"event" inbox after a delay.
"""
return self.scheduleAbs(message, time.time() + delay, priority)
def scheduleAbs(self, message, eventTime, priority=1):
"""
Schedule an event to wake the component and send a message to the
"event" inbox after at a specified time.
"""
event = eventTime, priority, message
heapq.heappush(self.eventQueue, event)
return event
    def cancelEvent(self, event):
        """ Remove a scheduled event from the scheduler """
self.eventQueue.remove(event)
heapq.heapify(self.eventQueue)
def eventReady(self):
""" Returns true if there is an event ready to be processed """
if self.eventQueue:
eventTime = self.eventQueue[0][0]
if time.time() >= eventTime:
return True
return False
def pause(self):
"""
Sleep until there is either an event ready or a message is received on
an inbox
"""
if self.eventReady():
self.signalEvent()
else:
if self.eventQueue:
eventTime = self.eventQueue[0][0]
super(SchedulingComponentMixin, self).pause(eventTime - time.time())
if self.eventReady():
self.signalEvent()
else:
super(SchedulingComponentMixin, self).pause()
def signalEvent(self):
"""
Put the event message of the earliest scheduled event onto the
component's "event" inbox and remove it from the scheduler.
"""
eventTime, priority, message = heapq.heappop(self.eventQueue)
#print "Signalling, late by:", (time.time() - eventTime)
if not self.inqueues["event"].full():
self.inqueues["event"].put(message)
class SchedulingComponent(SchedulingComponentMixin, threadedcomponent):
def __init__(self, **argd):
super(SchedulingComponent, self).__init__(**argd)
class SchedulingAdaptiveCommsComponent(SchedulingComponentMixin,
threadedadaptivecommscomponent):
def __init__(self, **argd):
super(SchedulingAdaptiveCommsComponent, self).__init__(**argd)
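A minimal usage sketch for the mixin above (it assumes the usual Axon threadedcomponent API of main(), dataReady() and recv(); the component and message names are illustrative only):

# Hypothetical component built on SchedulingComponent
class Ticker(SchedulingComponent):
    def main(self):
        self.scheduleRel("tick", delay=1.0)        # wake up in one second
        while True:
            while self.dataReady("event"):
                msg = self.recv("event")           # receives "tick"
                self.scheduleRel(msg, delay=1.0)   # reschedule the next tick
            self.pause()                           # block until an event or inbox message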
|
krzychb/rtd-test-bed
|
components/efuse/test_efuse_host/efuse_tests.py
|
Python
|
apache-2.0
| 16,074 | 0.004977 |
#!/usr/bin/env python
from __future__ import print_function, division
import unittest
import sys
try:
import efuse_table_gen
except ImportError:
sys.path.append("..")
import efuse_table_gen
'''
To run the test on local PC:
cd ~/esp/esp-idf/components/efuse/test_efuse_host/
./efuse_tests.py
'''
class Py23TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Py23TestCase, self).__init__(*args, **kwargs)
try:
self.assertRaisesRegex
except AttributeError:
# assertRaisesRegexp is deprecated in Python3 but assertRaisesRegex doesn't exist in Python2
# This fix is used in order to avoid using the alias from the six library
self.assertRaisesRegex = self.assertRaisesRegexp
class CSVParserTests(Py23TestCase):
def test_general(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, 0, 5, Use for test name 1
name2, EFUSE_BLK3, 5, 4, Use for test name 2
"""
t = efuse_table_gen.FuseTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].field_name, 'name1')
self.assertEqual(t[0].efuse_block, 'EFUSE_BLK3')
self.assertEqual(t[0].bit_start, 0)
self.assertEqual(t[0].bit_count, 5)
self.assertEqual(t[0].comment, 'Use for test name 1')
self.assertEqual(t[1].field_name, 'name2')
self.assertEqual(t[1].efuse_block, 'EFUSE_BLK3')
self.assertEqual(t[1].bit_start, 5)
self.assertEqual(t[1].bit_count, 4)
self.assertEqual(t[1].comment, 'Use for test name 2')
def test_seq_bit_start1_fill(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, , 5,
name2, EFUSE_BLK3, , 4,
"""
t = efuse_table_gen.FuseTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].field_name, 'name1')
self.assertEqual(t[0].bit_start, 0)
self.assertEqual(t[0].bit_count, 5)
self.assertEqual(t[1].field_name, 'name2')
self.assertEqual(t[1].bit_start, 5)
self.assertEqual(t[1].bit_count, 4)
def test_seq_bit_start2_fill(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, , 5,
name2, EFUSE_BLK2, , 4,
"""
t = efuse_table_gen.FuseTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].field_name, 'name1')
self.assertEqual(t[0].bit_start, 0)
self.assertEqual(t[0].bit_count, 5)
self.assertEqual(t[1].field_name, 'name2')
self.assertEqual(t[1].bit_start, 0)
self.assertEqual(t[1].bit_count, 4)
def test_seq_bit_start3_fill(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, , 5,
name2, EFUSE_BLK2, , 4,
name3, EFUSE_BLK2, 5, 4,
"""
t = efuse_table_gen.FuseTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].field_name, 'name1')
self.assertEqual(t[0].bit_start, 0)
self.assertEqual(t[0].bit_count, 5)
self.assertEqual(t[1].field_name, 'name2')
self.assertEqual(t[1].bit_start, 0)
self.assertEqual(t[1].bit_count, 4)
self.assertEqual(t[2].field_name, 'name3')
self.assertEqual(t[2].bit_start, 5)
self.assertEqual(t[2].bit_count, 4)
def test_seq_bit_start4_fill(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, , 5,
name2, EFUSE_BLK2, , 4,
, EFUSE_BLK2, , 4,
name1, EFUSE_BLK3, , 5,
"""
with self.assertRaisesRegex(efuse_table_gen.InputError, "Field names must be unique"):
efuse_table_gen.FuseTable.from_csv(csv)
def test_seq_bit_start5_fill(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, , 5,
name2, EFUSE_BLK2, , 4,
, EFUSE_BLK2, , 4,
name3, EFUSE_BLK3, 5, 5,
"""
t = efuse_table_gen.FuseTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].field_name, 'name1')
self.assertEqual(t[0].bit_start, 0)
self.assertEqual(t[0].bit_count, 5)
self.assertEqual(t[1].field_name, 'name2')
self.assertEqual(t[1].bit_start, 0)
self.assertEqual(t[1].bit_count, 4)
self.assertEqual(t[2].field_name, 'name2')
self.assertEqual(t[2].bit_start, 4)
self.assertEqual(t[2].bit_count, 4)
self.assertEqual(t[3].field_name, 'name3')
self.assertEqual(t[3].bit_start, 5)
self.assertEqual(t[3].bit_count, 5)
def test_overlapping_bit_start_fail(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, 1, 5, Use for test name 1
name2, EFUSE_BLK3, 5, 4, Use for test name 2
"""
t = efuse_table_gen.FuseTable.from_csv(csv)
        with self.assertRaisesRegex(efuse_table_gen.InputError, "overlap"):
t.verify()
def test_empty_field_name_fail(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
, EFUSE_BLK3, , 5,
name2, EFUSE_BLK2, , 4,
"""
with self.assertRaisesRegex(efuse_table_gen.InputError, "missing field name"):
efuse_table_gen.FuseTable.from_csv(csv)
def test_unique_field_name_fail(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, 0, 5, Use for test name 1
name1, EFUSE_BLK3, 5, 4, Use for test name 2
"""
with self.assertRaisesRegex(efuse_table_gen.InputError, "Field names must be unique"):
efuse_table_gen.FuseTable.from_csv(csv)
def test_bit_count_empty_fail(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3, 0, , Use for test name 1
name2, EFUSE_BLK3, 5, 4, Use for test name 2
"""
with self.assertRaisesRegex(efuse_table_gen.InputError, "empty"):
efuse_table_gen.FuseTable.from_csv(csv)
def test_bit_start_num_fail(self):
csv = """
# field_name, efuse_block(EFUSE_BLK0..EFUSE_BLK3), bit_start(0..255), bit_count, comment
name1, EFUSE_BLK3,
|
ironexmaiden/csd_post_sw
|
docs/conf.py
|
Python
|
mit
| 7,270 | 0.005227 |
# -*- coding: utf-8 -*-
#
# Bottle documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 18 18:09:50 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
bottle_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'../'))
sys.path.insert(0, bottle_dir)
import bottle
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bottle'
copyright = unicode('2009-%s, %s' % (time.strftime('%Y'), bottle.__author__))
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = ".".join(bottle.__version__.split(".")[:2])
# The full version, including alpha/beta/rc tags.
release = bottle.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo_nav.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style="bottle.css"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebar-intro.html', 'sourcelink.html', 'donation.html', 'searchbox.html'],
'**': ['localtoc.html', 'relations.html', 'sourcelink.html', 'donation.html', 'searchbox.html']
}
html_context = {
'releases': [('dev', 'development'),
('0.10', 'stable'),
('0.9', 'old stable')
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bottledoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bottle.tex', u'Bottle Documentation',
bottle.__author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo_nav.png"
# For "manual" documen
|
ts, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None)}
autodoc_member_order = 'bysource'
locale_dirs = ['./locale']
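A hedged note on how a conf.py like the one above is consumed (the build paths are illustrative, not taken from the repository):

# The settings above are read by Sphinx at build time, typically via
#   sphinx-build -b html docs/ docs/_build/html
# where html_theme, html_sidebars and intersphinx_mapping all take effect.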
|
artPlusPlus/elemental-backend
|
elemental_backend/serialization/_immutable_type_resource_io.py
|
Python
|
mpl-2.0
| 180 | 0 |
from marshmallow import fields
from ._resource_io import ResourceSchema
class ImmutableTypeResourceSchema(ResourceSchema):
    label = fields.String()
    doc = fields.String()
|
fernandalavalle/mlab-ns
|
server/mapreduce/lib/simplejson/__init__.py
|
Python
|
apache-2.0
| 12,383 | 0.001615 |
#!/usr/bin/env python
r"""A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> compact = simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
>>> # Can't assume dict ordering
>>> compact in ('[1,2,3,{"4":5,"6":7}]', '[1,2,3,{"6":7,"4":5}]')
True
Pretty printing (using repr() because of extraneous whitespace in the output)::
>>> import simplejson
>>> print repr(simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
'{\n "4": 5, \n "6": 7\n}'
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == ["foo", {"bar":["baz", None, 1.0, 2]}]
True
>>> simplejson.loads('"\\"foo\\bar"') == '"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io) == ["streaming API"]
True
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> simplejson.loads('1.1', parse_float=Decimal) == Decimal("1.1")
True
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(ComplexEncoder().iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.5'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.def
|
pattisdr/osf.io
|
admin/nodes/urls.py
|
Python
|
apache-2.0
| 2,100 | 0.003333 |
from django.conf.urls import url
from admin.nodes import views
app_name = 'admin'
urlpatterns = [
url(r'^$', views.NodeFormView.as_view(),
name='search'),
url(r'^flagged_spam$', views.NodeFlaggedSpamList.as_view(),
name='flagged-spam'),
url(r'^known_spam$', views.NodeKnownSpamList.as_view(),
name='known-spam'),
url(r'^known_ham$', views.NodeKnownHamList.as_view(),
name='known-ham'),
    url(r'^(?P<guid>[a-z0-9]+)/$', views.NodeView.as_view(),
name='node'),
url(r'^(?P<guid>[a-z0-9]+)/logs/$', views.AdminNodeLogView.as_view(),
name='node-logs'),
url(r'^registration_list/$', views.RegistrationListView.as_view(),
name='registrations'),
url(r'^stuck_registration_list/$', views.StuckRegistrationListView.as_view(),
name='stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/update_embargo/$',
views.RegistrationUpdateEmbargoView.as_view(), name='update_embargo'),
url(r'^(?P<guid>[a-z0-9]+)/remove/$', views.NodeDeleteView.as_view(),
name='remove'),
url(r'^(?P<guid>[a-z0-9]+)/restore/$', views.NodeDeleteView.as_view(),
name='restore'),
url(r'^(?P<guid>[a-z0-9]+)/confirm_spam/$', views.NodeConfirmSpamView.as_view(),
name='confirm-spam'),
url(r'^(?P<guid>[a-z0-9]+)/confirm_ham/$', views.NodeConfirmHamView.as_view(),
name='confirm-ham'),
url(r'^(?P<guid>[a-z0-9]+)/reindex_share_node/$', views.NodeReindexShare.as_view(),
name='reindex-share-node'),
url(r'^(?P<guid>[a-z0-9]+)/reindex_elastic_node/$', views.NodeReindexElastic.as_view(),
name='reindex-elastic-node'),
url(r'^(?P<guid>[a-z0-9]+)/restart_stuck_registrations/$', views.RestartStuckRegistrationsView.as_view(),
name='restart-stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/remove_stuck_registrations/$', views.RemoveStuckRegistrationsView.as_view(),
name='remove-stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/remove_user/(?P<user_id>[a-z0-9]+)/$',
views.NodeRemoveContributorView.as_view(), name='remove_user'),
]
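A quick sketch of how these named routes could be resolved elsewhere in the code base (the guid value is a placeholder, and mounting the URLconf under the 'admin' namespace is an assumption based on app_name above; on older Django the import is django.core.urlresolvers instead):

# Hypothetical reverse() lookups for the patterns above
from django.urls import reverse

node_url = reverse('admin:node', kwargs={'guid': 'abc12'})
logs_url = reverse('admin:node-logs', kwargs={'guid': 'abc12'})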
|
wangyum/tensorflow
|
tensorflow/contrib/seq2seq/python/kernel_tests/decoder_test.py
|
Python
|
apache-2.0
| 6,979 | 0.006591 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.decoder."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
class DynamicDecodeRNNTest(test.TestCase):
def _testDynamicDecodeRNN(self, time_major, maximum_iterations=None):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = 10
max_out = max(sequence_length)
with self.test_session(use_gpu=True) as sess:
if time_major:
        inputs = np.random.randn(max_time, batch_size,
                                 input_depth).astype(np.float32)
else:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
cell = core_rnn_cell.LSTMCell(cell_depth)
helper = helper_py.TrainingHelper(
inputs, sequence_length, time_major=time_major)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
          initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
final_outputs, final_state, final_sequence_length = (
decoder.dynamic_decode(my_decoder, output_time_major=time_major,
maximum_iterations=maximum_iterations))
def _t(shape):
if time_major:
return (shape[1], shape[0]) + shape[2:]
return shape
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertTrue(isinstance(final_state, core_rnn_cell.LSTMStateTuple))
self.assertEqual(
(batch_size,),
tuple(final_sequence_length.get_shape().as_list()))
self.assertEqual(
_t((batch_size, None, cell_depth)),
tuple(final_outputs.rnn_output.get_shape().as_list()))
self.assertEqual(
_t((batch_size, None)),
tuple(final_outputs.sample_id.get_shape().as_list()))
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"final_outputs": final_outputs,
"final_state": final_state,
"final_sequence_length": final_sequence_length,
})
# Mostly a smoke test
time_steps = max_out
if maximum_iterations is not None:
time_steps = min(max_out, maximum_iterations)
self.assertEqual(
_t((batch_size, time_steps, cell_depth)),
sess_results["final_outputs"].rnn_output.shape)
self.assertEqual(
_t((batch_size, time_steps)),
sess_results["final_outputs"].sample_id.shape)
def testDynamicDecodeRNNBatchMajor(self):
self._testDynamicDecodeRNN(time_major=False)
def testDynamicDecodeRNNTimeMajor(self):
self._testDynamicDecodeRNN(time_major=True)
def testDynamicDecodeRNNZeroMaxIters(self):
self._testDynamicDecodeRNN(time_major=True, maximum_iterations=0)
def testDynamicDecodeRNNOneMaxIter(self):
self._testDynamicDecodeRNN(time_major=True, maximum_iterations=1)
def _testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNN(
self, use_sequence_length):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = 10
max_out = max(sequence_length)
with self.test_session(use_gpu=True) as sess:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
cell = core_rnn_cell.LSTMCell(cell_depth)
zero_state = cell.zero_state(dtype=dtypes.float32, batch_size=batch_size)
helper = helper_py.TrainingHelper(inputs, sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell, helper=helper, initial_state=zero_state)
# Match the variable scope of dynamic_rnn below so we end up
# using the same variables
with vs.variable_scope("root") as scope:
final_decoder_outputs, final_decoder_state, _ = decoder.dynamic_decode(
my_decoder,
# impute_finished=True ensures outputs and final state
# match those of dynamic_rnn called with sequence_length not None
impute_finished=use_sequence_length,
scope=scope)
with vs.variable_scope(scope, reuse=True) as scope:
final_rnn_outputs, final_rnn_state = rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length if use_sequence_length else None,
initial_state=zero_state,
scope=scope)
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"final_decoder_outputs": final_decoder_outputs,
"final_decoder_state": final_decoder_state,
"final_rnn_outputs": final_rnn_outputs,
"final_rnn_state": final_rnn_state
})
# Decoder only runs out to max_out; ensure values are identical
# to dynamic_rnn, which also zeros out outputs and passes along state.
self.assertAllClose(sess_results["final_decoder_outputs"].rnn_output,
sess_results["final_rnn_outputs"][:, 0:max_out, :])
if use_sequence_length:
self.assertAllClose(sess_results["final_decoder_state"],
sess_results["final_rnn_state"])
def testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNNWithSeqLen(self):
self._testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNN(
use_sequence_length=True)
def testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNNNoSeqLen(self):
self._testDynamicDecodeRNNWithTrainingHelperMatchesDynamicRNN(
use_sequence_length=False)
if __name__ == "__main__":
test.main()
|
fkorotkov/pants
|
src/python/pants/java/nailgun_client.py
|
Python
|
apache-2.0
| 7,704 | 0.010903 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import logging
import os
import signal
import socket
import sys
from pants.java.nailgun_io import NailgunStreamWriter
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
from pants.util.socket import RecvBufferedSocket
logger = logging.getLogger(__name__)
class NailgunClientSession(NailgunProtocol):
"""Handles a single nailgun client session."""
def __init__(self, sock, in_fd, out_fd, err_fd, exit_on_broken_pipe=False):
self._sock = sock
if in_fd:
self._input_writer = NailgunStreamWriter(in_fd, self._sock,
ChunkType.STDIN, ChunkType.STDIN_EOF)
else:
self._input_writer = None
self._stdout = out_fd
self._stderr = err_fd
self._exit_on_broken_pipe = exit_on_broken_pipe
self.remote_pid = None
def _maybe_start_input_writer(self):
if self._input_writer:
self._input_writer.start()
def _maybe_stop_input_writer(self):
if self._input_writer:
self._input_writer.stop()
def _write_flush(self, fd, payload=None):
"""Write a payload to a given fd (if provided) and flush the fd."""
try:
if payload:
fd.write(payload)
fd.flush()
except (IOError, OSError) as e:
# If a `Broken Pipe` is encountered during a stdio fd write, we're headless - bail.
if e.errno == errno.EPIPE and self._exit_on_broken_pipe:
sys.exit()
# Otherwise, re-raise.
raise
def _process_session(self):
"""Process the outputs of the nailgun session."""
try:
for chunk_type, payload in self.iter_chunks(self._sock, return_bytes=True):
if chunk_type == ChunkType.STDOUT:
self._write_flush(self._stdout, payload)
elif chunk_type == ChunkType.STDERR:
self._write_flush(self._stderr, payload)
elif chunk_type == ChunkType.EXIT:
self._write_flush(self._stdout)
self._write_flush(self._stderr)
return int(payload)
elif chunk_type == ChunkType.PID:
self.remote_pid = int(payload)
elif chunk_type == ChunkType.START_READING_INPUT:
self._maybe_start_input_writer()
else:
raise self.ProtocolError('received unexpected chunk {} -> {}'.format(chunk_type, payload))
finally:
# Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in
# NailgunProtocol.iter_chunks(). This ensures the NailgunStreamWriter is always stopped.
self._maybe_stop_input_writer()
def execute(self, working_dir, main_class, *arguments, **environment):
# Send the nailgun request.
self.send_request(self._sock, working_dir, main_class, *arguments, **environment)
# Process the remainder of the nailgun session.
return self._process_session()
class NailgunClient(object):
"""A python nailgun client (see http://martiansoftware.com/nailgun for more info)."""
class NailgunError(Exception):
"""Indicates an error interacting with a nailgun server."""
class NailgunConnectionError(NailgunError):
"""Indicates an error upon initial connect to the nailgun server."""
# For backwards compatibility with nails expecting the ng c client special env vars.
ENV_DEFAULTS = dict(NAILGUN_FILESEPARATOR=os.sep, NAILGUN_PATHSEPARATOR=os.pathsep)
DEFAULT_NG_HOST = '127.0.0.1'
DEFAULT_NG_PORT = 2113
def __init__(self, host=DEFAULT_NG_HOST, port=DEFAULT_NG_PORT, ins=sys.stdin, out=None, err=None,
workdir=None, exit_on_broken_pipe=False):
"""Creates a nailgun client that can be used to issue zero or more nailgun commands.
:param string host: the nailgun server to contact (defaults to '127.0.0.1')
:param int port: the port the nailgun server is listening on (defaults to the default nailgun
port: 2113)
:param file ins: a file to read command standard input from (defaults to stdin) - can be None
in which case no input is read
:param file out: a stream to write command standard output to (defaults to stdout)
:param file err: a stream to write command standard error to (defaults to stderr)
:param string workdir: the default working directory for all nailgun commands (defaults to CWD)
:param bool exit_on_broken_pipe: whether or not to exit when `Broken Pipe` errors are encountered.
"""
self._host = host
self._port = port
self._stdin = ins
self._stdout = out or sys.stdout
self._stderr = err or sys.stderr
self._workdir = workdir or os.path.abspath(os.path.curdir)
self._exit_on_broken_pipe = exit_on_broken_pipe
self._session = None
def try_connect(self):
"""Creates a socket, connects it to the nailgun and returns the connected socket.
:returns: a connected `socket.socket`.
:raises: `NailgunClient.NailgunConnectionError` on failure to connect.
"""
sock = RecvBufferedSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
try:
sock.connect((self._host, self._port))
except (socket.error, socket.gaierror) as e:
logger.debug('Encountered socket exception {!r} when attempting connect to nailgun'.format(e))
sock.close()
raise self.NailgunConnectionError(
'Problem connecting to nailgun server at {}:{}: {!r}'.format(self._host, self._port, e))
else:
return sock
def send_control_c(self):
"""Sends SIGINT to a nailgun server using pid information from the active session."""
if self._session and self._session.remote_pid is not None:
os.kill(self._session.remote_pid, signal.SIGINT)
def execute(self, main_class, cwd=None, *args, **environment):
"""Executes the given main_class with any supplied args in the given environment.
:param string main_class: the fully qualified class name of the main entrypoint
:param string cwd: Set the working directory for this command
:param list args: any arguments to pass to the main entrypoint
:param dict environment: an env mapping made available to native nails via the nail context
:returns: the exit code of the main_class.
"""
environment = dict(self.ENV_DEFAULTS.items() + environment.items())
cwd = cwd or self._workdir
# N.B. This can throw NailgunConnectionError (catchable via NailgunError).
sock = self.try_connect()
self._session = NailgunClientSession(sock,
self._stdin,
self._stdout,
self._stderr,
self._exit_on_broken_pipe)
try:
return self._session.execute(cwd, main_class, *args, **environment)
    except socket.error as e:
raise self.NailgunError('Problem communicating with nailgun server at {}:{}: {!r}'
.format(self._host, self._port, e))
except NailgunProtocol.ProtocolError as e:
raise self.NailgunError('Problem in nailgun protocol with nailgun server at {}:{}: {!r}'
.format(self._host, self._port, e))
finally:
sock.close()
self._session = None
def __repr__(self):
return 'NailgunClient(host={!r}, port={!r}, workdir={!r})'.format(self._host,
self._port,
self._workdir)
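A brief usage sketch for the client defined above (server address and main class are placeholders; note that the first positional argument after main_class is cwd):

# Hypothetical use of NailgunClient against a locally running nailgun server
client = NailgunClient(host='127.0.0.1', port=2113)
try:
  exit_code = client.execute('com.example.Main', None, '--verbose')
except NailgunClient.NailgunError as e:
  print('nailgun call failed: {!r}'.format(e))
else:
  print('nail exited with status {}'.format(exit_code))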
|
b29308188/cs598vqa
|
src/CBP/reuse_test.py
|
Python
|
mit
| 98 | 0.040816 |
import tensorflow as tf
def f():
with tf.variable_scope('A') as scope:
        print scope.reuse
f()
|
jdmcbr/Shapely
|
tests/test_multilinestring.py
|
Python
|
bsd-3-clause
| 3,200 | 0.000625 |
from . import unittest, numpy, test_int_types
from .test_multi import MultiGeometryTestCase
from shapely.geos import lgeos
from shapely.geometry import LineString, MultiLineString, asMultiLineString
from shapely.geometry.base import dump_coords
class MultiLineStringTestCase(MultiGeometryTestCase):
def test_multilinestring(self):
# From coordinate tuples
geom = MultiLineString((((1.0, 2.0), (3.0, 4.0)),))
self.assertIsInstance(geom, MultiLineString)
self.assertEqual(len(geom.geoms), 1)
self.assertEqual(dump_coords(geom), [[(1.0, 2.0), (3.0, 4.0)]])
# From lines
a = LineString(((1.0, 2.0), (3.0, 4.0)))
ml = MultiLineString([a])
self.assertEqual(len(ml.geoms), 1)
self.assertEqual(dump_coords(ml), [[(1.0, 2.0), (3.0, 4.0)]])
# From another multi-line
ml2 = MultiLineString(ml)
self.assertEqual(len(ml2.geoms), 1)
self.assertEqual(dump_coords(ml2), [[(1.0, 2.0), (3.0, 4.0)]])
# Sub-geometry Access
geom = MultiLineString([(((0.0, 0.0), (1.0, 2.0)))])
self.assertIsInstance(geom[0], LineString)
self.assertEqual(dump_coords(geom[0]), [(0.0, 0.0), (1.0, 2.0)])
with self.assertRaises(IndexError): # index out of range
geom.geoms[1]
# Geo interface
self.assertEqual(geom.__geo_interface__,
{'type': 'MultiLineString',
'coordinates': (((0.0, 0.0), (1.0, 2.0)),)})
def test_from_multilinestring_z(self):
coords1 = [(0.0, 1.0, 2.0), (3.0, 4.0, 5.0)]
coords2 = [(6.0, 7.0, 8.0), (9.0, 10.0, 11.0)]
# From coordinate tuples
        ml = MultiLineString([coords1, coords2])
copy = MultiLineString(ml)
self.assertIsInstance(copy, MultiLineString)
self.assertEqual('MultiLineString',
lgeos.GEOSGeomType(copy._geom).decode('ascii'))
self.assertEqual(len(copy.geoms), 2)
self.assertEqual(dump_coords(copy.geoms[0]), coords1)
self.assertEqual(dump_coords(copy.geoms[1]), coords2)
@unittest.skipIf(not numpy, 'Numpy required')
    def test_numpy(self):
from numpy import array
from numpy.testing import assert_array_equal
# Construct from a numpy array
geom = MultiLineString([array(((0.0, 0.0), (1.0, 2.0)))])
self.assertIsInstance(geom, MultiLineString)
self.assertEqual(len(geom.geoms), 1)
self.assertEqual(dump_coords(geom), [[(0.0, 0.0), (1.0, 2.0)]])
# Adapt a sequence of Numpy arrays to a multilinestring
a = [array(((1.0, 2.0), (3.0, 4.0)))]
geoma = asMultiLineString(a)
assert_array_equal(geoma.context, [array([[1., 2.], [3., 4.]])])
self.assertEqual(dump_coords(geoma), [[(1.0, 2.0), (3.0, 4.0)]])
# TODO: is there an inverse?
def test_subgeom_access(self):
line0 = LineString([(0.0, 1.0), (2.0, 3.0)])
line1 = LineString([(4.0, 5.0), (6.0, 7.0)])
self.subgeom_access_test(MultiLineString, [line0, line1])
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(MultiLineStringTestCase)
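A minimal sketch of invoking the suite helper directly (runner verbosity is arbitrary):

# Hypothetical direct invocation of test_suite()
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(test_suite())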
|
jkatzsam/matchtools
|
matchtools/hamming.py
|
Python
|
bsd-3-clause
| 1,270 | 0.040157 |
# -*- coding: utf-8 -*-
"""hamming.py: Return the Hamming distance between two integers (bitwise)."""
__author__ = "Russell J. Funk"
__date__ = "February 7, 2013"
__copyright__ = "Copyright (C) 2013"
__reference__ = ["http://wiki.python.org/moin/BitManipulation",
"http://en.wikipedia.org/wiki/Hamming_distance"]
__status__ = "Prototype"
def hamming(a, b):
"""Calculate the Hamming distance between two integers (bitwise).
Args:
a: a list of 1s and 0s
b: a list of 1s and 0s
Returns:
The hamming distance between two integers.
Raises:
    ValueError: Inputs must have the same bit length.
"""
if len(a) != len(b):
raise ValueError("Inputs must have same bit length.")
else:
distance = 0
for i in range(len(a)):
if a[i] != b[i]:
distance += 1
  return distance
def hamming_ratio(a, b, bits = 384):
"""Calculates the hamming ratio between two integers
represented as a list of bits.
Args:
a and b must be lists of 1s and 0s; the calculation
is relative to the number of bits.
Returns:
The hamming ratio between two integers.
"""
return float((bits - hamming(a,b)))/bits
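A small worked example for the two helpers above (the bit vectors are arbitrary):

# Hypothetical usage of hamming() and hamming_ratio()
a = [1, 0, 1, 1, 0, 0, 1, 0]
b = [1, 0, 0, 1, 0, 1, 1, 0]
hamming(a, b)                # -> 2, since the vectors differ at two positions
hamming_ratio(a, b, bits=8)  # -> (8 - 2) / 8 = 0.75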
|
afrantzis/pixel-format-guide
|
tests/__init__.py
|
Python
|
lgpl-2.1
| 768 | 0 |
# Copyright © 2017 Collabora Ltd.
#
# This file is part of pfg.
#
# pfg is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# pfg is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pfg. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alexandros Frantzis <alexandros.frantzis@collabora.com>
|
mir-group/flare
|
flare/utils/parameter_helper.py
|
Python
|
mit
| 49,237 | 0.001361 |
"""
For multi-component systems, the configurational space can be highly complicated.
One may want to use different hyper-parameters and cutoffs for different interactions,
or do constraint optimisation for hyper-parameters.
To use more hyper-parameters, we need special kernel function that can differentiate different
pairs, triplets and other descriptors and determine which number to use for what interaction.
This kernel can be enabled by using the ``hyps_mask`` argument of the GaussianProcess class.
It contains multiple arrays to describe how to break down the array of hyper-parameters and
apply them when computing the kernel. Detail descriptions of this argument can be seen in
kernel/mc_sephyps.py.
The ParameterHelper class is to generate the hyps_mask with a more human readable interface.
Example:
>>> pm = ParameterHelper(species=['C', 'H', 'O'],
... kernels={'twobody':[['*', '*'], ['O','O']],
... 'threebody':[['*', '*', '*'],
... ['O','O', 'O']]},
... parameters={'twobody0':[1, 0.5, 1], 'twobody1':[2, 0.2, 2],
... 'threebody0':[1, 0.5], 'threebody1':[2, 0.2],
... 'cutoff_threebody':1},
... constraints={'twobody0':[False, True]})
>>> hm = pm.as_dict()
>>> kernels = hm['kernels']
>>> gp_model = GaussianProcess(kernels=kernels,
... hyps=hyps, hyps_mask=hm)
In this example, four atomic species are involved. There are many kinds
of twobodys and threebodys. But we only want to use eight different signal variance
and length-scales.
In order to do so, we first define all the twobodys to be group "twobody0", by
listing "*-*" as the first element in the twobody argument. The second
element O-O is then defined to be group "twobody1". Note that the order
matters here. The later element overrides the earlier one. If
twobodys=[['O', 'O'], ['*', '*']], then all twobodys belong to group "twobody1".
Similarly, O-O-O is defined as threebody1, while all remaining ones
are left as threebody0.
The hyperpameters for each group is listed in the order of
[sig, ls, cutoff] in the parameters argument. So in this example,
O-O interaction will use [2, 0.2, 2] as its sigma, length scale, and
cutoff.
For threebody, the parameter arrays only come with two elements. So there
is no cutoff associated with threebody0 or threebody1; instead, a universal
cutoff is used, which is defined as 'cutoff_threebody'.
The constraints argument define which hyper-parameters will be optimized.
True for optimized and false for being fixed.
Here are a couple more simple examples.
Define a 5-parameter 2+3 kernel (1, 0.5, 1, 0.5, 0.05)
>>> pm = ParameterHelper(kernels=['twobody', 'threebody'],
... parameters={'sigma': 1,
... 'lengthscale': 0.5,
... 'cutoff_twobody': 2,
... 'cutoff_threebody': 1,
... 'noise': 0.05})
Define a 5-parameter 2+3 kernel (1, 1, 1, 1, 0.05)
>>> pm = ParameterHelper(kernels=['twobody', 'threebody'],
... parameters={'cutoff_twobody': 2,
... 'cutoff_threebody': 1,
... 'noise': 0.05},
... ones=ones,
... random=not ones)
Define a 9-parameter 2+3 kernel
>>> pm = ParameterHelper()
>>> pm.define_group('specie', 'O', ['O'])
>>> pm.define_group('specie', 'rest', ['C', 'H'])
>>> pm.define_group('twobody', '**', ['*', '*'])
>>> pm.define_group('twobody', 'OO', ['O', 'O'])
>>> pm.define_group('threebody', '***', ['*', '*', '*'])
>>> pm.define_group('threebody', 'Oall', ['O', 'O', 'O'])
>>> pm.set_parameters('**', [1, 0.5])
>>> pm.set_parameters('OO', [1, 0.5])
>>> pm.set_parameters('Oall', [1, 0.5])
>>> pm.set_parameters('***', [1, 0.5])
>>> pm.set_parameters('cutoff_twobody', 5)
>>> pm.set_parameters('cutoff_threebody', 4)
See more examples in functions ``ParameterHelper.define_group`` , ``ParameterHelper.set_parameters``,
and in the tests ``tests/test_parameters.py``
If you want to add in a new hyperparameter set to an already-existing GP, you can perform the
following steps:
>> hyps_mask = pm.as_dict()
>> hyps = hyps_mask['hyps']
>> kernels = hyps_mask['kernels']
>> gp_model.update_kernel(kernels, 'mc', hyps_mask)
>> gp_model.hyps = hyps
"""
import inspect
import json
import logging
import math
import numpy as np
import pickle
import time
from copy import deepcopy
from itertools import combinations_with_replacement, permutations
from numpy import array as nparray
from numpy import max as npmax
from numpy.random import random as nprandom
from typing import List, Callable, Union
from flare.output import set_logger
from flare.parameters import Parameters
from flare.utils.element_coder import element_to_Z, Z_to_element
class ParameterHelper:
"""
A helper class to construct the hyps_mask dictionary for AtomicEnvironment
, GaussianProcess and MappedGaussianProcess
Args:
hyps_mask (dict): Not implemented yet
species (dict, list): Define specie groups
kernels (dict, list): Define kernels and groups for the kernels
        cutoff_groups (dict): Define different cutoffs for different species
parameters (dict): Define signal variance, length scales, and cutoffs
        constraints (dict): If listed as False, the corresponding hyperparameters
will not be trained
allseparate (bool): If True, define each type pair/triplet into a
separate group.
random (bool): If True, randomized all signal variances and lengthscales
one (bool): If True, set all signal variances and lengthscales to one
verbose (str): Level to print with "ERROR", "WARNING", "INFO", "DEBUG"
* the ``species`` is an optional input. It can be left as None if the user only wants
to set up one group of hyper-parameters for each kernel.
    * the ``kernels`` can be defined along with or without groups. But the latter mode
is not compatible with the ``allseparate`` flag.
>>> kernels=['twobody', 'threebody'],
or
>>> kernels={'twobody':[['*', '*'], ['O','O']],
... 'threebody':[['*', '*', '*'],
... ['O','O', 'O']]},
Current options for the kernels are twobody, threebody and manybody (based on coordination number).
* See format of ``species``, ``kernels`` (dict), and ``cutoff_groups`` in ``list_groups()`` function.
* See format of ``parameters`` and ``constraints`` in ``list_parameters()`` function.
"""
# TO DO, sync it to kernel class
# need to be synced with kernel class
# name of the kernels
all_kernel_types = ["twobody", "threebody", "manybody"]
cutoff_types = {"cut3b": "threebody"}
cutoff_types_keys = list(cutoff_types.keys())
cutoff_types_values = list(cutoff_types.values())
additional_groups = []
# dimension of the kernels
ndim = {"twobody": 2, "threebody": 3, "manybody": 2, "cut3b": 2}
n_kernel_parameters = {"twobody": 2, "threebody": 2, "manybody": 2, "cut3b": 0}
def __init__(
self,
hyps_mask=None,
species=None,
kernels={},
cutoff_groups={},
parameters=None,
constraints={},
allseparate=False,
random=False,
ones=False,
verbose="WARNING",
):
self.logger = set_logger(
"ParameterHelper", stream=True, fileout_name=None, verbose=verbose
)
self.all_group_types = (
ParameterHelper.all_kernel_types
+ self.cutoff_types_keys
+ self.additional_groups
)
self.all_types = ["specie"] + self.all_group_types
# number of groups {'twobody': 1, 'threebody': 2}
self.n = {}
# definition of groups {'specie': [['C', 'H'], ['O']],
# 'twobody': [[['*', '*']], [[ele1, ele2]]]}
self.groups = {}
# joint values of the groups {'specie': ['C', 'H', 'O'],
# 'twobody': [['*
|
bureaucratic-labs/yargy
|
yargy/api.py
|
Python
|
mit
| 1,590 | 0 |
from .check import assert_type
from .predicates import (
eq,
is_predicate,
Predicate,
AndPredicate,
OrPredicate,
NotPredicate,
)
from .relations import (
is_relation,
Main,
Relation,
AndRelation,
OrRelation,
NotRelation
)
from .rule import (
is_rule,
Production,
Rule,
OrRule,
EmptyRule,
ForwardRule,
)
__all__ = [
'rule',
'empty',
'forward',
'and_',
'or_',
'not_',
]
def prepare_production_item(item):
if not isinstance(item, (Predicate, Rule, Main)):
return eq(item)
else:
return item
def rule(*items):
    production = Production([prepare_production_item(_) for _ in items])
return Rule([production])
empty = EmptyRule
forward = ForwardRule
def and_(*items):
    if all(is_predicate(_) for _ in items):
return AndPredicate(items)
elif all(is_relation(_) for _ in items):
return AndRelation(items)
else:
types = [type(_) for _ in items]
raise TypeError('mixed types: %r' % types)
def or_(*items):
if all(is_predicate(_) for _ in items):
return OrPredicate(items)
elif all(is_relation(_) for _ in items):
return OrRelation(items)
elif all(is_rule(_) for _ in items):
return OrRule(items)
else:
types = [type(_) for _ in items]
raise TypeError('mixed types: %r' % types)
def not_(item):
assert_type(item, (Predicate, Relation))
if is_predicate(item):
return NotPredicate(item)
elif is_relation(item):
return NotRelation(item)
|
ghchinoy/tensorflow
|
tensorflow/contrib/distribute/python/monitor.py
|
Python
|
apache-2.0
| 2,460 | 0.005691 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitor is responsible for training, checkpointing and recovery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
class Monitor(object):
"""Executes training steps, recovers and checkpoints.
Note that this class is particularly preliminary, experimental, and
  expected to change.
"""
# TODO(isaprykin): Support step functions that need multiple session calls.
# TODO(isaprykin): Support extra arguments to the step function.
# TODO(isaprykin): Support recovery, checkpointing and summaries.
def __init__(self, step_callable, session=None):
"""Initialize the Monitor with components for executing training steps.
Args:
step_callable: a training `Step` that's capable of signaling when done.
session: a `Session` instance that's needed for graph mode.
Raises:
ValueError: if `session` was provided for eager mode or not provided for
graph mode.
"""
if context.executing_eagerly():
if session is not None:
raise ValueError("Should not provide a `session` in Eager mode.")
self._run_step = step_callable
else:
if session is None:
raise ValueError("Should provide a `session` in Graph mode.")
session.run(step_callable.initialize())
self._run_step = session.make_callable(step_callable())
session.run(variables.global_variables_initializer())
def run_steps(self, num_steps=None):
step = 0
while num_steps is None or step < num_steps:
try:
self._run_step()
step += 1
except errors.OutOfRangeError:
break
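A minimal eager-mode usage sketch for the class above (the step callable is a stand-in; a real one would run an optimizer step and raise errors.OutOfRangeError when the input pipeline is exhausted):

# Hypothetical eager-mode use of Monitor
def train_step():
  # run one optimization step here; raise errors.OutOfRangeError when data runs out
  pass

monitor = Monitor(train_step)   # no session argument is passed in eager mode
monitor.run_steps(num_steps=100)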
|
lechuckcaptain/urlwatch
|
lib/urlwatch/__init__.py
|
Python
|
bsd-3-clause
| 598 | 0.001672 |
"""A tool for monitoring webpages for updates
urlwatch is intended to help you watch changes in webpages and get notified
(via email, in your terminal or with a custom-written reporter class) of any
changes. The change notification will include the URL that has changed and
a unified diff of what has changed.
"""
pkgname = 'urlwatch'
__copyright__ = 'Copyright 2008-2016 Thomas Perl'
__author__ = 'Thomas Perl <m@thp.io>'
__license__ = 'BSD'
__url__ = 'http://thp.io/2008/urlwatch/'
__version__ = '2.5'
__user_agent__ = '%s/%s (+http://thp.io/2008/urlwatch/info.html)' % (pkgname, __version__)
|
OpenDaisy/daisy-client
|
daisyclient/openstack/common/_i18n.py
|
Python
|
apache-2.0
| 1,733 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
try:
import oslo.i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
    _translators = oslo.i18n.TranslatorFactory(domain='glanceclient')
    # The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
except ImportError:
# NOTE(dims): Support for cases where a project wants to use
# code from oslo-incubator, but is not ready to be internationalized
# (like tempest)
_ = _LI = _LW = _LE = _LC = lambda x: x
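A small usage sketch for the translation helpers set up above (the message text is illustrative):

# Hypothetical usage elsewhere in the client code
raise ValueError(_('Unknown endpoint: %s') % 'http://example.com')
# _LI/_LW/_LE/_LC are the log-level variants, e.g. LOG.warning(_LW('retrying %s'), url)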
|
jaric-thorning/ProjectCapital
|
GUI.py
|
Python
|
mit
| 6,637 | 0.02019 |
import datetime
from Tkinter import *
import tkMessageBox
import tkFileDialog
import random
import time
import share
class App(object):
'''Controls the running of the app'''
def __init__(self, master = None):
'''A controller class that runs the app
Constructor: Controller(object)'''
## Graphics
#TOP WINDOW
self._master = master
self._master.resizable(FALSE, FALSE)
self._height = 300
self._width = 900
self._master.minsize(self._width, self._height)
self._master.title("Project Capital")
self._canvas_height = self._height - 50
self._canvas_width = self._width
self.options = OptionsFrame(master, self)
        self.options.pack(side = TOP, fill = X)
        self.canvas = Canvas(master, bg = "black", height = self._canvas_height, width = self._canvas_width)
self.canvas.pack(side = TOP, fill = BOTH, expand = False)
#File Button
# create a toplevel menu
menubar = Menu(master)
menubar.add_command(label="Hello!")
menubar.add_command(label="Quit!")
# display the menu
master.config(menu=menubar)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open File")
filemenu.add_command(label="Save File")
filemenu.add_separator()
filemenu.add_command(label="Exit")
menubar.add_cascade(label="File", menu=filemenu)
## Ops
self._drawlist = []
self.draw_example_graph()
self.draw()
def clear(self):
self._drawlist = []
def draw(self):
self.canvas.delete(ALL)
for draw in self._drawlist:
self.canvas.create_line(draw.x0, draw.y0, draw.x1, draw.y1, fill = draw.get_fill())
self.canvas.pack()
def add_line(self, x0, y0, x1, y1, fill = None):
new_line = Draw_line(x0, y0, x1, y1)
if fill != None:
new_line.set_fill(fill)
else:
new_line.set_fill("blue")
self._drawlist.append(new_line)
def draw_rect(self):
self.add_line(-10,10,10,100)
self.add_line(10,100,100,100)
self.add_line(100,100,100,10)
self.add_line(100,10,10,10)
def draw_example_graph(self):
r = random.randrange(0,self._canvas_height)
randsum = 0
for i in range(0, self._canvas_width):
randnum = random.randrange(-1000,1000)
r = r + randnum/100.00 - ((self._canvas_height + r/2)/self._canvas_height) + 1
if r < 0: r = 0
self.add_line(i,self._canvas_height,i,self._canvas_height - r)
def load_historic(self, share_temp, share_historic):
self.clear()
print share_temp.get_name()
i = 0
max_value = float(0.0)
firstvalue = 0
for day in share_historic:
if float(day["Close"]) > max_value: max_value = float(day["Close"])
print "Max Value: " + str(max_value)
multiplyer = (self._canvas_height - 10)/max_value
print "Multipler: " + str(multiplyer)
for day in reversed(share_historic):
#print "n: " + str(self._canvas_height - float(day["Close"]))
#print float(day["Close"])
if i != 0:
if float(day["Close"]) < firstvalue:
self.add_line(i,self._canvas_height,i,self._canvas_height - float(day["Close"])*multiplyer, "red")
else:
self.add_line(i,self._canvas_height,i,self._canvas_height - float(day["Close"])*multiplyer, "green")
else:
self.add_line(i,self._canvas_height,i,self._canvas_height - float(day["Close"])*multiplyer)
firstvalue = float(day["Close"])
i += 1
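# Illustrative sketch only (not part of the original file): load_historic()
# above maps a closing price onto canvas pixels with a single scale factor.
# The default canvas height here is a hypothetical example value.
def _example_price_to_pixels(close, max_value, canvas_height=250):
    multiplier = (canvas_height - 10.0) / max_value   # leave a 10px margin at the top
    return canvas_height - close * multiplier          # y coordinate of the bar's top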
class Draw_line(object):
def __init__(self, x0, y0, x1, y1):
self.x0 = x0
self.x1 = x1
self.y0 = y0
self.y1 = y1
self.fill = "black"
def set_fill(self, fill):
self.fill = fill
return
def get_fill(self):
return self.fill
class OptionsFrame(Frame):
"""Lower level GUI interface responsible for the interface to
interact with the user.
"""
def __init__ (self, master, boss):
self._master = master
self._boss = boss
Frame.__init__(self, master)
data = Frame(master)
#ENTER LABEL
symbol = Label(data,text = "Enter Symbol:").pack(side=LEFT)
#ENTER ENTRY
self.date_entry = Entry(data)
self.date_entry.pack(side=LEFT, fill = BOTH, expand = True)
#ENTER BUTTON
Button(data,text = "Enter", command = self.update_now).pack(side=LEFT)
self.t_symbol = StringVar()
self.t_close = StringVar()
self.t_change = StringVar()
self.share_change_color = StringVar()
self.share_change_color.set("black")
self.share_name = Label(data, textvariable = self.t_symbol)
self.share_close = Label(data, textvariable = self.t_close)
self.share_change = Label(data, textvariable = self.t_change, fg = self.share_change_color.get())
self.share_name.pack(side=LEFT, padx = 5)
self.share_close.pack(side=LEFT, padx = 5)
self.share_change.pack(side=LEFT, padx = 5)
data.pack(anchor = 'sw', padx = 5, pady = 5)
def update_now(self):
print "updating"
share_string = str(self.date_entry.get()) + ".AX"
share_temp = share.Stock_Info(share_string)
p_date = '2013-01-01'
t_date = time.strftime("%Y-%m-%d")
share_historic = share_temp.get_historical1(p_date,t_date)
self._boss.clear()
self._boss.load_historic(share_temp, share_historic)
self._boss.draw()
share_temp.update_quote()
self.t_symbol.set(share_temp.get_symbol())
print share_temp.get_name()
self.t_close.set(share_temp.get_quote())
print share_temp.get_quote()
self.t_change.set(share_temp.get_change() + "%")
print share_temp.get_change()
change = float(share_temp.get_change())
if change < 0:
self.share_change.config(fg = "red")
elif change > 0:
self.share_change.config(fg = "green")
else:
self.share_change.config(fg = "black")
self.share_change.pack()
def main():
root = Tk()
app = App(root)
root.mainloop()
if __name__ == '__main__':
main()
|
nikdoof/dropbot
|
dropbot/bot.py
|
Python
|
mit
| 28,390 | 0.001937 |
from datetime import datetime
from xml.etree import ElementTree
import pkgutil
from json import loads as base_loads
from random import choice
import logging
import re
import urlparse
from sleekxmpp import ClientXMPP
from redis import Redis, ConnectionPool
import requests
from humanize import intcomma, naturaltime, intword
from pyzkb import ZKillboard
from eveapi import EVEAPIConnection
from dropbot.map import Map, base_range, ship_class_to_range
from dropbot.utils import EVEAPIRedisCache
from dropbot.stomp_listener import ZKillboardStompListener
urlparse.uses_netloc.append("redis")
zkillboard_regex = re.compile(r'http(s|):\/\/(?P<host>.*)\/kill\/(?P<killID>\d+)\/')
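# Illustrative sketch only (not part of the original module): what the pattern
# above extracts from a zKillboard-style kill link. The URL is a made-up example.
def _example_kill_link_match():
    m = zkillboard_regex.search('https://zkillboard.com/kill/12345/')
    # m.groupdict() == {'host': 'zkillboard.com', 'killID': '12345'}
    return m.groupdict() if m else None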
class UnknownCommandException(Exception):
pass
class DropBot(ClientXMPP):
def __init__(self, *args, **kwargs):
self.rooms = kwargs.pop('rooms', [])
self.nickname = kwargs.pop('nickname', 'Dropbot')
self.cmd_prefix = kwargs.pop('cmd_prefix', '!')
self.kos_url = kwargs.pop('kos_url', 'http://kos.cva-eve.org/api/')
self.hidden_commands = ['cmd_prefix']
self.last_killdate = datetime.utcnow()
self.kill_corps = [int(x) for x in kwargs.pop('kill_corps', [])]
self.kills_disabled = kwargs.pop('kills_disabled', '0') == '1'
self.kills_muted = False
self.office_api_key_keyid = kwargs.pop('office_api_keyid', None)
self.office_api_key_vcode = kwargs.pop('office_api_vcode', None)
self.market_systems = kwargs.pop('market_systems', ['Jita', 'Amarr', 'Rens', 'Dodixie', 'Hek'])
if 'redis_url' in kwargs:
self.redis_pool = ConnectionPool.from_url(kwargs.pop('redis_url', 'redis://localhost:6379/0'))
self.redis = Redis(connection_pool=self.redis_pool)
else:
logging.warning('No DROPBOT_REDIS_URL defined, EVE API calls will not be cached!')
self.redis = None
self.map = Map.from_json(pkgutil.get_data('dropbot', 'data/map.json'))
jid = kwargs.pop('jid', None)
password = kwargs.pop('password', None)
super(DropBot, self).__init__(jid, password)
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0045') # Multi-User Chat
self.register_plugin('xep_0199') # XMPP Ping
# Basic bot auto config
self.auto_subscribe = False
self.auto_authorize = True
# Handlers
self.add_event_handler('session_start', self.handle_session_start)
self.add_event_handler('message', self.handle_message)
# Reference Data
@property
def types(self):
if not hasattr(self, '_types'):
data = pkgutil.get_data('dropbot', 'data/types.json')
self._types = base_loads(data)
return self._types
@property
def stations(self):
if not hasattr(self, '_stations'):
data = pkgutil.get_data('dropbot', 'data/stations.json')
self._stations = base_loads(data)
logging.debug('Getting ConquerableStationList')
for x in self.get_eveapi().eve.ConquerableStationList().outposts:
self._stations[unicode(x.stationID)] = x.solarSystemID
return self._stations
# Command / Connection Handling
def handle_session_start(self, event):
self.get_roster()
self.send_presence()
# Join the defined MUC rooms
for room in self.rooms:
self.plugin['xep_0045'].joinMUC(room, self.nickname, wait=True)
# Start the killchecker if we have corps to monitor
if len(self.kill_corps) > 0 and not self.kills_disabled:
logging.info('Starting ZKB Stomp monitor for corps: {}'.format(', '.join(self.kill_corps)))
self.stomp = ZKillboardStompListener(self)
self.stomp.connect('tcp://eve-kill.net:61613')
else:
logging.info('Kill monitoring disabled.')
def call_command(self, command, *args, **kwargs):
if hasattr(self, 'cmd_%s' % command):
try:
resp = getattr(self, 'cmd_%s' % command)(*args, **kwargs)
except:
resp = 'Oops, something went wrong...'
logging.getLogger(__name__).exception('Error handling command')
if resp:
if isinstance(resp, tuple) and len(resp) == 2:
return resp
else:
return resp, None
else:
return None, None
else:
raise UnknownCommandException
def handle_message(self, msg):
args = msg['body'].split(' ')
cmd = args[0].lower()
args.pop(0)
if msg['type'] == 'groupchat':
if msg['mucnick'] == self.nickname:
return
if msg['body'][0] != self.cmd_prefix:
                # If it's not a command, check for ZKB URLs
seen = set([])
response_lines = []
for match in zkillboard_regex.finditer(msg['body']):
kill_id = match.groupdict()['killID']
host = match.groupdict()['host']
logging.info('Found Kill ID {}'.format(kill_id))
if kill_id in seen:
continue
body, html = self.call_command('kill', [kill_id], msg, no_url=True, host=host)
response_lines.append(body)
seen.add(kill_id)
response_lines = [x for x in response_lines if x]
if len(response_lines):
msg.reply('\n'.join(response_lines)).send()
return
# Strip the cmd_prefix
cmd = cmd[1:]
# Call the command
try:
body, html = self.call_command(cmd, args, msg)
except UnknownCommandException:
if msg['type'] != 'groupchat':
msg.reply('Unknown command, use "help" to list all commands available').send()
pass
        else:
            if body:
                msg.reply(body).send()
# Helpers
def _system_picker(self, name):
systems = self.map.get_systems(name)
if len(systems) > 1:
if len(systems) > 10:
return 'More than 10 systems match {}, please provide a more complete name'.format(name)
return 'Did you mean: {}?'.format(', '.join([self.map.get_system_name(x) for x in systems]))
elif len(systems) == 0:
return 'No systems found matching {}'.format(name)
else:
return systems[0]
def _item_picker(self, item):
if item.strip() == '':
return 'Usage: !price <item>'
if item.lower() == 'plex':
return (u"29668", u"30 Day Pilot's License Extension (PLEX)")
types = dict([(i, v) for i, v in self.types.iteritems() if item.lower() in v.lower()])
if len(types) == 0:
return "No items named {} found".format(item)
elif len(types) > 1:
for i, v in types.iteritems():
if item.lower() == v.lower():
return (i, v)
else:
if len(types) > 10:
return "More than 10 items found, please narrow down what you want."
return "Did you mean: {}?".format(
', '.join(types.itervalues())
)
return types.popitem()
def _get_evecentral_price(self, type_id, system_id):
try:
resp = requests.get('http://api.eve-central.com/api/marketstat?typeid={}&usesystem={}'.format(type_id, system_id))
root = ElementTree.fromstring(resp.content)
except:
return None
return (float(root.findall("./marketstat/type[@id='{}']/sell/min".format(type_id))[0].text),
float(root.findall("./marketstat/type[@id='{}']/buy/max".format(type_id))[0].text))
def _system_price(self, args, msg, system, system_id):
item = ' '.join(args)
res = self._item_picker(item)
if isinstance(res, basestring):
|
prerit2010/web-frontend
|
config.py
|
Python
|
gpl-3.0
| 49 | 0 |
SERVER_HOSTNAME = "127.0.0.1"
SERVER_PORT = 9671
|
pokermania/pokerengine
|
tests/test_game.py
|
Python
|
gpl-3.0
| 244,876 | 0.009131 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 - 2010 Loic Dachary <loic@dachary.org>
# Copyright (C) 2006 Mekensleep
#
# Mekensleep
# 26 rue des rosiers
# 75004 Paris
# licensing@mekensleep.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Authors:
# Pierre-Andre (05/2006)
# Loic Dachary <loic@dachary.org>
#
import os
import sys
import shutil
import libxml2
import string
import tempfile
import math
import unittest
from os import path
TESTS_PATH = path.dirname(path.realpath(__file__))
sys.path.insert(0, path.join(TESTS_PATH, ".."))
from tests.log_history import log_history
from collections import namedtuple
from pokerengine import pokercards
from pokerengine import pokergame
from tests.testmessages import search_output, clear_all_messages, get_messages
try:
from nose.plugins.attrib import attr
except ImportError, e:
def attr(fn): return fn
CallbackIds = None
CallbackArgs = None
# ---------------------------------------------------------
def InitCallback():
global CallbackIds
global CallbackArgs
CallbackIds = None
CallbackArgs = None
# ---------------------------------------------------------
def Callback(id, *args):
global CallbackIds
global CallbackArgs
if not CallbackIds: CallbackIds = []
if not CallbackArgs: CallbackArgs = []
CallbackIds.append(id)
CallbackArgs.append(args)
# ---------------------------------------------------------
class PokerPredefinedDecks:
def __init__(self, decks):
self.decks = decks
self.index = 0
def shuffle(self, deck):
deck[:] = self.decks[self.index][:]
self.index += 1
if self.index >= len(self.decks):
self.index = 0
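# Illustrative sketch only (not part of the original tests): shuffle() replaces
# the deck in place with the next predefined deck, wrapping around at the end.
def _example_predefined_decks():
    decks = PokerPredefinedDecks([[1, 2, 3], [4, 5, 6]])
    deck = []
    decks.shuffle(deck)   # deck is now [1, 2, 3]
    decks.shuffle(deck)   # deck is now [4, 5, 6]
    decks.shuffle(deck)   # wraps around: deck is [1, 2, 3] again
    return deck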
# ---------------------------------------------------------
class PokerGameTestCase(unittest.TestCase):
TestConfDirectory = path.join(TESTS_PATH, 'test-data/conf')
TestVariantInvalidFile = 'unittest.variant.invalid.xml'
TestVariantTemplateFile = 'unittest.variant.template.xml'
TestConfigTemplateFile = 'unittest.config.template.xml'
TestLevelsTemplateFile = 'unittest.levels.template.xml'
TestUrl = 'unittest.%s.xml'
TestConfigTemporaryFile = 'config'
TestVariantTemporaryFile = 'variant'
# ---------------------------------------------------------
def setUp(self):
self.VariantInvalidFile = path.join(PokerGameTestCase.TestConfDirectory, PokerGameTestCase.TestVariantInvalidFile)
self.ConfigTmplFile = path.join(PokerGameTestCase.TestConfDirectory, PokerGameTestCase.TestConfigTemplateFile)
self.VariantTmplFile = path.join(PokerGameTestCase.TestConfDirectory, PokerGameTestCase.TestVariantTemplateFile)
self.LevelsTmplFile = path.join(PokerGameTestCase.TestConfDirectory, PokerGameTestCase.TestLevelsTemplateFile)
self.ConfigTempFile = path.join(tempfile.gettempdir(), PokerGameTestCase.TestUrl % PokerGameTestCase.TestConfigTemporaryFile)
self.VariantTempFile = path.join(tempfile.gettempdir(), PokerGameTestCase.TestUrl % PokerGameTestCase.TestVariantTemporaryFile)
self.CreateGameServer()
self.InitGame()
InitCallback()
# ---------------------------------------------------------
def tearDown(self):
self.DeleteFile(self.ConfigTempFile)
self.DeleteFile(self.VariantTempFile)
# ---------------------------------------------------------
def testUniq(self):
"""Test Poker Game: Uniq"""
self.failUnlessEqual(pokergame.uniq([1, 4, 4, 7]).sort(), [1, 4, 7].sort())
self.failUnlessEqual(pokergame.uniq([1, 4, 4, 7, 3, 3, 3, 9, 7]).sort(), [1, 3, 4, 7, 9].sort())
# ---------------------------------------------------------
def testGetSerialByNameNoCase(self):
"""Test Poker Game: Get serial by name no case sensitive"""
# Create players
player1 = self.AddPlayerAndSit(1, 2)
player2 = self.AddPlayerAndSit(2, 7)
# Set the player's name
player1.name = 'Player1'
player2.name = 'Player2'
        # Search for a player by name (case-insensitive)
        self.failUnlessEqual(self.game.getSerialByNameNoCase('player1'), 1)
        self.failUnlessEqual(self.game.getSerialByNameNoCase('pLaYEr2'), 2)
        self.failUnlessEqual(self.game.getSerialByNameNoCase('unknown'), 0)
# ---------------------------------------------------------
def testSetPosition(self):
"""Test Poker Game: Set position"""
self.game.setMaxPlayers(3)
# Create players
player1 = self.AddPlayerAndSit(1, 2)
player2 = self.AddPlayerAndSit(2, 5)
player3 = self.AddPlayerAndSit(3, 7)
# Position initially set to -1
self.failUnlessEqual(self.game.position, -1)
# The game is not running, the set position function is not avalaible
self.failIf(self.game.isRunning())
self.game.setPosition(5)
self.failUnlessEqual(self.game.position, -1)
# Blind and ante turn
self.game.forced_dealer_seat = 2
self.game.beginTurn(1)
self.failUnless(self.game.isBlindAnteRound())
# The game is running, the set position function is available
self.failUnless(self.game.isRunning())
self.game.setPosition(2)
self.failUnlessEqual(self.game.position, 2)
self.failUnlessEqual(self.game.getSerialInPosition(), 3)
# Invalid position
self.game.setPosition(-1)
self.failUnlessEqual(self.game.getSerialInPosition(), 0)
# ---------------------------------------------------------
def testPokerGameSetInvalidMaxPlayer(self):
"""Test Poker Game: Set an invalid number max of player"""
# The minimum number of player is 2
self.game.setMaxPlayers(0)
self.failUnlessEqual(self.game.seatsLeftCount(), 0)
self.failUnlessEqual(self.game.seatsCount(), 0)
self.game.setMaxPlayers(1)
self.failUnlessEqual(self.game.seatsLeftCount(), 0)
self.failUnlessEqual(self.game.seatsCount(), 0)
        # The maximum number of players is specified by the ABSOLUTE_MAX_PLAYERS constant
self.game.setMaxPlayers(pokergame.ABSOLUTE_MAX_PLAYERS + 1)
self.failUnlessEqual(self.game.seatsLeftCount(), 0)
self.failUnlessEqual(self.game.seatsCount(), 0)
# ---------------------------------------------------------
def testPokerGameSetValidMaxPlayer(self):
"""Test Poker Game: Set a valid number max of player"""
# Test all the valid numbers of player
for num in range(2,pokergame.ABSOLUTE_MAX_PLAYERS):
self.game.setMaxPlayers(num)
self.failUnlessEqual(self.game.seatsLeftCount(), num)
self.failUnlessEqual(self.game.seatsCount(), num)
# ---------------------------------------------------------
def testSetSeats(self):
"""Test Poker Game: Set seats"""
        # Set the maximum number of players; the available seats are [1, 3, 6, 8]
self.game.setMaxPlayers(4)
# Create players
for player in range(1, 5):
player = self.AddPlayerAndSit(player)
# Set the seats of all the players
seats
|
withanage/HEIDIEditor
|
static/WysiwigEditor/cgi/createJSON.py
|
Python
|
gpl-3.0
| 4,094 | 0.006839 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import xmltodict
from bs4 import BeautifulSoup
from bs4 import CData
UPLOAD_DIR = '../html/uploads'
METADATA_NAME = 'metadata.xml'
PUBLISHER_NAME = 'Heidelberg University Press'
PUBLISHER_LOC = 'Heidelberg'
jsondata = [{'selected': True, 'type': 'book'}]
def predictTagset(d):
if(d.has_key('metadata')):
if (d['metadata'].has_key('front')):
return 'article'
elif(d['metadata'].has_key('book-meta')):
return 'book'
elif(d.has_key('article')):
return 'article'
elif(d.has_key('book')):
return 'book'
else:
return ''
def validate(xmldict):
if(xmldict['tagset'] == 'article'):
if not(xmldict['article']['front']['journal-meta'].has_key('publisher')):
xmldict['article']['front']['journal-meta']['publisher'] = {'publisher-name' : PUBLISHER_NAME, 'publisher-loc': PUBLISHER_LOC}
else:
if not (xmldict['article']['front']['journal-meta']['publisher'].has_key('publisher_loc')):
xmldict['article']['front']['journal-meta']['publisher']['publisher_loc'] = PUBLISHER_LOC
if not(xmldict['article']['front']['journal-meta'].has_key('journal-title-group')):
if (isinstance(xmldict['article']['front']['journal-meta']['journal-id'], str)):
xmldict['article']['front']['journal-meta']['journal-id'] = {'@pub-type': 'epub', '#text': xmldict['article']['front']['journal-meta']['journal-id']}
xmldict['article']['front']['journal-meta']['journal-title-group'] = {'journal-title': xmldict['article']['front']['journal-meta']['journal-id']['#text']}
if(xmldict['article']['front']['article-meta'].has_key('contrib-group')):
if not(isinstance(xmldict['article']['front']['article-meta']['contrib-group'], list)):
xmldict['article']['front']['article-meta']['contrib-group'] = [xmldict['article']['front']['article-meta']['contrib-group']]
    elif(xmldict['tagset'] == 'book'):
if(xmldict['book']['book-meta'].has_key('contrib-group')):
if not(isinstance(xmldict['book']['book-meta']['contrib-group'], list)):
xmldict['book']['book-meta']['contrib-group'] = [xmldict['book']['book-meta']['contrib-group']]
return xmldict
def escape(xml):
soup = BeautifulSoup(xml, 'xml', from_encoding='utf-8')
for i in soup.find_all('p'):
if i.string is None:
string = ''.join([str(j) for j in i.contents])
cdata = CData(string)
            i.string = ''
            i.string.replace_with(cdata)
for i in soup.find_all('mixed-citation'):
if i.string is None:
string = ''.join([str(j) for j in i.contents])
cdata = CData(string)
i.string = ''
i.string.replace_with(cdata)
return str(soup)
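# Illustrative sketch only (not part of the original script): escape() wraps the
# mixed content of <p> and <mixed-citation> elements in CDATA so inline markup
# survives the xmltodict round trip. The input below is a made-up example.
def _example_escape():
    sample = '<metadata><p>A <b>bold</b> word</p></metadata>'
    return escape(sample)  # the <p> body comes back wrapped in <![CDATA[ ... ]]>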
for root, dirs, files in os.walk(UPLOAD_DIR):
if(METADATA_NAME in files):
xmldata = open(root+"/"+METADATA_NAME).read()
        xmldata = escape(xmldata)
xmldict = xmltodict.parse(xmldata)
tagset = predictTagset(xmldict)
xmldict['tagset'] = tagset
xmldict['id'] = root.split('/')[-1]
xmldict[tagset] = xmldict['metadata']
del xmldict['metadata']
xmldict = validate(xmldict)
jsondata[0].update(xmldict)
if('xml' in dirs):
xmlfiles = os.listdir(root+"/xml")
for xml in xmlfiles:
xmldata = open(root+"/xml/"+xml)
xmldata = escape(xmldata)
xmldict = xmltodict.parse(xmldata)
xmldict['selected'] = False
xmldict['type'] = 'file'
xmldict['tagset'] = predictTagset(xmldict)
filename = os.path.splitext(xml)[0]
xmldict['id'] = filename
xmldict = validate(xmldict)
jsondata.append(xmldict)
break
#with open('test.json', 'w+') as f:
# json.dump(jsondata, f, sort_keys=False, indent=4)
print "Content-type: application/json\n\n"
print json.JSONEncoder().encode(jsondata)
print
|
hjuutilainen/autopkg-virustotalanalyzer
|
VirusTotalAnalyzer/VirusTotalAnalyzer.py
|
Python
|
apache-2.0
| 12,577 | 0.001193 |
#!/usr/bin/env python
#
# Copyright 2016 Hannes Juutilainen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import json
import hashlib
import time
from autopkglib import Processor, ProcessorError
__all__ = ["VirusTotalAnalyzer"]
# VirusTotal was kind enough to give this processor its own API key so that it can be
# used as-is without further configuring. Please don't abuse this.
DEFAULT_API_KEY = "3858a94a911f47707717f6d090dbb8f86badb750b0f7bfe74a55c0c6143e3de6"
# Default options
DEFAULT_SLEEP = 15
ALWAYS_REPORT_DEFAULT = False
AUTO_SUBMIT_DEFAULT = False
AUTO_SUBMIT_MAX_SIZE_DEFAULT = 419430400 # 400MB
class VirusTotalAnalyzer(Processor):
"""Queries VirusTotal database for information about the given file"""
input_variables = {
"pathname": {
"required": False,
"description": "File path to analyze.",
},
"VIRUSTOTAL_ALWAYS_REPORT": {
"required": False,
"description": "Always request a report instead of only for new downloads",
},
"VIRUSTOTAL_AUTO_SUBMIT": {
"required": False,
"description": "If item is not found in VirusTotal database, automatically submit it for scanning.",
},
"CURL_PATH": {
"required": False,
"default": "/usr/bin/curl",
"description": "Path to curl binary. Defaults to /usr/bin/curl.",
},
}
output_variables = {
"virus_total_analyzer_summary_result": {
"description": "Description of interesting results."
},
}
description = __doc__
def fetch_content(self, url, headers=None, form_parameters=None, data_parameters=None, curl_options=None):
"""Returns content retrieved by curl, given an url and an optional
dictionaries of header-name/value mappings and parameters.
Logic here borrowed from URLTextSearcher processor.
Keyword arguments:
:param url: The URL to fetch
:type url: str None
:param headers: Dictionary of header-names and values
:type headers: dict None
:param form_parameters: Dictionary of items for '--form'
:type form_parameters: dict None
:param data_parameters: Dictionary of items for '--data'
:type data_parameters: dict None
:param curl_options: Array of arguments to pass to curl
:type curl_options: list None
:returns: content as string
"""
try:
cmd = [self.env['CURL_PATH'], '--location']
if curl_options:
cmd.extend(curl_options)
if headers:
for header, value in headers.items():
cmd.extend(['--header', '%s: %s' % (header, value)])
if form_parameters:
for form_parameter, value in form_parameters.items():
cmd.extend(['--form', '%s=%s' % (form_parameter, value)])
if data_parameters:
for data_parameter, value in data_parameters.items():
cmd.extend(['--data', '%s=%s' % (data_parameter, value)])
cmd.append(url)
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(data, stderr) = proc.communicate()
if proc.returncode:
raise ProcessorError(
'Could not retrieve URL %s: %s' % (url, stderr))
except OSError:
raise ProcessorError('Could not retrieve URL: %s' % url)
return data
def submit_file(self, file_path, api_key):
"""Submit a file to VirusTotal for scanning
:param file_path: Path to a file to upload
:param api_key: API key to use
:returns: JSON response
"""
url = "https://www.virustotal.com/vtapi/v2/file/scan/upload_url"
# Get the upload URL
parameters = {"apikey": api_key}
f = self.fetch_content(url, None, None, parameters, ["-G"])
try:
json_data = json.loads(f)
except (ValueError, KeyError, TypeError) as e:
self.output("Response was: %s" % f)
self.output("JSON format error: %s" % e)
json_data = json.loads(
'{"response_code": 999, "verbose_msg": "Requesting upload URL failed..."}')
return json_data
upload_url = json_data.get('upload_url', None)
if upload_url is None:
return None
# Upload the file
file_path_for_post = "@%s" % file_path
parameters = {"file": file_path_for_post, "apikey": api_key}
f = self.fetch_content(upload_url, None, parameters)
try:
json_data = json.loads(f)
except (ValueError, KeyError, TypeError) as e:
self.output("Response was: %s" % f)
self.output("JSON format error: %s" % e)
json_data = json.loads(
'{"response_code": 999, "verbose_msg": "Request failed, perhaps rate-limited..."}')
# print json.dumps(json_data, sort_keys=True, indent=4)
return json_data
def report_for_hash(self, file_hash, api_key):
"""Request a VirusTotal report for a hash
:param file_hash: md5, sha1 or sha256 hash
:param api_key: API key to use
:returns: JSON response
"""
url = "https://www.virustotal.com/vtapi/v2/file/report"
parameters = {"resource": file_hash, "apikey": api_key}
f = self.fetch_content(url, None, parameters)
try:
json_data = json.loads(f)
except (ValueError, KeyError, TypeError) as e:
self.output("JSON response was: %s" % f)
self.output("JSON format error: %s" % e)
json_data = json.loads(
'{"response_code": 999, "verbose_msg": "Request failed, perhaps rate-limited..."}')
# print json.dumps(json_data, sort_keys=True, indent=4)
return json_data
def calculate_sha256(self, file_path):
"""Calculates a SHA256 checksum
http://stackoverflow.com/a/3431838
:param file_path:
"""
hash_sha256 = hashlib.sha256()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def main(self):
if self.env.get("VIRUSTOTAL_DISABLED", False):
self.output("Skipped VirusTotal analysis...")
return
input_path = self.env.get("pathname", None)
if not input_path:
self.output("Skipping VirusTotal analysis: no input path defined.")
return
# Get variables and arguments
sleep_seconds = int(self.env.get("VIRUSTOTAL_SLEEP_SECONDS", DEFAULT_SLEEP))
auto_submit = self.env.get("VIRUSTOTAL_AUTO_SUBMIT", AUTO_SUBMIT_DEFAULT)
auto_submit_max_size = int(self.env.get("VIRUSTOTAL_AUTO_SUBMIT_MAX_SIZE", AUTO_SUBMIT_MAX_SIZE_DEFAULT))
api_key = self.env.get("VIRUSTOTAL_API_KEY", DEFAULT_API_KEY)
if not api_key or api_key == "":
raise ProcessorError("No API key available")
force_report = self.env.get("VIRUSTOTAL_ALWAYS_REPORT",
ALWAYS_REPORT_DEFAULT)
if "download_changed" in self.env:
if not self.env["download_changed"] and not force_report:
# URLDownloader did not download new items,
# so skip the analysis
self.output("Skipping VirusTotal analysis: no new download.")
self.env["virustotal_result"] = "SKIPPED"
|
kopringo/Scarky2
|
Scarky2/builder/migrations/0002_auto_20150505_2035.py
|
Python
|
mit
| 577 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
        ('builder', '0001_initial'),
    ]
operations = [
migrations.AlterField(
model_name='problem',
name='date_start',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AlterField(
model_name='problem',
name='date_stop',
field=models.DateTimeField(null=True, blank=True),
),
]
|
credativ/pulp
|
server/test/unit/server/managers/repo/test_dependency.py
|
Python
|
gpl-2.0
| 4,601 | 0.004347 |
from .... import base
from pulp.devel import mock_plugins
from pulp.plugins.conduits.dependency import DependencyResolutionConduit
from pulp.plugins.config import PluginCallConfiguration
from pulp.plugins.types import database, model
from pulp.server.db.model.criteria import UnitAssociationCriteria
from pulp.server.db.model.repository import Repo, RepoImporter, RepoContentUnit
from pulp.server.exceptions import MissingResource
from pulp.server.managers import factory as manager_factory
TYPE_1_DEF = model.TypeDefinition('type-1', 'Type 1', 'Test Definition One',
['key-1'], ['search-1'], [])
class DependencyManagerTests(base.PulpServerTests):
def setUp(self):
super(DependencyManagerTests, self).setUp()
mock_plugins.install()
database.update_database([TYPE_1_DEF])
self.repo_id = 'dep-repo'
self.manager = manager_factory.dependency_manager()
manager_factory.repo_manager().create_repo(self.repo_id)
manager_factory.repo_importer_manager().set_importer(self.repo_id, 'mock-importer', {})
def tearDown(self):
super(DependencyManagerTests, self).tearDown()
mock_plugins.reset()
def clean(self):
super(DependencyManagerTests, self).clean()
database.clean()
Repo.get_collection().remove()
RepoImporter.get_collection().remove()
RepoContentUnit.get_collection().remove()
mock_plugins.MOCK_IMPORTER.resolve_dependencies.return_value = None
def test_resolve_dependencies_by_unit(self):
# Setup
report = 'dep report'
mock_plugins.MOCK_IMPORTER.resolve_dependencies.return_value = report
        unit_id_1 = manager_factory.content_manager().add_content_unit('type-1', None,
                                                                        {'key-1': 'v1'})
unit_id_2 = manager_factory.content_manager().add_content_unit('type-1', None,
{'key-1': 'v2'})
association_manager = manager_factory.repo_unit_association_manager()
association_manager.associate_unit_by_id(self.repo_id, 'type-1', unit_id_1)
association_manager.associate_unit_by_id(self.repo_id, 'type-1', unit_id_2)
# Test
result = self.manager.resolve_dependencies_by_units(self.repo_id, [], {})
# Verify
self.assertEqual(result, report)
self.assertEqual(1, mock_plugins.MOCK_IMPORTER.resolve_dependencies.call_count)
args = mock_plugins.MOCK_IMPORTER.resolve_dependencies.call_args[0]
self.assertEqual(args[0].id, self.repo_id)
self.assertEqual(len(args[1]), 0)
self.assertTrue(isinstance(args[2], DependencyResolutionConduit))
self.assertTrue(isinstance(args[3], PluginCallConfiguration))
def test_resolve_dependencies_by_unit_no_repo(self):
# Test
self.assertRaises(MissingResource, self.manager.resolve_dependencies_by_units, 'foo', [],
{})
def test_resolve_dependencies_by_unit_no_importer(self):
# Setup
manager_factory.repo_manager().create_repo('empty')
# Test
self.assertRaises(MissingResource, self.manager.resolve_dependencies_by_units, 'empty', [],
{})
def test_resolve_dependencies_by_criteria(self):
# Setup
report = 'dep report'
mock_plugins.MOCK_IMPORTER.resolve_dependencies.return_value = report
unit_id_1 = manager_factory.content_manager().add_content_unit('type-1', None,
{'key-1': 'unit-id-1'})
unit_id_2 = manager_factory.content_manager().add_content_unit('type-1', None,
{'key-1': 'dep-1'})
association_manager = manager_factory.repo_unit_association_manager()
association_manager.associate_unit_by_id(self.repo_id, 'type-1', unit_id_1)
association_manager.associate_unit_by_id(self.repo_id, 'type-1', unit_id_2)
criteria = UnitAssociationCriteria(type_ids=['type-1'], unit_filters={'key-1': 'unit-id-1'})
# Test
result = self.manager.resolve_dependencies_by_criteria(self.repo_id, criteria, {})
# Verify
self.assertEqual(report, result)
self.assertEqual(1, mock_plugins.MOCK_IMPORTER.resolve_dependencies.call_count)
args = mock_plugins.MOCK_IMPORTER.resolve_dependencies.call_args[0]
self.assertEqual(1, len(args[1]))
|
mistercrunch/airflow
|
airflow/operators/bash.py
|
Python
|
apache-2.0
| 8,272 | 0.003264 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from typing import Dict, Optional, Sequence
from airflow.compat.functools import cached_property
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.hooks.subprocess import SubprocessHook
from airflow.models import BaseOperator
from airflow.utils.context import Context
from airflow.utils.operator_helpers import context_to_airflow_vars
class BashOperator(BaseOperator):
r"""
Execute a Bash script, command or set of commands.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BashOperator`
If BaseOperator.do_xcom_push is True, the last line written to stdout
    will also be pushed to an XCom when the bash command completes.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed. (templated)
:type bash_command: str
:param env: If env is not None, it must be a dict that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:type env: dict
:param append_env: If False(default) uses the environment variables passed in env params
and does not inherit the current process environment. If True, inherits the environment variables
from current passes and then environment variable passed by the user will either update the existing
inherited environment variables or the new variables gets appended to it
:type append_env: bool
:param output_encoding: Output encoding of bash command
:type output_encoding: str
:param skip_exit_code: If task exits with this exit code, leave the task
in ``skipped`` state (default: 99). If set to ``None``, any non-zero
exit code will be treated as a failure.
:type skip_exit_code: int
:param cwd: Working directory to execute the command in.
If None (default), the command is run in a temporary directory.
:type cwd: str
Airflow will evaluate the exit code of the bash command. In general, a non-zero exit code will result in
task failure and zero will result in task success. Exit code ``99`` (or another set in ``skip_exit_code``)
will throw an :class:`airflow.exceptions.AirflowSkipException`, which will leave the task in ``skipped``
state. You can have all non-zero exit codes be treated as a failure by setting ``skip_exit_code=None``.
.. list-table::
:widths: 25 25
:header-rows: 1
* - Exit code
- Behavior
* - 0
- success
* - `skip_exit_code` (default: 99)
- raise :class:`airflow.exceptions.AirflowSkipException`
* - otherwise
- raise :class:`airflow.exceptions.AirflowException`
.. note::
Airflow will not recognize a non-zero exit code unless the whole shell exit with a non-zero exit
code. This can be an issue if the non-zero exit arises from a sub-command. The easiest way of
addressing this is to prefix the command with ``set -e;``
Example:
.. code-block:: python
bash_command = "set -e; python3 script.py '{{ next_execution_date }}'"
.. note::
Add a space after the script name when directly calling a ``.sh`` script with the
``bash_command`` argument -- for example ``bash_command="my_script.sh "``. This
    is because Airflow tries to load this file and process it as a Jinja template when
    it ends with ``.sh``, which will likely not be what most users want.
.. warning::
Care should be taken with "user" input or when using Jinja templates in the
``bash_command``, as this bash operator does not perform any escaping or
sanitization of the command.
This applies mostly to using "dag_run" conf, as that can be submitted via
users in the Web UI. Most of the default template variables are not at
risk.
For example, do **not** do this:
.. code-block:: python
bash_task = BashOperator(
task_id="bash_task",
bash_command='echo "Here is the message: \'{{ dag_run.conf["message"] if dag_run else "" }}\'"',
)
Instead, you should pass this via the ``env`` kwarg and use double-quotes
inside the bash_command, as below:
.. code-block:: python
bash_task = BashOperator(
task_id="bash_task",
bash_command="echo \"here is the message: '$message'\"",
env={"message": '{{ dag_run.conf["message"] if dag_run else "" }}'},
)
"""
template_fields: Sequence[str] = ('bash_command', 'env')
template_fields_renderers = {'bash_command': 'bash', 'env': 'json'}
template_ext: Sequence[str] = (
'.sh',
'.bash',
)
ui_color = '#f0ede4'
def __init__(
self,
*,
bash_command: str,
env: Optional[Dict[str, str]] = None,
append_env: bool = False,
output_encoding: str = 'utf-8',
skip_exit_code: int = 99,
cwd: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
self.skip_exit_code = skip_exit_code
self.cwd = cwd
self.append_env = append_env
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
@cached_property
def subprocess_hook(self):
"""Returns hook for running the bash command"""
return SubprocessHook()
def get_env(self, context):
"""Builds the set of environment variables to be exposed for the bash command"""
system_env = os.environ.copy()
env = self.env
if env is None:
env = system_env
else:
if self.append_env:
system_env.update(env)
env = system_env
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.debug(
'Exporting the following env vars:\n%s',
'\n'.join(f"{k}={v}" for k, v in airflow_context_vars.items()),
)
env.update(airflow_context_vars)
return env
def execute(self, context: Context):
if self.cwd is not None:
if not os.path.exists(self.cwd):
raise AirflowException(f"Can not find the cwd: {self.cwd}")
if not os.path.isdir(self.cwd):
raise AirflowException(f"The cwd {self.cwd} must be a directory")
env = self.get_env(context)
result = self.subprocess_hook.run_command(
command=['bash', '-c', self.bash_command],
env=env,
output_encoding=self.output_encoding,
cwd=self.cwd,
)
if self.skip_exit_code is not None and result.exit_code == self.skip_exit_code:
raise AirflowSkipException(f"Bash command returned exit code {self.skip_exit_code}. Skipping.")
elif result.exit_code != 0:
raise AirflowException(
f'Bash command failed. The command returned a non-zero exit code {result.exit_code}.'
)
        return result.output
|
power12317/weblate
|
weblate/trans/views.py
|
Python
|
gpl-3.0
| 68,329 | 0.000688 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2013 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.shortcuts import render_to_response, get_object_or_404
from django.views.decorators.cache import cache_page
from weblate.trans import appsettings
from django.core.servers.basehttp import FileWrapper
from django.utils.translation import ugettext as _
import django.utils.translation
from django.template import RequestContext, loader
from django.http import (
HttpResponse, HttpResponseRedirect, HttpResponseNotFound, Http404
)
from django.contrib import messages
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test
)
from django.contrib.auth.models import AnonymousUser
from django.db.models import Q, Count, Sum
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.utils.safestring import mark_safe
from weblate.trans.models import (
Project, SubProject, Translation, Unit, Suggestion, Check,
Dictionary, Change, Comment, get_versions
)
from weblate.lang.models import Language
from weblate.trans.checks import CHECKS
from weblate.trans.forms import (
TranslationForm, UploadForm, SimpleUploadForm, ExtraUploadForm, SearchForm,
MergeForm, AutoForm, WordForm, DictUploadForm, ReviewForm, LetterForm,
AntispamForm, CommentForm
)
from weblate.trans.util import join_plural
from weblate.accounts.models import Profile, send_notification_email
import weblate
from whoosh.analysis import StandardAnalyzer, StemmingAnalyzer
import datetime
import logging
import os.path
import json
import csv
from xml.etree import ElementTree
import urllib2
# See https://code.djangoproject.com/ticket/6027
class FixedFileWrapper(FileWrapper):
def __iter__(self):
self.filelike.seek(0)
return self
logger = logging.getLogger('weblate')
def home(request):
'''
Home page of Weblate showing list of projects, stats
and user links if logged in.
'''
projects = Project.objects.all_acl(request.user)
acl_projects = projects
if projects.count() == 1:
projects = SubProject.objects.filter(project=projects[0])
    # Warn about a full name that has not been filled in (usually caused by
    # migration of users from an older system)
if not request.user.is_anonymous() and request.user.get_full_name() == '':
messages.warning(
request,
_('Please set your full name in your profile.')
)
# Load user translations if user is authenticated
usertranslations = None
if request.user.is_authenticated():
profile = request.user.get_profile()
usertranslations = Translation.objects.filter(
language__in=profile.languages.all()
).order_by(
'subproject__project__name', 'subproject__name'
)
# Some stats
top_translations = Profile.objects.order_by('-translated')[:10]
top_suggestions = Profile.objects.order_by('-suggested')[:10]
last_changes = Change.objects.filter(
translation__subproject__project__in=acl_projects,
    ).order_by('-timestamp')[:10]
return render_to_response('index.html', RequestContext(request, {
'projects': projects,
'top_translations': top_translations,
'top_suggestions': top_suggestions,
'last_changes': last_changes,
'last_changes_rss': reverse('rss'),
'usertranslations': usertranslations,
}))
def show_checks(request):
'''
List of failing checks.
'''
allchecks = Check.objects.filter(
ignore=False
).values('check').annotate(count=Count('id'))
return render_to_response('checks.html', RequestContext(request, {
'checks': allchecks,
'title': _('Failing checks'),
}))
def show_check(request, name):
'''
Details about failing check.
'''
try:
check = CHECKS[name]
except KeyError:
raise Http404('No check matches the given query.')
checks = Check.objects.filter(
check=name, ignore=False
).values('project__slug').annotate(count=Count('id'))
return render_to_response('check.html', RequestContext(request, {
'checks': checks,
'title': check.name,
'check': check,
}))
def show_check_project(request, name, project):
'''
Show checks failing in a project.
'''
prj = get_object_or_404(Project, slug=project)
prj.check_acl(request)
try:
check = CHECKS[name]
except KeyError:
raise Http404('No check matches the given query.')
units = Unit.objects.none()
if check.target:
langs = Check.objects.filter(
check=name, project=prj, ignore=False
).values_list('language', flat=True).distinct()
for lang in langs:
checks = Check.objects.filter(
check=name, project=prj, language=lang, ignore=False
).values_list('checksum', flat=True)
res = Unit.objects.filter(
checksum__in=checks,
translation__language=lang,
translation__subproject__project=prj,
translated=True
).values(
'translation__subproject__slug',
'translation__subproject__project__slug'
).annotate(count=Count('id'))
units |= res
if check.source:
checks = Check.objects.filter(
check=name,
project=prj,
language=None,
ignore=False
).values_list(
'checksum', flat=True
)
for subproject in prj.subproject_set.all():
lang = subproject.translation_set.all()[0].language
res = Unit.objects.filter(
checksum__in=checks,
translation__language=lang,
translation__subproject=subproject
).values(
'translation__subproject__slug',
'translation__subproject__project__slug'
).annotate(count=Count('id'))
units |= res
return render_to_response('check_project.html', RequestContext(request, {
'checks': units,
'title': '%s/%s' % (prj.__unicode__(), check.name),
'check': check,
'project': prj,
}))
def show_check_subproject(request, name, project, subproject):
'''
Show checks failing in a subproject.
'''
subprj = get_object_or_404(
SubProject,
slug=subproject,
project__slug=project
)
subprj.check_acl(request)
try:
check = CHECKS[name]
except KeyError:
raise Http404('No check matches the given query.')
units = Unit.objects.none()
if check.target:
langs = Check.objects.filter(
check=name,
project=subprj.project,
ignore=False
).values_list(
'language', flat=True
).distinct()
for lang in langs:
checks = Check.objects.filter(
check=name,
project=subprj.project,
language=lang,
ignore=False
).values_list('checksum', flat=True)
res = Unit.objects.filter(
translation__subproject=subprj,
checksum__in=checks,
                translation__language=lang,
translated=True
).values(
'translation__language_
|
keen99/SickRage
|
sickbeard/dailysearcher.py
|
Python
|
gpl-3.0
| 4,533 | 0.002647 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import threading
import traceback
import sickbeard
from sickbeard import logger
from sickbeard import db
from sickbeard import common
from sickbeard import helpers
from sickbeard import exceptions
from sickbeard import network_timezones
from sickbeard.exceptions import ex
from sickbeard.common import SKIPPED
from common import Quality, qualityPresetStrings, statusStrings
class DailySearcher():
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False):
if self.amActive:
return
self.amActive = True
logger.log(u"Searching for new released episodes ...")
if not network_timezones.network_dict:
network_timezones.update_network_dict()
if network_timezones.network_dict:
curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
else:
curDate = (datetime.date.today() + datetime.timedelta(days=2)).toordinal()
curTime = datetime.datetime.now(network_timezones.sb_timezone)
myDB = db.DBConnection()
        sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND (airdate <= ? and airdate > 1)",
[common.UNAIRED, curDate])
sql_l = []
show = None
for sqlEp in sqlResults:
try:
if not show or int(sqlEp["showid"]) != show.indexerid:
show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
# for when there is orphaned series in the database but not loaded into our showlist
if not show or show.paused:
continue
except exceptions.MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
continue
try:
end_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs,
show.network) + datetime.timedelta(
minutes=helpers.tryInt(show.runtime, 60))
                # filter out any episodes that haven't aired yet
if end_time > curTime:
continue
except:
                # if an error occurred, assume the episode hasn't aired yet
continue
UpdateWantedList = 0
ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
with ep.lock:
if ep.season == 0:
logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED because is a special season")
ep.status = common.SKIPPED
elif sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT:
ep.status = common.SKIPPED
UpdateWantedList = 1
else:
logger.log(u"New episode %s airs today, setting to default episode status for this show: %s" % (ep.prettyName(), common.statusStrings[ep.show.default_ep_status]))
ep.status = ep.show.default_ep_status
sql_l.append(ep.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
else:
logger.log(u"No new released episodes found ...")
sickbeard.traktRollingScheduler.action.updateWantedList()
# queue episode for daily search
dailysearch_queue_item = sickbeard.search_queue.DailySearchQueueItem()
sickbeard.searchQueueScheduler.action.add_item(dailysearch_queue_item)
self.amActive = False
|
gautam1858/tensorflow
|
tensorflow/python/keras/backend_test.py
|
Python
|
apache-2.0
| 68,908 | 0.005819 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import scipy.sparse
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def compare_single_input_op_to_numpy(keras_op,
np_op,
input_shape,
dtype='float32',
negative_values=True,
keras_args=None,
keras_kwargs=None,
np_args=None,
np_kwargs=None):
keras_args = keras_args or []
keras_kwargs = keras_kwargs or {}
np_args = np_args or []
np_kwargs = np_kwargs or {}
inputs = 2. * np.random.random(input_shape)
if negative_values:
inputs -= 1.
keras_output = keras_op(keras.backend.variable(inputs, dtype=dtype),
*keras_args, **keras_kwargs)
keras_output = keras.backend.eval(keras_output)
np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs)
try:
np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
except AssertionError:
raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '
'Expected ' + str(np_output) + ' but got ' +
str(keras_output))
def compare_two_inputs_op_to_numpy(keras_op,
np_op,
input_shape_a,
input_shape_b,
dtype='float32',
keras_args=None,
keras_kwargs=None,
np_args=None,
np_kwargs=None):
keras_args = keras_args or []
keras_kwargs = keras_kwargs or {}
np_args = np_args or []
np_kwargs = np_kwargs or {}
input_a = np.random.random(input_shape_a)
input_b = np.random.random(input_shape_b)
keras_output = keras_op(keras.backend.variable(input_a, dtype=dtype),
keras.backend.variable(input_b, dtype=dtype),
*keras_args, **keras_kwargs)
keras_output = keras.backend.eval(keras_output)
np_output = np_op(input_a.astype(dtype), input_b.astype(dtype),
*np_args, **np_kwargs)
try:
np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
except AssertionError:
raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '
'Expected ' + str(np_output) + ' but got ' +
str(keras_output))
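# Illustrative sketch only (not part of the original tests): typical use of the
# two comparison helpers above, checking a backend op against its NumPy twin.
# The specific ops and shapes below are hypothetical example choices.
def _example_helper_usage():
    compare_single_input_op_to_numpy(
        keras.backend.abs, np.abs, input_shape=(4, 7))
    compare_two_inputs_op_to_numpy(
        keras.backend.minimum, np.minimum,
        input_shape_a=(4, 7), input_shape_b=(4, 7))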
@test_util.run_all_in_graph_and_eager_modes
class BackendUtilsTest(test.TestCase):
def test_backend(self):
self.assertEqual(keras.backend.backend(), 'tensorflow')
def test_get_reset_uids(self):
self.assertEqual(keras.backend.get_uid('foo'), 1)
self.assertEqual(keras.backend.get_uid('foo'), 2)
keras.backend.reset_uids()
self.assertEqual(keras.backend.get_uid('foo'), 1)
def test_learning_phase(self):
with self.cached_session() as sess:
keras.backend.set_learning_phase(1)
self.assertEqual(keras.backend.learning_phase(), 1)
with self.assertRaises(ValueError):
keras.backend.set_learning_phase(2)
# Test running with a learning-phase-consuming layer
keras.backend.set_learning_phase(0)
x = keras.Input((3,))
y = keras.layers.BatchNormalization()(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
sess.run(y, feed_dict={x: np.random.random((2, 3))})
def test_learning_phase_scope(self):
initial_learning_phase = keras.backend.learning_phase()
with keras.backend.learning_phase_scope(1) as lp:
self.assertEqual(lp, 1)
self.assertEqual(keras.backend.learning_phase(), 1)
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
with keras.backend.learning_phase_scope(0) as lp:
self.assertEqual(lp, 0)
self.assertEqual(keras.backend.learning_phase(), 0)
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
with self.assertRaises(ValueError):
with keras.backend.learning_phase_scope(None):
pass
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
def test_int_shape(self):
x = keras.backend.ones(shape=(3, 4))
self.assertEqual(keras.backend.int_shape(x), (3, 4))
if not context.executing_eagerly():
x = keras.backend.placeholder(shape=(None, 4))
self.assertEqual(keras.backend.int_shape(x), (None, 4))
def test_in_train_phase(self):
y1 = keras.backend.variable(1)
y2 = keras.backend.variable(2)
if context.executing_eagerly():
with keras.backend.learning_phase_scope(0):
y_val_test = keras.backend.in_train_phase(y1, y2).numpy()
with keras.backend.learning_phase_scope(1):
y_val_train = keras.backend.in_train_phase(y1, y2).numpy()
else:
y = keras.backend.in_train_phase(y1, y2)
f = keras.backend.function([keras.backend.learning_phase()], [y])
y_val_test = f([0])[0]
y_val_train = f([1])[0]
self.assertAllClose(y_val_test, 2)
self.assertAllClose(y_val_train, 1)
def test_is_keras_tensor(self):
x = keras.backend.variable(1)
self.assertEqual(keras.backend.is_keras_tensor(x), False)
x = keras.Input(shape=(1,))
self.assertEqual(keras.backend.is_keras_tensor(x), True)
with self.assertRaises(ValueError):
keras.backend.is_keras_tensor(0)
def test_stop_gradient(self):
x = keras.backend.variable(1)
y = keras.backend.stop_gradient(x)
if not context.executing_eagerly():
self.assertEqual(y.op.name[:12], 'StopGradient')
xs = [keras.backend.variable(1) for _ in range(3)]
ys = keras.backend.stop_gradient(xs)
if not context.executing_eagerly():
for y in ys:
self.assertEqual(y.op.name[:12], 'StopGradient')
@test_util.run_all_in_graph_and_eager_modes
class BackendVariableTest(test.TestCase):
def test_zeros(self):
x = keras.backend.zeros((3, 4))
val = keras.backend.eval(x)
self.assertAllClose(val, np.zeros((3, 4)))
def test_ones(self):
x = keras.backend.ones((3, 4))
val = keras.backend.eval(x)
self.assertAllClose(val, np.ones((3, 4)))
def test_eye(self):
x = keras.backend.eye(4)
val = keras.backend.eval(x)
self.assertAllClose(val, np.eye(4))
def test_zeros_like(self):
x = keras.backend.zeros((3, 4))
y = keras.backend.zeros_like(x)
val = keras.backend.eval(y)
self.assertAllClose(val, np.zeros((3, 4)))
def test_ones_like(self):
x = keras.backend.zeros((3, 4))
y = keras.backend.ones_like(x)
val = keras.bac
|
scollis/iris
|
lib/iris/tests/unit/plot/test_pcolor.py
|
Python
|
gpl-3.0
| 1,395 | 0 |
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You shou
|
ld have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.pcolor` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.tests.unit.plot import TestGraphicStringCoord
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.ski
|
p_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.pcolor(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.pcolor(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
if __name__ == "__main__":
tests.main()
|
huanchenz/STX-h-store
|
tests/scripts/xml2/__init__.py
|
Python
|
gpl-3.0
| 262 | 0 |
"""XML parser package.
This package parses the XML
|
file returned by the Graffiti tracker.
"""
from xmlparser import XMLParser
from xmlgenerator import XMLGenerator
from exceptions import *
__all__ = ["XMLParser", "XMLGenerator", "
|
XMLException", "InvalidXML"]
|
LLNL/spack
|
var/spack/repos/builtin/packages/casacore/package.py
|
Python
|
lgpl-2.1
| 4,875 | 0.001846 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Casacore(CMakePackage):
"""A suite of c++ libraries for radio astronomy data processing."""
homepage = "https://github.com/casacore/casacore"
url = "https://github.com/casacore/casacore/archive/v2.4.1.tar.gz"
maintainers = ['mpokorny']
version('3.4.0', sha256='31f02ad2e26f29bab4a47a2a69e049d7bc511084a0b8263360e6157356f92ae1')
version('3.3.0', sha256='3a714644b908ef6e81489b792cc9b80f6d8267a275e15d38a42a6a5137d39d3d')
version('3.2.0', sha256='ae5d3786cb6dfdd7ebc5eecc0c724ff02bbf6929720bc23be43a027978e79a5f')
version('3.1.2', sha256='ac94f4246412eb45d503f1019cabe2bb04e3861e1f3254b832d9b1164ea5f281')
version('3.1.1', sha256='85d2b17d856592fb206b17e0a344a29330650a4269c80b87f8abb3eaf3dadad4')
version('3.1.0', sha256='a6adf2d77ad0d6f32995b1e297fd88d31ded9c3e0bb8f28966d7b35a969f7897')
version('3.0.0', sha256='6f0e68fd77b5c96299f7583a03a53a90980ec347bff9dfb4c0abb0e2933e6bcb')
version('2.4.1', sha256='58eccc875053b2c6fe44fe53b6463030ef169597ec29926936f18d27b5087d63')
depends_on('cmake@3.7.1:', type='build')
variant('openmp', default=False, description='Build OpenMP support')
variant('shared', default=True, description='Build shared libraries')
variant('readline', default=True, description='Build readline support')
# see note below about the reason for disabling the "sofa" variant
# variant('sofa', default=False, description='Build SOFA support')
variant('adios2', default=False, description='Build ADIOS2 support')
variant('fftpack', default=False, description='Build FFTPack')
variant('hdf5', default=False, description='Build HDF5 support')
variant('python', default=False, description='Build python support')
# Force dependency on readline in v3.2 and earlier. Although the
# presence of readline is tested in CMakeLists.txt, and casacore
# can be built without it, there's no way to control that
# dependency at build time; since many systems come with readline,
# it's better to explicitly depend on it here always.
depends_on('readline', when='@:3.2.0')
depends_on('readline', when='+readline')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('cfitsio')
depends_on('wcslib@4.20:+cfitsio')
depends_on('fftw@3.0.0: precision=float,double', when='@3.4.0:')
depends_on('fftw@3.0.0: precision=float,double', when='~fftpack')
# SOFA dependency suffers the same problem in CMakeLists.txt as readline;
# force a dependency when building unit tests
depends_on('sofa-c', type='test')
depends_on('hdf5', when='+hdf5')
depends_on('adios2+mpi', when='+adios2')
depends_on('mpi', when='+adios2')
depends_on('python@2.6:', when='+python')
depends_on('boost+python', when='+python')
depends_on('py-numpy', when='+python')
def cmake_args(self):
args = []
spec = self.spec
args.append(self.define_from_variant('ENABLE_SHARED', 'shared'))
args.append(self.define_from_variant('USE_OPENMP', 'openmp'))
args.append(self.define_from_variant('USE_READLINE', 'readline'))
args.append(self.define_from_variant('USE_HDF5', 'hdf5'))
args.append(self.define_from_vari
|
ant('USE_ADIOS2', 'adios2'))
args.append(self.define_from_variant('USE_MPI', 'adios2'))
if spec.satisfies('+adios2'):
args.append(self.define('ENABLE_TABLELOCKING', False))
# fftw3 is required by casacore starting with v3.4.0, but the
# old fftpack is still available. For v3.4.0 and later, we
# always require FFTW3 dependency with the option
|
al addition
# of FFTPack. In older casacore versions, only one of FFTW3 or
# FFTPack can be selected.
if spec.satisfies('@3.4.0:'):
if spec.satisfies('+fftpack'):
args.append('-DBUILD_FFTPACK_DEPRECATED=YES')
args.append(self.define('USE_FFTW3', True))
else:
args.append(self.define('USE_FFTW3', spec.satisfies('~fftpack')))
# Python2 and Python3 binding
if spec.satisfies('~python'):
args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=NO'])
elif spec.satisfies('^python@3.0.0:'):
args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=YES'])
else:
args.extend(['-DBUILD_PYTHON=YES', '-DBUILD_PYTHON3=NO'])
args.append('-DBUILD_TESTING=OFF')
return args
def patch(self):
# Rely on CMake ability to find hdf5, available since CMake 3.7.X
os.remove('cmake/FindHDF5.cmake')
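As a quick illustration of what the variant handling above produces, here is a hedged sketch (not part of the Spack package, and not the real Spack API) of how a boolean variant maps onto a CMake definition; the helper name and the example variants are illustrative only.
# Illustrative sketch only: approximates the flags that define_from_variant
# produces for the boolean variants used in the package above.
def define_bool(cmake_var, enabled):
    """Return the CMake command-line flag for a boolean option."""
    return '-D{0}:BOOL={1}'.format(cmake_var, 'ON' if enabled else 'OFF')
# A spec such as 'casacore +openmp ~python' would contribute flags like:
#   define_bool('USE_OPENMP', True)    -> '-DUSE_OPENMP:BOOL=ON'
#   define_bool('BUILD_PYTHON', False) -> '-DBUILD_PYTHON:BOOL=OFF'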
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Common/Core/Testing/Python/TestGhost.py
|
Python
|
mit
| 1,901 | 0.001578 |
"""Test ghost object support in VTK-Python
When PyVTKObject is destroyed, the vtkObjectBase that it
contained often continues to exist because references to
it still exist within VTK. When that vtkObjectBase is
returned to python, a new PyVTKObject is created.
If the PyVTKObject has a custom class or a custom dict,
then we make a "ghost" of the PyVTKObject when it is
destroyed, so that if its vtkObjectBase returns to python,
the PyVTKObject can be restored with the proper class and
dict. Each ghost has a weak pointer to its vtkObjectBase
so that it can be erased if the vtkObjectBase is destroyed.
To be tested:
- make sure custom dicts are restored
- make sure custom classes are restored
Created on Aug 19, 2010 by David Gobbi
"""
import sys
import exceptions
import vtk
from vtk.test import Testing
class vtkCustomObject(vtk.vtkObject):
pass
class TestGhost(Testing.vtkTest):
def testGhostForDict(self):
"""Ghost an object to save the dict"""
|
o = vtk.vtkObject()
o.customattr = 'hello'
a = vtk.vtkVariantArray()
a.InsertNextValue(o)
i = id(o)
del o
o = vtk.vtkObject()
o = a.GetValue(0).ToVTKObject()
# make sure the id has changed, but dict the same
self.assertEqual(o.customattr, 'hello')
self.assertNotEqual(i, id(o))
def testGhostForClass(self):
"""Ghost an object to save the class"""
|
o = vtkCustomObject()
a = vtk.vtkVariantArray()
a.InsertNextValue(o)
i = id(o)
del o
o = vtk.vtkObject()
o = a.GetValue(0).ToVTKObject()
# make sure the id has changed, but class the same
self.assertEqual(o.__class__, vtkCustomObject)
self.assertNotEqual(i, id(o))
if __name__ == "__main__":
Testing.main([(TestGhost, 'test')])
|
kailIII/emaresa
|
rent.resp/partner.py
|
Python
|
agpl-3.0
| 1,548 | 0.008398 |
# -*- coding: utf-8 -*-
#
|
#############################################################################
#
# Author: OpenDrive Ltda
# Copyright (c) 2013 Opendrive Ltda
#
# WARNING: This program as such is intended to b
|
e used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class Partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'legal_representative': fields.char(
'Legal Representative',
),
}
|
koalakoker/knote_gcrypt
|
GCryptNote/PyMyPackage.py
|
Python
|
gpl-2.0
| 807 | 0.007435 |
'''
Created on 23/mag/2015
@author: koala
'''
import Cript
key = b'dfdfjdnjnjvnfkjn vnfj vjfk d nvkfd j'
plaintext = b'jfghksdjfghksdjfgksdhgljdkghjh fgh fhg jfhgdkjfkjg hkdfjg hkdfj ghkdf
|
ghfdjk ghfdjkg hkdfjg h'
testoC
|
riptato, seme, orLen = Cript.criptIt(plaintext, key)
testoDecriptato = Cript.deCriptIt(testoCriptato, key, seme, orLen)
# dec = cipher.decrypt(msg)
# def pr(iStr):
# l = len(iStr)
# print l
# r = range(l)
# print r
# for i in r:
# print i,iStr[i]
# print (Cript.hashIt("pippa"))
print (plaintext)
Cript.printHex(plaintext)
print ("seme")
Cript.printHex(seme)
print ("Testo Criptato")
Cript.printHex(testoCriptato)
print ("Testo decriptato")
print(testoDecriptato)
Cript.printHex(testoDecriptato)
if (plaintext != testoDecriptato):
print ("Errore")
|
husky-prophet/personal-backup
|
PRETEND assessment/PRETEND assessment csv.py
|
Python
|
mit
| 4,661 | 0.039262 |
import csv
from subprocess import call#
from datetime import datetime#Importing various libraries
call(["color","F9"], shell=True)#
call(["cls"], shell = True)#Setting colour for shell
import sys, time#
import time#More libraries
prog=True#creating variable prog and assigning it as true
time.sleep(1)#
def typing(string):#Creating a function that types letter by letter
for c in string:#
sys.stdout.write(c)#
sys.stdout.flush()#
time.sleep(0.05)#
print#
def printing(string):#Creating another function that does the same, but faster
for c in string:#
sys.stdout.write(c)#
sys.stdout.flush()#
time.sleep(0.01)#<== Faster
print#
while prog==True:#
cid={
}
now=datetime.now()#variable to make time work
printing("=============================================")#using function "printing" to make text appear 1 character at a time
print "=== The time is currently ",'%s:%s:%s' % (now.hour, now.minute, now.second)," ==="#Printing the time
printing("=============================================")#
time.sleep(0.5)#
printing("=| WELCOME TO THE TILE SHOP! |=")#
time.sleep(0.5)#
printing("=| |=")#
time.sleep(0.5)#
printing("=| |=======================")#
time.sleep(0.5)#
name = raw_input ("=|What is your name? |= ")#asks for name
time.sleep(0.5)#
print "=|Welcome",name,"to the tile shop!!!!|="#greets user
time.sleep(0.5)
isCorrectNumber = False#=============================
while isCorrectNumber == False: # This loop checks is the input for length and width is float or int, not str
length = raw_input("=|Length of area in metres|= ") #
width = raw_input("=|Width of area in metres|= ") #
try:#
width = float(width)#
length = float(length)#
isCorrectNumber=True#
except ValueError:#
print "=|Invalid dimensions!|=" #
pass #==========================================
size = float(length)*float(width)#works out size
print "=|The room is",size,"metres squared, there are 4 types of tiles|="#
time.sleep(0.5)#
printing("=|Economy (asbestos) $1 per square metre|=")#
time.sleep(0.5)#
printing("=|Standard (granite) $5 per square metre|=")#
time.sleep(0.5)#
printing("=|Premium (marble) $10 per square metre|=")#
time.sleep(0.5)#
printing("=|Luxury (plutonium)$5000 per square metre|=")#
time.sleep(0.5)#
prog2=True#
while prog2==True:#This loop displays prices for room size
tileq=raw_input("=|Which type would you like?|= ").lower()#
if tileq=="economy":#
price = size*1#probably unnecessary as x*1=x
print "=|That will be $"+str(price)+"|="#
prog2=False#
elif tileq=="standard":#
price = size*5#
print "=|That will be $"+str(price)+"|="#
prog2=False#
elif tileq=="premium":#
price = size*10#
print "=|That will be $"+str(price)+"|="#
prog2=False#
elif tileq=="luxury":#
            price = size*5000#matches the advertised $5000 per square metre
print "=|That will be $"+str(price)+"|="#
prog2=False#
prog3=True#
time.sleep(0.5)#
while prog3==True:#This loop is used to add the price of plaster if the user wants it.
tilep=raw_input("=|Would you like to buy plaster as well? It is $1 per square metre.|=").lower()#.lower is used to convert the input into lower case
time.sleep(0.5)#
if tilep=="yes":#
            price = price+size*1#plaster is $1 per square metre of the room
|
print "=|That will be $"+str(price)+"|="#
prog3=False#
elif tilep=="no":#
prog3=False#
time.sleep(0.5)#
typing("Generating Unique Custom ID")#
customid=name+str(len(name))+str(len(tileq))+str(len(tilep))+
|
str(length)+str(width)#The ID is name[lengthofname][lengthofquality][lengthofprice][length][width]
print "Your Customer ID is",str(customid)#print ID
with open ("Tiledetails.csv","ab") as csvfile:
usr=csv.writer (csvfile, delimiter=",",
quotechar=",", quoting=csv.QUOTE_MINIMAL)
usr.writerow([name,customid,tileq,size,tilep,price])
cid[len(cid)+1]=(str(name)+str(customid))
print cid
typing("=|Thanks for tiling!|=")
time.sleep(120)
call(["cls"], shell = True)
time.sleep (10)#waits 10 seconds then restarts
|
Heufneutje/txircd
|
txircd/modules/core/accountdata.py
|
Python
|
bsd-3-clause
| 1,023 | 0.021505 |
from twisted.
|
plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implementer
from typing import Callable, List, Optional, Tuple
# Numerics and names are taken from the IRCv3.1 SASL specification at http://ircv3.net/specs/extensions/sasl-3.1.html
irc.RPL_LO
|
GGEDIN = "900"
irc.RPL_LOGGEDOUT = "901"
@implementer(IPlugin, IModuleData)
class AccountMetadata(ModuleData):
name = "AccountData"
core = True
def actions(self) -> List[Tuple[str, int, Callable]]:
return [ ("usermetadataupdate", 10, self.sendLoginNumeric) ]
def sendLoginNumeric(self, user: "IRCUser", key: str, oldValue: str, value: str, fromServer: Optional["IRCServer"]) -> None:
if key == "account":
if value is None:
user.sendMessage(irc.RPL_LOGGEDOUT, user.hostmask(), "You are now logged out")
else:
user.sendMessage(irc.RPL_LOGGEDIN, user.hostmask(), value, "You are now logged in as {}".format(value))
accounts = AccountMetadata()
|
aetros/aetros-cli
|
aetros/commands/GPUCommand.py
|
Python
|
mit
| 1,123 | 0.005343 |
from __future__ import absolute_import, print_function, division
import argparse
import sys
class GPUCommand:
def __init__(self, logger):
self.logger = logger
self.client = None
self.registered = False
self.active = True
def main(self, args):
import aetros.cuda_gpu
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
prog=aetros.const.__prog__ + ' gpu')
try:
print("CUDA version: " +str(aetros.cuda_gpu.get_version()))
except aetros.cuda_gpu.CudaNotImplementedException:
            sys.stderr.write('It seems you do not have NVIDIA CUDA installed prop
|
erly.')
sys.exit(2)
for gpu in aetros.cuda_gpu.get_ordered_devices():
properties = aetros.cuda_gpu.get_device_properties(gpu['device'], all=True)
free, total = aetros.cuda_gpu.get_memory(gpu['device'])
print("%s GPU id=%s %s (memory %.2fGB, free %.2fGB)" %(gpu['fullId'], str(gpu['id']), properties['name'], total/1024/1024/1024, free/1024/1
|
024/1024))
|
LSIR/gsn
|
gsn-webui/app/urls.py
|
Python
|
gpl-3.0
| 712 | 0 |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), nam
|
e='home')
Includin
|
g another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
urlpatterns = [
url(r'^', include('gsn.urls')),
]
|
sandeva/appspot
|
settings.py
|
Python
|
apache-2.0
| 3,965 | 0.002774 |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for google-app-engine-django project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'dummy' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''
|
# Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATA
|
BASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'hvhxfm5u=^*v&doo#oq8x*eg8+1&9sxbye@=umutgn^t_sg_nx'
# Ensure that email is not sent via SMTP by default to match the standard App
# Engine SDK behaviour. If you want to send email via SMTP then add the name of
# your mailserver here.
EMAIL_HOST = ''
TEMPLATE_DIRS = ("mysite.templates")
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'google.appengine.ext.ndb.django_middleware.NdbDjangoMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.doc.XViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
# 'django.core.context_processors.media', # 0.97 only.
# 'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH, 'templates')
)
INSTALLED_APPS = (
'astro',
'astro.location',
'astro.chart',
# 'appengine_django',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
)
|
borqsat/TCT-lite
|
setup.py
|
Python
|
gpl-2.0
| 506 | 0.047431 |
#!/usr/bin/python
from setuptools import setup, find_packages
setup(
name = "testkit-lite",
description = "Test runner for test execution",
url = "https://github.com/testkit/testkit-lite",
author = "Cathy Shen",
author_email = "cathy.shen@
|
intel.com",
version = "2.3.4",
inclu
|
de_package_data = True,
data_files = [('/opt/testkit/lite/',
('VERSION', 'doc/testkit-lite_user_guide_for_tct.pdf'))],
scripts = ('testkit-lite',),
packages = find_packages(),
)
|
jannewulf/Anki-Translator
|
TranslatorAddon/GUI/TranslatorDialog.py
|
Python
|
gpl-3.0
| 7,181 | 0.004178 |
from PyQt4.QtGui import *
from PyQt4.QtCore import Qt
from aqt.utils import tooltip
from TranslatorAddon.Parser.PONSParser import PONSParser
# This class describes the Dialog Window in which a vocable can be translated
class TranslatorDialog(QDialog):
col0Width = 40
def __init__(self, vocable, defaultSourceLanguage, defaultTargetLanguage, defaultLoadGrammarInfos):
super(TranslatorDialog, self).__init__()
# save default values
self.defaultSrc = defaultSourceLanguage
self.defaultTgt = defaultTargetLanguage
self.defaultGram = defaultLoadGrammarInfos
# Save the looked up vocable (not updated -> use lineEdit to get current value)
self.editorVocable = vocable
self.translations = []
self.parser = PONSParser()
# set up gui
self.setupUi()
# setting up ui elements
def setupUi(self):
# Set up window
self.setWindowTitle("Translator")
self.setModal(True)
self.resize(800, 600)
self.createSettings()
# create vocab line edit, translations table etc.
self.createTranslContent()
# Add Ok and Cancel buttons
self.createButtonBox()
# bring ui elements together in main layout
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.settingsBox)
mainLayout.addWidget(self.translContentLayout)
mainLayout.addWidget(self.buttonBox)
self.setLayout(mainLayout)
self.lineEditVocable.setFocus()
def createSettings(self):
self.settingsBox = QGroupBox("Settings")
self.cmbBoxSourceLang = QComboBox()
self.cmbBoxSourceLang.addItems(sorted(self.parser.getSourceLanguages().values()))
try:
defaultLangCode = self.parser.getSourceLanguages()[self.defaultSrc]
except Exception:
defaultLangCode = ""
index = self.cmbBoxSourceLang.findText(defaultLangCode)
if index >= 0:
self.cmbBoxSourceLang.setCurrentIndex(index)
self.cmbBoxTargetLang = QComboBox()
self.updateTargetLanguages()
self.cmbBoxSourceLang.currentIndexChanged.
|
connect(self.updateTargetLanguages)
self.chkBoxGrammarInfo = QCheckBox()
self.chkBoxGrammarInfo.setChecked(self.defaultGram)
layout = QHBoxLayout()
layout.addWidget(QLabel("Source Language"))
layout.addWidget(self.cmbBoxSourceLang)
|
layout.addStretch(1)
layout.addWidget(QLabel("Target Language"))
layout.addWidget(self.cmbBoxTargetLang)
layout.addStretch(1)
layout.addWidget(self.chkBoxGrammarInfo)
layout.addWidget(QLabel("Load Grammar Infos"))
self.settingsBox.setLayout(layout)
# creates all the gui elements except for the button box on the bottom
def createTranslContent(self):
self.translContentLayout = QGroupBox("Translations")
layout = QFormLayout()
# translate button
self.buttonTranslate = QPushButton("Translate")
self.buttonTranslate.clicked.connect(self.translate)
# vocabulary line edit
self.lineEditVocable = QLineEdit(self.editorVocable)
self.lineEditVocable.returnPressed.connect(self.buttonTranslate.click)
# translations table
self.tableTranslations = QTableWidget()
self.tableTranslations.setColumnCount(3)
self.tableTranslations.setHorizontalHeaderLabels(["Use", "Vocable", "Translation"])
self.tableTranslations.horizontalHeader().setResizeMode(QHeaderView.Interactive)
self.tableTranslations.horizontalHeader().setStretchLastSection(True)
self.tableTranslations.horizontalHeader().resizeSection(0, self.col0Width)
self.tableTranslations.horizontalHeader().resizeSection(1, (self.tableTranslations.size().width() - self.col0Width) / 2)
self.tableTranslations.verticalHeader().hide()
policy = QSizePolicy()
policy.setHorizontalPolicy(policy.Expanding)
policy.setVerticalPolicy(policy.Expanding)
policy.setVerticalStretch(1)
self.tableTranslations.setSizePolicy(policy)
layout.addRow(QLabel("Vocable"), self.lineEditVocable)
layout.addRow(None, self.buttonTranslate)
layout.addRow(QLabel("Translations"), self.tableTranslations)
self.translContentLayout.setLayout(layout)
# creates the 'Ok' and 'Cancel' buttons
def createButtonBox(self):
self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
self.buttonBox.accepted.connect(self.setFieldsAndAccept)
self.buttonBox.rejected.connect(self.reject)
# called function on click on translate button
def translate(self):
vocab = self.lineEditVocable.text()
src = self.parser.getLangCode(str(self.cmbBoxSourceLang.currentText()))
tgt = self.parser.getLangCode(str(self.cmbBoxTargetLang.currentText()))
grammarInfos = self.chkBoxGrammarInfo.isChecked()
translations = self.parser.getTranslation(vocab, src, tgt, grammarInfos)
self.setTableContent(translations)
# updating the content of the table
def setTableContent(self, content):
if content is None:
return
if len(content) == 0:
tooltip("No translations found.")
return
self.tableTranslations.setRowCount(len(content))
for i, row in enumerate(content):
for j, col in enumerate(row):
if j == 0:
chkBoxItem = QTableWidgetItem()
chkBoxItem.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
chkBoxItem.setCheckState(Qt.Unchecked)
self.tableTranslations.setItem(i, j, chkBoxItem)
item = QTableWidgetItem(col)
self.tableTranslations.setItem(i, j + 1, item)
# collect selected translations and return to editor window
def setFieldsAndAccept(self):
rows = self.tableTranslations.rowCount()
for i in range(rows):
if self.tableTranslations.item(i, 0).checkState() == Qt.Checked:
self.translations.append(
[self.tableTranslations.item(i, 1).text(),
self.tableTranslations.item(i, 2).text()])
self.accept()
# Prevent the dialog from closing on enter pressed
def keyPressEvent(self, QKeyEvent):
if QKeyEvent.key() == Qt.Key_Enter or QKeyEvent.key() == Qt.Key_Return:
return
# Update the target languages in the target combo box
def updateTargetLanguages(self):
self.cmbBoxTargetLang.clear()
current = str(self.cmbBoxSourceLang.currentText())
key = self.parser.getLangCode(current)
self.cmbBoxTargetLang.addItems(sorted(self.parser.getTargetLanguages(key).values()))
try:
defaultLangCode = self.parser.getSourceLanguages()[self.defaultTgt]
except Exception:
defaultLangCode = ""
index = self.cmbBoxTargetLang.findText(defaultLangCode)
if index >= 0:
self.cmbBoxTargetLang.setCurrentIndex(index)
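For reference, a hedged sketch of driving the same parser without any Qt; the PONSParser method names and argument order are inferred from the calls made in this dialog, not from separate documentation.
# Assumed API (taken from the dialog above): getLangCode, getTranslation.
from TranslatorAddon.Parser.PONSParser import PONSParser
def lookup(vocable, source_name, target_name, grammar_infos=False):
    parser = PONSParser()
    src = parser.getLangCode(source_name)
    tgt = parser.getLangCode(target_name)
    # Rows come back shaped like the table above: [vocable, translation]
    return parser.getTranslation(vocable, src, tgt, grammar_infos)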
|
dims/cinder
|
cinder/zonemanager/fc_zone_manager.py
|
Python
|
apache-2.0
| 11,517 | 0 |
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
ZoneManager is responsible to manage access control using FC zoning
when zoning mode is set as 'fabric'.
ZoneManager provides interfaces to add connection and remove connection
for given initiator and target list associated with a FC volume attach and
detach operation.
**Related Flags**
:zone_driver: Used by:class:`ZoneManager`.
Defaults to
`cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver`
:zoning_policy: Used by: class: 'ZoneManager'. Defaults to 'none'
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
from cinder import exception
from cinder.i18n import _, _LI
from cinder.volume import configuration as config
from cinder.zonemanager import fc_common
import cinder.zonemanager.fczm_constants as zone_constant
LOG = logging.getLogger(__name__)
zone_manager_opts = [
cfg.StrOpt('zone_driver',
default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
'.BrcdFCZoneDriver',
help='FC Zone Driver responsible for zone management'),
cfg.StrOpt('zoning_policy',
default='initiator-target',
help='Zoning policy configured by user; valid values include '
'"initiator-target" or "initiator"'),
cfg.StrOpt('fc_fabric_names',
help='Comma separated list of Fibre Channel fabric names.'
' This list of names is used to retrieve other SAN credentials'
' for connecting to each SAN fabric'),
cfg.StrOpt('fc_san_lookup_service',
default='cinder.zonemanager.drivers.brocade'
'.brcd_fc_san_lookup_service.BrcdFCSanLookupService',
help='FC SAN Lookup Service')
]
CONF = cfg.CONF
CONF.register_opts(zone_manager_opts, group='fc-zone-manager')
class ZoneManager(fc_common.FCCommon):
"""Manages Connection control during attach/detach.
Version History:
1.0 - Initial version
1.0.1 - Added __new__ for singleton
1.0.2 - Added friendly zone name
"""
VERSION = "1.0.2"
driver = None
fabric_names = []
def __new__(class_, *args, **kwargs):
if not hasattr(class_, "_instance"):
class_._instance = object.__new__(class_)
return class_._instance
def __init__(self, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
super(ZoneManager, self).__init__(**kwargs)
self.configuration = config.Configuration(zone_manager_opts,
'fc-zone-manager')
self._build_driver()
def _build_driver(self):
zone_driver = self.configuration.zone_driver
LOG.debug("Zone driver from config: %(driver)s",
{'driver': zone_driver})
zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager')
# Initialize vendor specific implementation of FCZoneDriver
self.driver = importutils.import_object(
zone_driver,
configuration=zm_config)
def get_zoning_state_ref_count(self, initiator_wwn, target_wwn):
"""Zone management state check.
Performs state check for given I-T pair to return the current count of
active attach for the pair.
"""
# TODO(sk): ref count state management
count = 0
# check the state for I-T pair
return count
def add_connection(self, conn_info):
"""Add connection control.
Adds connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
host_name = None
storage_system = None
try:
initiator_target_map = (
conn_info[zone_constant.DATA][zone_constant.IT_MAP])
if zone_constant.HOST in conn_info[zone_constant.DATA]:
host_name = conn_info[
zone_constant.DATA][
zone_constant.HOST].replace(" ", "_")
if zone_constant.STORAGE in conn_info[zone_constant.DATA]:
storage_system = (
conn_info[
zone_constant.DATA][
zone_constant.STORAGE].replace(" ", "_"))
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.debug("Target list : %(targets)s",
{'targets': target_list})
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug("Fabric map after context lookup: %(fabricmap)s",
{'fabricmap': fabric_map})
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
t_list = fabric_map[fabric]
# get valid I-T map to add connection control
i_t_map = {initiator: t_list}
valid_i_t_map = self.get_valid_initiator_target_map(
|
i_t_map, True)
LOG.info(_LI("Final filtered map for fabric: %(i_t_map)s"),
{'i_t_map': valid_i_t_map})
# Call driver to add connection control
|
self.driver.add_connection(fabric, valid_i_t_map,
host_name, storage_system)
LOG.info(_LI("Add connection: finished iterating "
"over all target list"))
except Exception as e:
msg = _("Failed adding connection for fabric=%(fabric)s: "
"Error: %(err)s") % {'fabric': connected_fabric,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.ZoneManagerException(reason=msg)
def delete_connection(self, conn_info):
"""Delete connection.
Updates/deletes connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
host_name = None
storage_system = None
try:
initiator_target_map = (
conn_info[zone_constant.DATA][zone_constant.IT_MAP])
if zone_constant.HOST in conn_info[zone_constant.DATA]:
host_name = conn_info[zone_constant.DATA][zone_constant.HOST]
if zone_constant.STORAGE in conn_info[zone_constant.DATA]:
storage_system = (
conn_info[
zone_constant.DATA][
zone_constant.STORAGE].replace(" ", "_"))
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.info(_LI("Delete connection target list: %(targets)s"),
{'targets': target_list})
# get SAN context for the target li
|
Princu7/open-event-orga-server
|
migrations/versions/b5abafa45063_.py
|
Python
|
gpl-3.0
| 1,145 | 0.013974 |
"""empty message
Revision ID: b5abafa45063
Revises: 4e5dd0df14b5
Create Date: 2016-08-06 22:29:36.948000
"""
# revision identifiers, used by Alembic.
revision = 'b5abafa45063'
down_revision = '4e5dd0df14b5'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('stripe_authorizations',
sa.Column('id',
|
sa.Integer(), nullable=False),
sa.Column('stripe_secret_key', sa.String(), nullable=True),
sa.Column('stripe_refresh_token', sa.String(), nullable=True),
sa.C
|
olumn('stripe_publishable_key', sa.String(), nullable=True),
sa.Column('stripe_user_id', sa.String(), nullable=True),
sa.Column('stripe_email', sa.String(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('stripe_authorizations')
### end Alembic commands ###
|
scattm/DanceCat
|
DanceCat/Console/__init__.py
|
Python
|
mit
| 2,383 | 0 |
"""This module include console commands for DanceCat."""
from __future__ import print_function
import datetime
import sqlalchemy.exc
from dateutil.relativedelta import relativedelta
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from DanceCat import app, db, Models, Constants
# pylint: disable=C0103
migrate = Migrate(app, db)
manager = Manager(app)
# pylint: enable=C0103
@manager.command
def list_all():
"""List all commands."""
print('Init database:')
print('- db_create_all')
print('Migrate Database')
print('- db init')
print('- db migrate')
print('- db upgrade')
print('- db downgrade')
print('Scheduling')
print('- schedule_update')
return True
@manager.command
def db_create_all():
"""DanceCat data
|
base initial."""
db.create_all()
@manage
|
r.command
def schedule_update():
"""Update outdated schedules on offline time."""
schedules = Models.Schedule.query.filter(
Models.Schedule.is_active,
Models.Schedule.schedule_type != Constants.SCHEDULE_ONCE,
Models.Schedule.next_run <= datetime.datetime.now()
).all()
while len(schedules) > 0:
for schedule in schedules:
print(
"Update next run time for schedule with id {id}.".format(
id=schedule.schedule_id
)
)
schedule.update_next_run(True)
schedule.next_run += relativedelta(minutes=1)
db.session.commit()
schedules = Models.Schedule.query.filter(
Models.Schedule.is_active,
Models.Schedule.next_run < datetime.datetime.now()
).all()
print("Finished!")
@manager.command
def add_allowed_user(email):
"""
Add given email to allowed_email table.
:param email: Given email that will be allowed to create new user.
:return: None.
"""
try:
allowed_email = Models.AllowedEmail(email)
db.session.add(allowed_email)
db.session.commit()
print("Added \"{email}\" to allowed users list.".format(
email=email
))
except sqlalchemy.exc.IntegrityError:
print("\"{email}\" was already in the allowed users list.".format(
email=email
))
db.session.close()
# Add Migrate commands.
manager.add_command('db', MigrateCommand)
|
snazy2000/netbox
|
netbox/dcim/formfields.py
|
Python
|
apache-2.0
| 607 | 0 |
from __future__ import unicode_literals
from netaddr import
|
EUI, AddrFormatError
from django import forms
from django.core.exceptions import ValidationError
#
# Form fi
|
elds
#
class MACAddressFormField(forms.Field):
default_error_messages = {
'invalid': "Enter a valid MAC address.",
}
def to_python(self, value):
if not value:
return None
if isinstance(value, EUI):
return value
try:
return EUI(value, version=48)
except AddrFormatError:
raise ValidationError("Please specify a valid MAC address.")
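A small standalone sketch of the parsing behaviour the field above relies on; it calls netaddr directly so no Django settings are required, and the sample addresses are arbitrary.
# Exercises the same EUI(value, version=48) call used in to_python() above.
from netaddr import EUI, AddrFormatError
for raw in ('00:1b:77:49:54:fd', '00-1B-77-49-54-FD', 'not-a-mac'):
    try:
        print('{0} -> {1}'.format(raw, EUI(raw, version=48)))
    except AddrFormatError:
        print('{0} -> invalid (the form field raises ValidationError)'.format(raw))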
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/test/test_winreg.py
|
Python
|
gpl-2.0
| 21,678 | 0.000554 |
# Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
import os, sys, errno
import unittest
from test import support
import threading
from platform import machine
# Do this first so test will be skipped if module doesn't exist
support.import_module('winreg', required_on=['win'])
# Now import everything
from winreg import *
try:
REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
REMOTE_NAME = None
# tuple of (major, minor)
WIN_VER = sys.getwindowsversion()[:2]
# Some tests should only run on 64-bit architectures where WOW64 will be.
WIN64_MACHINE = True if machine() == "AMD64" else False
# Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses
# registry reflection and formerly reflected keys are shared instead.
# Windows 7 and Windows Server 2008 R2 are version 6.1. Due t
|
o this, some
# tests are only valid up until 6.1
HAS_REFLECTION = True if WIN_VER < (6, 1) else False
# Use a per-process key to prevent concurrent test runs
|
(buildbot!) from
# stomping on each other.
test_key_base = "Python Test Key [%d] - Delete Me" % (os.getpid(),)
test_key_name = "SOFTWARE\\" + test_key_base
# On OS'es that support reflection we should test with a reflected key
test_reflect_key_name = "SOFTWARE\\Classes\\" + test_key_base
test_data = [
("Int Value", 45, REG_DWORD),
("Qword Value", 0x1122334455667788, REG_QWORD),
("String Val", "A string value", REG_SZ),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Raw Data", b"binary\x00data", REG_BINARY),
("Big String", "x"*(2**14-1), REG_SZ),
("Big Binary", b"x"*(2**14), REG_BINARY),
# Two and three kanjis, meaning: "Japan" and "Japanese")
("Japanese 日本", "日本語", REG_SZ),
]
class BaseWinregTests(unittest.TestCase):
def setUp(self):
# Make sure that the test key is absent when the test
# starts.
self.delete_tree(HKEY_CURRENT_USER, test_key_name)
def delete_tree(self, root, subkey):
try:
hkey = OpenKey(root, subkey, 0, KEY_ALL_ACCESS)
except OSError:
# subkey does not exist
return
while True:
try:
subsubkey = EnumKey(hkey, 0)
except OSError:
# no more subkeys
break
self.delete_tree(hkey, subsubkey)
CloseKey(hkey)
DeleteKey(root, subkey)
def _write_test_data(self, root_key, subkeystr="sub_key",
CreateKey=CreateKey):
# Set the default value for this key.
SetValue(root_key, test_key_name, REG_SZ, "Default value")
key = CreateKey(root_key, test_key_name)
self.assertTrue(key.handle != 0)
# Create a sub-key
sub_key = CreateKey(key, subkeystr)
# Give the sub-key some named values
for value_name, value_data, value_type in test_data:
SetValueEx(sub_key, value_name, 0, value_type, value_data)
# Check we wrote as many items as we thought.
nkeys, nvalues, since_mod = QueryInfoKey(key)
self.assertEqual(nkeys, 1, "Not the correct number of sub keys")
self.assertEqual(nvalues, 1, "Not the correct number of values")
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "Not the correct number of sub keys")
self.assertEqual(nvalues, len(test_data),
"Not the correct number of values")
# Close this key this way...
# (but before we do, copy the key as an integer - this allows
# us to test that the key really gets closed).
int_sub_key = int(sub_key)
CloseKey(sub_key)
try:
QueryInfoKey(int_sub_key)
self.fail("It appears the CloseKey() function does "
"not close the actual key!")
except OSError:
pass
# ... and close that key that way :-)
int_key = int(key)
key.Close()
try:
QueryInfoKey(int_key)
self.fail("It appears the key.Close() function "
"does not close the actual key!")
except OSError:
pass
def _read_test_data(self, root_key, subkeystr="sub_key", OpenKey=OpenKey):
# Check we can get default value for this key.
val = QueryValue(root_key, test_key_name)
self.assertEqual(val, "Default value",
"Registry didn't give back the correct value")
key = OpenKey(root_key, test_key_name)
# Read the sub-keys
with OpenKey(key, subkeystr) as sub_key:
# Check I can enumerate over the values.
index = 0
while 1:
try:
data = EnumValue(sub_key, index)
except OSError:
break
self.assertEqual(data in test_data, True,
"Didn't read back the correct test data")
index = index + 1
self.assertEqual(index, len(test_data),
"Didn't read the correct number of items")
# Check I can directly access each item
for value_name, value_data, value_type in test_data:
read_val, read_typ = QueryValueEx(sub_key, value_name)
self.assertEqual(read_val, value_data,
"Could not directly read the value")
self.assertEqual(read_typ, value_type,
"Could not directly read the value")
sub_key.Close()
# Enumerate our main key.
read_val = EnumKey(key, 0)
self.assertEqual(read_val, subkeystr, "Read subkey value wrong")
try:
EnumKey(key, 1)
self.fail("Was able to get a second key when I only have one!")
except OSError:
pass
key.Close()
def _delete_test_data(self, root_key, subkeystr="sub_key"):
key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
sub_key = OpenKey(key, subkeystr, 0, KEY_ALL_ACCESS)
# It is not necessary to delete the values before deleting
# the key (although subkeys must not exist). We delete them
# manually just to prove we can :-)
for value_name, value_data, value_type in test_data:
DeleteValue(sub_key, value_name)
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "subkey not empty before delete")
self.assertEqual(nvalues, 0, "subkey not empty before delete")
sub_key.Close()
DeleteKey(key, subkeystr)
try:
# Shouldn't be able to delete it twice!
DeleteKey(key, subkeystr)
self.fail("Deleting the key twice succeeded")
except OSError:
pass
key.Close()
DeleteKey(root_key, test_key_name)
# Opening should now fail!
try:
key = OpenKey(root_key, test_key_name)
self.fail("Could open the non-existent key")
except OSError: # Use this error name this time
pass
def _test_all(self, root_key, subkeystr="sub_key"):
self._write_test_data(root_key, subkeystr)
self._read_test_data(root_key, subkeystr)
self._delete_test_data(root_key, subkeystr)
def _test_named_args(self, key, sub_key):
with CreateKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as ckey:
self.assertTrue(ckey.handle != 0)
with OpenKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as okey:
self.assertTrue(okey.handle != 0)
class LocalWinregTests(BaseWinregTests):
def test_registry_works(self):
self._test_all(HKEY
|
ex0hunt/redrat
|
func/viewer.py
|
Python
|
bsd-2-clause
| 2,852 | 0.00495 |
from common.connector import redmine
class ViewIssues:
def __init__(self, view_type, assigned_to='me', minimal_priority=0, exclude_projects=(), milestone=None):
self.redmine = redmine()
self.assigned_to = assigned_to
self.minimal_priority = minimal_priority
self.exclude_projects = exclude_projects
self.milestone = milestone
if view_type == 'list':
self.filtered_issues()
elif view_type == 'users':
self.users = self.user_list()
else:
self.num_issues()
def colorify_priority(self, state):
if state.id == 2:
return '\033[92m'
elif state.id == 4:
return '\033[1m\033[91m**'
elif state.id == 3:
return '\033[91m'
elif state.id == 1:
return '\033[94m'
else:
return '\033[94m'
def filtered_issues(self):
i_count = 0
for p in self.redmine.project.all():
if p.name in self.exclude_projects:
continue
issues = self.redmine.issue.filter(project_id=p.identifier, assigned_to_id=self.assigned_to)
if len(issues) == 0:
continue
print('Проект: %s (%s)' % (p.name, p.identifier))
for i in issues:
try:
if self.mileston
|
e != 'all' and str(i.fixed_version) not in self.milestone:
continue
except:
continue
i_count += 1
if i.priority.id < self.minimal_priority:
continue
color_priority = self.colorify_priority(i.priority)
color_state = self.colorify_pri
|
ority(i.status)
end_color = '\033[0m'
print('[%s%s%s][%s%s%s]\t%i:\t%s' %(color_priority,
i.priority,
end_color,
color_state,
i.status,end_color,
i.id, i))
print('='*10)
print('Всего: %i' % i_count)
def num_issues(self):
issues = self.redmine.issue.filter(assigned_to_id=self.assigned_to)
count = len([i for i in issues if i.priority.id >= self.minimal_priority if i.project.name not in self.exclude_projects])
print('Активных задач: %i' % count)
def user_list(self):
print(self.redmine)
return [u for u in self.redmine.users]
class Colorify:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
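A short usage sketch for the ANSI codes above (the label strings are arbitrary): wrap the text and always close with ENDC so later terminal output is unaffected.
def colorize(text, color=Colorify.WARNING):
    # Wraps text in an ANSI colour code and resets formatting afterwards.
    return '%s%s%s' % (color, text, Colorify.ENDC)
print(colorize('urgent issue', Colorify.FAIL))
print(colorize('normal issue', Colorify.OKBLUE))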
|
chrisb87/advent_of_code_2016
|
day13/test_day13.py
|
Python
|
unlicense
| 783 | 0.045977 |
import unittest
import pdb
from day13 import *
class TestDay13(unittest.TestCase):
def test_is_wall(self):
tests = (
(0, 0, False),
(1, 0, True),
(2, 0, False),
(-1, 0, True),
(0, -1, Tru
|
e),
)
for x, y, expected in tests:
self.assertEqual(
is_wall(x, y, 10),
expected,
"(%d,%d) should be %s" % (x,y, expected))
def test_solve_example(self):
solution = solve((1,1), (7,4), 10)
self.assertEqual(len(solution) - 1, 11)
@unittest.skip("slow")
def test_solve_part_1(self):
solution = solve((1,1), (31,39), 1350)
self.asser
|
tEqual(len(solution) - 1, 92)
@unittest.skip("slow")
def test_solve_part_2(self):
solution = solve((1,1), (31,39), 1350, 50)
self.assertEqual(solution, 124)
if __name__ == "__main__":
unittest.main()
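The day13 module under test is not included above; the following is a hedged reconstruction of is_wall that is consistent with these tests (negative coordinates count as walls, and (1, 0) is a wall for favourite number 10), following the usual Advent of Code 2016 day 13 rule.
# Reconstruction for illustration only -- not necessarily the repo's own code.
def is_wall(x, y, favorite):
    if x < 0 or y < 0:
        return True
    value = x * x + 3 * x + 2 * x * y + y + y * y + favorite
    return bin(value).count('1') % 2 == 1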
|
JoeJasinski/WindyTransit
|
mobiletrans/urls.py
|
Python
|
mit
| 1,139 | 0.022827 |
from django.conf.urls import patterns, include, url
from .views import MapView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(a
|
dmin.site.urls)),
url('^$', 'mobi
|
letrans.views.index', { 'template_name':'index.html'},
name="index"),
url('^about/$', 'mobiletrans.views.about', { 'template_name':'about.html'},
name="about"),
url('^routemap/$', MapView.as_view( template_name='routemap.html'),
name="routemap"),
url('^transitheat/$', MapView.as_view( template_name='transitheat.html'),
name="transitheat"),
url('^kml/$', 'mobiletrans.mtlocation.views.renderkml', { },
name="mtlocation_renderkml"),
url('^kml/longlat/(?P<long>[-\d.]+),(?P<lat>[-\d.]+)/$',
'mobiletrans.mtlocation.views.renderkml', { },
name="mtlocation_renderkml_longlat"),
url('^kml/latlong/(?P<lat>[-\d.]+),(?P<long>[-\d.]+)/$',
'mobiletrans.mtlocation.views.renderkml', { },
name="mtlocation_renderkml_latlong"),
url('^api/', include('mobiletrans.mtapi.urls')),
)
|
autosportlabs/RaceCapture_App
|
autosportlabs/uix/color/colorsequence.py
|
Python
|
gpl-3.0
| 1,401 | 0.002141 |
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
from kivy.utils import get_color_from_hex as rgb
DEFAULT_COLOR_SEQUENCE = ['A0A0A0', '8A00B8', '3366FF', 'F5B800', '8AB800', 'f45b5b', 'ff0066']
class ColorSequence(object):
color_index = 0
colors = []
color_map = {}
def __init__(self, colors=DEFAULT_COLOR_SEQUENCE):
self.colors = colors
def get_color(self, key):
color = self.color_map.get(key)
if not colo
|
r:
index = s
|
elf.color_index
color = rgb(self.colors[index])
index = index + 1 if index < len(self.colors) - 1 else 0
self.color_index = index
self.color_map[key] = color
return color
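A brief usage sketch for the class above; the channel names are arbitrary labels. Colours are handed out in sequence and then cached per key, so repeated lookups stay stable.
sequence = ColorSequence()
rpm_color = sequence.get_color('RPM')          # first colour in the sequence
tps_color = sequence.get_color('TPS')          # next colour in the sequence
assert sequence.get_color('RPM') == rpm_color  # same key -> same colour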
|
UMWRG/HydraPlatform
|
HydraLib/python/HydraLib/config.py
|
Python
|
gpl-3.0
| 5,352 | 0.003737 |
# (c) Copyright 2013, 2014, University of Manchester
#
# HydraPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HydraPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with HydraPlatform. If not, see <http://www.gnu.org/licenses/>
#
import os
import glob
import ConfigParser
import sys
import logging
global CONFIG
CONFIG = None
global localfiles
global localfile
global repofile
global repofiles
global userfile
global userfiles
global sysfile
global sysfiles
def load_config():
"""Load a config file. This function looks for a config (*.ini) file in the
following order::
(1) ./*.ini
(2) ~/.config/hydra/
(3) /etc/hydra
(4) [...]/HYDRA/HydraLib/trunk/../../config/
(1) will override (2) will override (3) will override (4). Parameters not
defined in (1) will be taken from (2). Parameters not defined in (2) will
be taken from (3). (3) is the config folder that will be checked out from
the svn repository. (2) Will be be provided as soon as an installable
distribution is available. (1) will usually be written individually by
every user."""
global localfiles
global localfile
global repofile
global repofiles
global userfile
global userfiles
global sysfile
global sysfiles
global CONFIG
logging.basicConfig(level='INFO')
config = ConfigParser.ConfigParser(allow_no_value=True)
modulepath = os.path.dirname(os.path.abspath(__file__))
localfile = os.getcwd() + '/hydra.ini'
localfiles = glob.glob(localfile)
repofile = modulepath + '/../../../config/hydra.ini'
repofiles = glob.glob(repofile)
if os.name == 'nt':
import winpaths
userfile = os.path.expanduser('~') + '/AppData/Local/hydra.ini'
userfiles = glob.glob(userfile)
sysfile = winpaths.get_common_documents() + '/Hydra/hydra.ini'
sysfiles = glob.glob(sysfile)
else:
userfile = os.path.expanduser('~') + '/.config/hydra/hydra.ini'
userfiles = glob.glob(userfile)
sysfile = '/etc/hydra/hydra.ini'
sysfiles = glob.glob(sysfile)
for ini_file in repofiles:
logging.debug("Repofile: %s"%ini_file)
config.read(ini_file)
for ini_file in sysfiles:
logging.info("Sysfile: %s"%ini_file)
config.read(ini_file)
for ini_file in userfiles:
logging.info("Userfile: %s"%ini_file)
config.read(ini_file)
for ini_file in localfiles:
logging.info("Localfile: %s"%ini_file)
config.read(ini_file)
if os.name == 'nt':
set_windows_env_variables(config)
try:
home_dir = config.get('DEFAULT', 'home_dir')
except:
home_dir = os.environ.get('HYDRA_HOME_DIR', '~')
config.set('DEFAULT', 'home_dir', os.path.expanduser(home_dir))
try:
hydra_base = config.get('DEFAULT', 'hydra_base_dir')
except:
hydra_base = os.environ.get('HYDRA_BASE_DIR', modulepath + '/../../../')
config.set('DEFAULT', 'hydra_base_dir', os.path.expanduser(hydra_base))
CONFIG = config
return config
def set_windows_env_variables(config):
import winpaths
config.set('DEFAULT', 'common_app_data_folder', winpaths.get_common_appdata())
config.set('DEFAULT', 'win_local_appdata', winpaths.get_local_appdata())
config.set('DEFAULT', 'win_appdata', winpaths.get_appdata())
config.set('DEFAULT', 'win_desktop', winpaths.get_desktop())
config.set('DEFAULT', 'win_programs', winpaths.get_programs())
config.set('DEFAULT', 'win_common_admin_tools', winpaths.get_common_admin_tools())
config.set('DEFAULT', 'win_common_documents', winpaths.get_common_documents())
config.set('DEFAULT', 'win_cookies', winpaths.get_cookies())
config.set('DEFAULT', 'win_history', winpaths.get_history())
config.set('DEFAULT', 'win_internet_cache', winpaths.get_internet_cache())
config.set('DEFAULT', 'win_my_pictures', winpaths.get_my_pictures())
config.set('DEFAULT', 'win_personal', winpaths.get_personal())
config.set('DEFAULT', 'win_my_documents', winpaths.get_my_documents())
config.set('DEFAULT', 'win_program_files', winpaths.ge
|
t_program_files())
config.set('DEFAULT', 'win_program_files_common', winpaths.get_program_files_common())
config.set('DE
|
FAULT', 'win_system', winpaths.get_system())
config.set('DEFAULT', 'win_windows', winpaths.get_windows())
config.set('DEFAULT', 'win_startup', winpaths.get_startup())
config.set('DEFAULT', 'win_recent', winpaths.get_recent())
def get(section, option, default=None):
if CONFIG is None:
load_config()
try:
return CONFIG.get(section, option)
except:
return default
def getint(section, option, default=None):
if CONFIG is None:
load_config()
try:
return CONFIG.getint(section, option)
except:
return default
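A hedged usage sketch for the helpers above; the 'hydra_server'/'port' section and option are examples only and need not exist in a real hydra.ini, while 'home_dir' is set by load_config itself.
if __name__ == '__main__':
    # load_config() is called implicitly by get()/getint() when needed.
    home = get('DEFAULT', 'home_dir', default='~')
    port = getint('hydra_server', 'port', default=8080)
    print("home_dir=%s port=%s" % (home, port))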
|
wikilinks/neleval
|
doc/conf.py
|
Python
|
apache-2.0
| 8,305 | 0 |
# -*- coding: utf-8 -*-
#
# project-template documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# project root
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'numpydoc',
'nbsphinx',
'sphinxcontrib.programoutput',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx_issues',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'neleval'
copyright = u'2014-2018 Joel Nothman, Ben Hachey, Will Radford'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.3-dev'
release = '3.0.3-dev'
# version = neleval.__version__
# The full version, including alpha/beta/rc tags.
# release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'project-templatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'neleval.tex', u'neleval Documentation',
u'neleval contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
}
# Config for sphinx_issues
issues_uri = 'https://github.
|
timesong/pycha
|
chavier/dialogs.py
|
Python
|
lgpl-3.0
| 7,040 | 0.00071 |
# Copyright(c) 2007-2010 by Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Chavier.
#
# Chavier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Chavier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Chavier. If not, see <http://www.gnu.org/licenses/>.
import random
import webbrowser
import pygtk
pygtk.require('2.0')
import gtk
class TextInputDialog(gtk.Dialog):
def __init__(self, toplevel_window, suggested_name):
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
super(TextInputDialog, self).__init__(u'Enter a name for the dataset',
toplevel_window, flags, buttons)
self.set_default_size(300, -1)
hbox = gtk.HBox(spacing=6)
hbox.set_border_width(12)
label = gtk.Label(u'Name')
hbox.pack_start(label, False, False)
self.entry = gtk.Entry()
self.entry.set_text(suggested_name)
self.entry.set_activates_default(True)
hbox.pack_start(self.entry, True, True)
self.vbox.pack_start(hbox, False, False)
self.vbox.show_all()
self.set_default_response(gtk.RESPONSE_ACCEPT)
def get_name(self):
return self.entry.get_text()
class PointDialog(gtk.Dialog):
def __init__(self, toplevel_window, initial_x, initial_y):
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
super(PointDialog, self).__init__(u'Enter the point values',
toplevel_window, flags, buttons)
initials = {u'x': str(initial_x), u'y': str(initial_y)}
self.entries = {}
for coordinate in (u'x', u'y'):
hbox = gtk.HBox(spacing=6)
hbox.set_border_width(12)
label = gtk.Label(coordinate)
hbox.pack_start(label, False, False)
entry = gtk.Entry()
entry.set_activates_default(True)
entry.set_text(initials[coordinate])
hbox.pack_start(entry, True, True)
self.entries[coordinate] = entry
self.vbox.pack_start(hbox, False, False)
self.vbox.show_all()
self.set_default_response(gtk.RESPONSE_ACCEPT)
def get_point(self):
return (float(self.entries[u'x'].get_text()),
float(self.entries[u'y'].get_text()))
class OptionDialog(gtk.Dialog):
def __init__(self, toplevel_window, label, value, value_type):
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
super(OptionDialog, self).__init__(u'Enter the option value',
toplevel_window, flags, buttons)
hbox = gtk.HBox(spacing=6)
hbox.set_border_width(12)
label = gtk.Label(label)
hbox.pack_start(label, False, False)
self.entry = gtk.Entry()
self.entry.set_text(value or '')
self.entry.set_activates_default(True)
hbox.pack_start(self.entry, True, True)
self.vbox.pack_start(hbox, False, False)
self.vbox.show_all()
self.set_default_response(gtk.RESPONSE_ACCEPT)
def get_value(self):
return self.entry.get_text()
class RandomGeneratorDialog(gtk.Dialog):
def __init__(self, toplevel_window):
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
super(RandomGeneratorDialog, self).__init__(u'Points generation',
toplevel_window,
flags, buttons)
self.size_group = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)
        self.number = self._create_spin_button('Number of points to generate',
0, 1, 5, 1, 1000, 10)
self.min = self._create_spin_button('Minimum y value',
2, 0.5, 1, -1000, 1000, 0)
self.max = self._create_spin_button('Maximum y value',
2, 0.5, 1, 0, 1000, 10)
self.vbox.show_all()
self.set_default_response(gtk.RESPONSE_ACCEPT)
def _create_spin_button(self, label_text, digits, step, page,
min_value, max_value, value):
hbox = gtk.HBox(spacing=6)
hbox.set_border_width(12)
label = gtk.Label(label_text)
label.set_alignment(1.0, 0.5)
self.size_group.add_widget(label)
hbox.pack_start(label, False, False)
spin_button = gtk.SpinButton(digits=digits)
spin_button.set_increments(step, page)
spin_button.set_range(min_value, max_value)
spin_button.set_value(value)
spin_button.set_activates_default(True)
hbox.pack_start(spin_button, True, True)
self.vbox.pack_start(hbox, False, False)
return spin_button
def generate_points(self):
n = self.number.get_value_as_int()
min_value = self.min.get_value()
max_value = self.max.get_value()
return [(x, random.uniform(min_value, max_value))
for x in range(n)]
class AboutDialog(gtk.AboutDialog):
def __init__(self, toplevel_window):
super(AboutDialog, self).__init__()
self.set_transient_for(toplevel_window)
self.set_name('Chavier')
self.set_version('0.1')
self.set_comments('A Chart Viewer for the Pycha library')
self.set_copyright('Copyleft 2008 Lorenzo Gil Sanchez')
#self.set_license('LGPL')
author = 'Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>'
self.set_authors([author])
self.set_program_name('Chavier')
self.set_website('http://www.lorenzogil.com/projects/pycha')
self.set_website_label('Project website')
def url_handler(dialog, link, data=None):
webbrowser.open(link)
gtk.about_dialog_set_url_hook(url_handler)
def warning(window, msg):
dialog = gtk.MessageDialog(window,
gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, msg)
dialog.run()
dialog.destroy()
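# Usage sketch (illustrative, not from the original file): a typical
# run/destroy cycle for one of the dialogs above; "parent" is assumed to
# be a gtk.Window owned by the caller.
def ask_for_point(parent, x=0.0, y=0.0):
    dialog = PointDialog(parent, x, y)
    response = dialog.run()
    point = dialog.get_point() if response == gtk.RESPONSE_ACCEPT else None
    dialog.destroy()
    return point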
|
kennethd/moto
|
moto/ec2/exceptions.py
|
Python
|
apache-2.0
| 10,404 | 0.000192 |
from __future__ import unicode_literals
from moto.core.exceptions import RESTError
class EC2ClientError(RESTError):
code = 400
class DependencyViolationError(EC2ClientError):
def __init__(self, message):
super(DependencyViolationError, self).__init__(
"DependencyViolation", message)
class MissingParameterError(EC2ClientError):
def __init__(self, parameter):
super(MissingParameterError, self).__init__(
"MissingParameter",
"The request must contain the parameter {0}"
.format(parameter))
class InvalidDHCPOptionsIdError(EC2ClientError):
def __init__(self, dhcp_options_id):
super(InvalidDHCPOptionsIdError, self).__init__(
"InvalidDhcpOptionID.NotFound",
"DhcpOptionID {0} does not exist."
.format(dhcp_options_id))
class MalformedDHCPOptionsIdError(EC2ClientError):
def __init__(self, dhcp_options_id):
super(MalformedDHCPOptionsIdError, self).__init__(
"InvalidDhcpOptionsId.Malformed",
"Invalid id: \"{0}\" (expecting \"dopt-...\")"
.format(dhcp_options_id))
class InvalidKeyPairNameError(EC2ClientError):
def __init__(self, key):
super(InvalidKeyPairNameError, self).__init__(
"InvalidKeyPair.NotFound",
"
|
The keypair '{0}' does not exist."
.format(key))
class InvalidKeyPairDuplicateError(EC2ClientError):
def __init__(self, key):
super(InvalidKeyPairDuplicateError, self).__init__(
"InvalidKeyPair.Duplicate",
"The keypair '{0}' already exists."
.format(key))
class InvalidVPCIdError(EC2ClientError):
def __init__(self, vpc_id):
super(InvalidVPCIdError, self).__init__(
"InvalidVpcID.NotFound",
"VpcID {0} does not exist."
.format(vpc_id))
class InvalidSubnetIdError(EC2ClientError):
def __init__(self, subnet_id):
super(InvalidSubnetIdError, self).__init__(
"InvalidSubnetID.NotFound",
"The subnet ID '{0}' does not exist"
.format(subnet_id))
class InvalidNetworkAclIdError(EC2ClientError):
def __init__(self, network_acl_id):
super(InvalidNetworkAclIdError, self).__init__(
"InvalidNetworkAclID.NotFound",
"The network acl ID '{0}' does not exist"
.format(network_acl_id))
class InvalidVpnGatewayIdError(EC2ClientError):
def __init__(self, network_acl_id):
super(InvalidVpnGatewayIdError, self).__init__(
"InvalidVpnGatewayID.NotFound",
"The virtual private gateway ID '{0}' does not exist"
.format(network_acl_id))
class InvalidNetworkInterfaceIdError(EC2ClientError):
def __init__(self, eni_id):
super(InvalidNetworkInterfaceIdError, self).__init__(
"InvalidNetworkInterfaceID.NotFound",
"The network interface ID '{0}' does not exist"
.format(eni_id))
class InvalidNetworkAttachmentIdError(EC2ClientError):
def __init__(self, attachment_id):
super(InvalidNetworkAttachmentIdError, self).__init__(
"InvalidAttachmentID.NotFound",
"The network interface attachment ID '{0}' does not exist"
.format(attachment_id))
class InvalidSecurityGroupDuplicateError(EC2ClientError):
def __init__(self, name):
super(InvalidSecurityGroupDuplicateError, self).__init__(
"InvalidGroup.Duplicate",
"The security group '{0}' already exists"
.format(name))
class InvalidSecurityGroupNotFoundError(EC2ClientError):
def __init__(self, name):
super(InvalidSecurityGroupNotFoundError, self).__init__(
"InvalidGroup.NotFound",
"The security group '{0}' does not exist"
.format(name))
class InvalidPermissionNotFoundError(EC2ClientError):
def __init__(self):
super(InvalidPermissionNotFoundError, self).__init__(
"InvalidPermission.NotFound",
"Could not find a matching ingress rule")
class InvalidRouteTableIdError(EC2ClientError):
def __init__(self, route_table_id):
super(InvalidRouteTableIdError, self).__init__(
"InvalidRouteTableID.NotFound",
"The routeTable ID '{0}' does not exist"
.format(route_table_id))
class InvalidRouteError(EC2ClientError):
def __init__(self, route_table_id, cidr):
super(InvalidRouteError, self).__init__(
"InvalidRoute.NotFound",
"no route with destination-cidr-block {0} in route table {1}"
.format(cidr, route_table_id))
class InvalidInstanceIdError(EC2ClientError):
def __init__(self, instance_id):
super(InvalidInstanceIdError, self).__init__(
"InvalidInstanceID.NotFound",
"The instance ID '{0}' does not exist"
.format(instance_id))
class InvalidAMIIdError(EC2ClientError):
def __init__(self, ami_id):
super(InvalidAMIIdError, self).__init__(
"InvalidAMIID.NotFound",
"The image id '[{0}]' does not exist"
.format(ami_id))
class InvalidAMIAttributeItemValueError(EC2ClientError):
def __init__(self, attribute, value):
super(InvalidAMIAttributeItemValueError, self).__init__(
"InvalidAMIAttributeItemValue",
"Invalid attribute item value \"{0}\" for {1} item type."
.format(value, attribute))
class MalformedAMIIdError(EC2ClientError):
def __init__(self, ami_id):
super(MalformedAMIIdError, self).__init__(
"InvalidAMIID.Malformed",
"Invalid id: \"{0}\" (expecting \"ami-...\")"
.format(ami_id))
class InvalidSnapshotIdError(EC2ClientError):
def __init__(self, snapshot_id):
super(InvalidSnapshotIdError, self).__init__(
"InvalidSnapshot.NotFound",
"") # Note: AWS returns empty message for this, as of 2014.08.22.
class InvalidVolumeIdError(EC2ClientError):
def __init__(self, volume_id):
super(InvalidVolumeIdError, self).__init__(
"InvalidVolume.NotFound",
"The volume '{0}' does not exist."
.format(volume_id))
class InvalidVolumeAttachmentError(EC2ClientError):
def __init__(self, volume_id, instance_id):
super(InvalidVolumeAttachmentError, self).__init__(
"InvalidAttachment.NotFound",
"Volume {0} can not be detached from {1} because it is not attached"
.format(volume_id, instance_id))
class InvalidDomainError(EC2ClientError):
def __init__(self, domain):
super(InvalidDomainError, self).__init__(
"InvalidParameterValue",
"Invalid value '{0}' for domain."
.format(domain))
class InvalidAddressError(EC2ClientError):
def __init__(self, ip):
super(InvalidAddressError, self).__init__(
"InvalidAddress.NotFound",
"Address '{0}' not found."
.format(ip))
class InvalidAllocationIdError(EC2ClientError):
def __init__(self, allocation_id):
super(InvalidAllocationIdError, self).__init__(
"InvalidAllocationID.NotFound",
"Allocation ID '{0}' not found."
.format(allocation_id))
class InvalidAssociationIdError(EC2ClientError):
def __init__(self, association_id):
super(InvalidAssociationIdError, self).__init__(
"InvalidAssociationID.NotFound",
"Association ID '{0}' not found."
.format(association_id))
class InvalidVPCPeeringConnectionIdError(EC2ClientError):
def __init__(self, vpc_peering_connection_id):
super(InvalidVPCPeeringConnectionIdError, self).__init__(
"InvalidVpcPeeringConnectionId.NotFound",
"VpcPeeringConnectionID {0} does not exist."
.format(vpc_peering_connection_id))
class InvalidVPCPeeringConnectionStateTransitionError(EC2ClientError):
def __init__(self, vpc_peering_connection_id):
super(InvalidVPCPeeringConnectionStateTransitionError, self).__init__(
"InvalidStateTransition",
"
|
eharney/nova
|
nova/tests/virt/vmwareapi/stubs.py
|
Python
|
apache-2.0
| 3,393 | 0.000295 |
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts for the test suite
"""
import contextlib
import mock
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
"""Stubs out the VMwareAPISession's get_vim_object method."""
return fake.FakeVim()
def fake_is_vim_object(arg, module):
"""Stubs out the VMwareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
def fake_temp_method_exception():
raise error_util.VimFaultException(
[error_util.NOT_AUTHENTICATED],
"Session Empty/Not Authenticated")
def fake_temp_session_exception():
raise error_util.SessionConnectionException("it's a fake!",
"Session Exception")
def fake_session_file_exception():
fault_list = [error_util.FILE_ALREADY_EXISTS]
raise error_util.VimFaultException(fault_list,
Exception('fake'))
def set_stubs(stubs):
"""Set the stubs."""
stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
fake.fake_get_vmdk_size_and_properties)
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
stubs.Set(driver.VMwareAPISession, "_get_vim_object",
fake_get_vim_object)
stubs.Set(driver.VMwareAPISession, "_is_vim_object",
fake_is_vim_object)
def fake_suds_context(calls={}):
"""Generate a suds client which automatically mocks all SOAP method calls.
Calls are stored in <calls>, indexed by the name of the call. If you need
to mock the behaviour of specific API calls you can pre-populate <calls>
with appropriate Mock objects.
"""
class fake_factory:
def create(self, name):
return mock.NonCallableMagicMock(name=name)
class fake_service:
def __getattr__(self, attr_name):
if attr_name in calls:
return calls[attr_name]
mock_call = mock.MagicMock(name=attr_name)
calls[attr_name] = mock_call
return mock_call
class fake_client:
def __init__(self, wdsl_url, **kwargs):
self.service = fake_service()
self.factory = fake_factory()
return contextlib.nested(
mock.patch('suds.client.Client', fake_client),
# As we're not connecting to a real host there's no need to wait
# between retries
mock.patch.object(driver, 'TIME_BETWEEN_API_CALL_RETRIES', 0)
)
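# Usage sketch (illustrative, not part of the original module): pre-populating
# <calls> to control one SOAP method, as the docstring above describes.
# The method name 'RetrieveProperties' is only an example, not taken from
# this file.
def _example_fake_suds_usage():
    calls = {'RetrieveProperties': mock.MagicMock(return_value=[])}
    with fake_suds_context(calls):
        # code under test would build its suds.client.Client here; the
        # stubbed service records every other call in <calls> automatically
        pass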
|
vicamo/pcsc-lite-android
|
UnitaryTests/SCardConnect_DIRECT.py
|
Python
|
bsd-3-clause
| 3,325 | 0.001203 |
#! /usr/bin/env python
# SCardConnect_DIRECT.py : Unitary test for SCardConnect in DIRECT mode
# Copyright (C) 2009 Ludovic Rousseau
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
# MSDN indicates that pdwActiveProtocol must be set to
# SCARD_PROTOCOL_UNDEFINED if SCARD_SHARE_DIRECT is used. This behavior
# has been implemented in revision 4332 but reverted in revision 4940 so
# that the protocol is not negociated again
from smartcard.scard import *
from smartcard.pcsc.PCSCExceptions import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
if hresult != SCARD_S_SUCCESS:
raise EstablishContextException(hresult)
hresult, readers = SCardListReaders(hcontext, [])
if hresult != SCARD_S_SUCCESS:
raise ListReadersException(hresult)
print 'PC/SC Readers:', readers
reader = readers[0]
print "Using reader:", reader
# the card should be reseted or inserted just before execution
# Connect in SCARD_SHARE_DIRECT mode
hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, reader,
SCARD_SHARE_DIRECT, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print "dwActiveProtocol:", dwActiveProtocol
# Reconnect in SCARD_SHARE_DIRECT mode
hresult, dwActiveProtocol = SCardReconnect(hcard,
SCARD_SHARE_DIRECT, SCARD_PROTOCOL_ANY, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
# ActiveProtocol should be SCARD_PROTOCOL_UNDEFINED (0)
print "dwActiveProtocol:", dwActiveProtocol
if SCARD_PROTOCOL_UNDEFINED != dwActiveProtocol:
raise Exception('dwActiveProtocol should be SCARD_PROTOCOL_UNDEFINED')
hresult = SCardDisconnect(hcard, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
# Connect in SCARD_SHARE_SHARED mode
hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, reader,
SCARD_SHARE_SHARED, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
print "dwActiveProtocol:", dwActiveProtocol
oldActiveProtocol = dwActiveProtocol
# Reconnect in SCARD_SHARE_DIRECT mode
hresult, dwActiveProtocol = SCardReconnect(hcard,
SCARD_SHARE_DIRECT, SCARD_PROTOCOL_ANY, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
# ActiveProtocol should be SCARD_PROTOCOL_UNDEFINED (0)
print "dwActiveProtocol:", dwActiveProtocol
if oldActiveProtocol != dwActiveProtocol:
raise Exception('dwActiveProtocol should be like before')
hresult = SCardDisconnect(hcard, SCARD_RESET_CARD)
if hresult != SCARD_S_SUCCESS:
raise BaseSCardException(hresult)
hresult = SCardReleaseContext(hcontext)
if hresult != SCARD_S_SUCCESS:
raise ReleaseContextException(hresult)
|
DArtagan/charityfund
|
charityfund/settings.py
|
Python
|
mit
| 1,699 | 0 |
"""
Django settings for charityfund project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DEBUG', False))
TEMPLATE_DEBUG = bool(os.environ.get('DEBUG', False))
ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(', ')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'charityfund.urls'
WSGI_APPLICATION = 'charityfund.wsgi.application'
# Database
DATABASES = {
'default': dj_database_url.config(default='sqlite://../db.sqlite3'),
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
fergalmoran/energenie
|
socket.py
|
Python
|
apache-2.0
| 147 | 0.020408 |
from energenie import switch_on, switch_off
from time import sleep
print ("Turning off")
switch_off()
sleep(5)
print ("Turning on")
switch_on()
|
brendangregg/bcc
|
tools/tcpconnect.py
|
Python
|
apache-2.0
| 17,972 | 0.002393 |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpconnect Trace TCP connect()s.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpconnect [-h] [-c] [-t] [-p PID] [-P PORT [PORT ...]] [-4 | -6]
#
# All connection attempts are traced, even if they ultimately fail.
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 25-Sep-2015 Brendan Gregg Created this.
# 14-Feb-2016 " " Switch to bpf_perf_output.
# 09-Jan-2019 Takuma Kume Support filtering by UID
# 30-Jul-2019 Xiaozhou Liu Count connects.
# 07-Oct-2020 Nabil Schear Correlate connects with DNS responses
# 08-Mar-2021 Suresh Kumar Added LPORT option
from __future__ import print_function
from bcc import BPF
from bcc.containers import filter_by_containers
from bcc.utils import printb
import argparse
from socket import inet_ntop, ntohs, AF_INET, AF_INET6
from struct import pack
from time import sleep
from datetime import datetime
# arguments
examples = """examples:
./tcpconnect # trace all TCP connect()s
./tcpconnect -t # include timestamps
./tcpconnect -d # include DNS queries associated with connects
./tcpconnect -p 181 # only trace PID 181
./tcpconnect -P 80 # only trace port 80
./tcpconnect -P 80,81 # only trace port 80 and 81
./tcpconnect -4 # only trace IPv4 family
./tcpconnect -6 # only trace IPv6 family
./tcpconnect -U # include UID
./tcpconnect -u 1000 # only trace UID 1000
./tcpconnect -c # count connects per src ip and dest ip/port
./tcpconnect -L # include LPORT while printing outputs
./tcpconnect --cgroupmap mappath # only trace cgroups in this BPF map
./tcpconnect --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace TCP connects",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-P", "--port",
help="comma-separated list of destination ports to trace.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-4", "--ipv4", action="store_true",
help="trace IPv4 family only")
group.add_argument("-6", "--ipv6", action="store_true",
help="trace IPv6 family only")
parser.add_argument("-L", "--lport", action="store_true",
help="include LPORT on output")
parser.add_argument("-U", "--print-uid", action="store_true",
help="include UID on output")
parser.add_argument("-u", "--uid",
help="trace this UID only")
parser.add_argument("-c", "--count", action="store_true",
help="count connects per src ip and dest ip/port")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("-d", "--dns", action="store_true",
help="include likely DNS query associated with each connect")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
BPF_HASH(currsock, u32, struct sock *);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 uid;
u32 saddr;
u32 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
u32 uid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
// separate flow keys per address family
struct ipv4_flow_key_t {
u32 saddr;
u32 daddr;
u16 dport;
};
BPF_HASH(ipv4_count, struct ipv4_flow_key_t);
struct ipv6_flow_key_t {
unsigned __int128 saddr;
unsigned __int128 daddr;
u16 dport;
};
BPF_HASH(ipv6_count, struct ipv6_flow_key_t);
int trace_connect_entry(struct pt_regs *ctx, struct sock *sk)
{
if (container_should_be_filtered()) {
return 0;
}
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = pid_tgid;
FILTER_PID
u32 uid = bpf_get_current_uid_gid();
FILTER_UID
// stash the sock ptr for lookup on return
currsock.update(&tid, &sk);
return 0;
};
static int trace_connect_return(struct pt_regs *ctx, short ipver)
{
int ret = PT_REGS_RC(ctx);
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = pid_tgid;
struct sock **skpp;
skpp = currsock.lookup(&tid);
if (skpp == 0) {
return 0; // missed entry
}
if (ret != 0) {
// failed to send SYNC packet, may not have populated
// socket __sk_common.{skc_rcv_saddr, ...}
currsock.delete(&tid);
return 0;
}
// pull in details
struct sock *skp = *skpp;
u16 lport = skp->__sk_common.skc_num;
u16 dport = skp->__sk_common.skc_dport;
FILTER_PORT
FILTER_FAMILY
if (ipver == 4) {
IPV4_CODE
} else /* 6 */ {
IPV6_CODE
}
currsock.delete(&tid);
return 0;
}
int trace_connect_v4_return(struct pt_regs *ctx)
{
return trace_connect_return(ctx, 4);
}
int trace_connect_v6_return(struct pt_regs *ctx)
{
return trace_connect_return(ctx, 6);
}
"""
struct_init = {'ipv4':
{'count':
"""
struct ipv4_flow_key_t flow_key = {};
flow_key.saddr = skp->__sk_common.skc_rcv_saddr;
flow_key.daddr = skp->__sk_common.skc_daddr;
flow_key.dport = ntohs(dport);
ipv4_count.increment(flow_key);""",
'trace':
"""
struct ipv4_data_t data4 = {.pid = pid, .ip = ipver};
data4.uid = bpf_get_current_uid_gid();
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = skp->__sk_common.skc_rcv_saddr;
data4.daddr = skp->__sk_common.skc_daddr;
data4.lport = lport;
data4.dport = ntohs(dport);
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));"""
},
'ipv6':
{'count':
"""
struct ipv6_flow_key_t flow_key = {};
bpf_probe_read_kernel(&flow_key.saddr, sizeof(flow_key.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&flow_key.daddr, sizeof(flow_key.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
flow_key.dport = ntohs(dport);
ipv6_count.increment(flow_key);""",
'trace':
"""
struct ipv6_data_t data6 = {.pid = pid, .ip = ipver};
data6.uid = bpf_get_current_uid_gid();
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
    bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
    data6.lport = lport;
data6.dport = ntohs(dport);
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));"""
}
}
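# Note (assumption, not shown in this excerpt): the 'count'/'trace' templates
# above are typically spliced into bpf_text by replacing the IPV4_CODE and
# IPV6_CODE placeholders before the program is compiled, e.g.:
#   mode = 'count' if args.count else 'trace'
#   bpf_text = bpf_text.replace('IPV4_CODE', struct_init['ipv4'][mode])
#   bpf_text = bpf_text.replace('IPV6_CODE', struct_init['ipv6'][mode])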
# This defines an additional BPF program that instruments udp_recvmsg system
# call to locate DNS response packets on UDP port 53. When these packets are
# located, the data is copied to user-space where python will parse them with
# dnslib.
#
# uses a percp
|
yinglanma/AI-project
|
tensorpack/tfutils/summary.py
|
Python
|
apache-2.0
| 3,816 | 0.002621 |
# -*- coding: UTF-8 -*-
# File: summary.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
import six
import tensorflow as tf
import re
from ..utils import *
from . import get_global_step_var
from .symbolic_functions import rms
__all__ = ['create_summary', 'add_param_summary', 'add_activation_summary',
'add_moving_summary', 'summary_moving_average']
def create_summary(name, v):
"""
Return a tf.Summary object with name and simple scalar value v
"""
assert isinstance(name, six.string_types), type(name)
v = float(v)
s = tf.Summary()
s.value.add(tag=name, simple_value=v)
return s
def add_activation_summary(x, name=None):
"""
Add summary to graph for an activation tensor x.
If name is None, use x.name.
"""
ndim = x.get_shape().ndims
assert ndim >= 2, \
"Summary a scalar with histogra
|
m? Maybe use scalar instead. FIXME!"
if name is None:
name = x.name
with tf.name_scope('act_summary'):
tf.histogram_summary(name + '/activation', x)
tf.scalar_summary(name + '/activation_sparsity', tf.nn.zero_fraction(x))
tf.scalar_summary(
name + '/activation_rms', rms(x))
def add_param_summary(summary_lists):
"""
Add summary for all trainable variables matching the regex
:param summary_lists: list of (regex, [list of summary type to perform]).
Type can be 'mean', 'scalar', 'histogram', 'sparsity', 'rms'
"""
def perform(var, action):
ndim = var.get_shape().ndims
name = var.name.replace(':0', '')
if action == 'scalar':
assert ndim == 0, "Scalar summary on high-dimension data. Maybe you want 'mean'?"
tf.scalar_summary(name, var)
return
assert ndim > 0, "Cannot perform {} summary on scalar data".format(action)
if action == 'histogram':
tf.histogram_summary(name, var)
return
if action == 'sparsity':
tf.scalar_summary(name + '/sparsity', tf.nn.zero_fraction(var))
return
if action == 'mean':
tf.scalar_summary(name + '/mean', tf.reduce_mean(var))
return
if action == 'rms':
tf.scalar_summary(name + '/rms', rms(var))
return
raise RuntimeError("Unknown summary type: {}".format(action))
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
with tf.name_scope('param_summary'):
for p in params:
name = p.name
for rgx, actions in summary_lists:
if not rgx.endswith('$'):
rgx = rgx + '(:0)?$'
if re.match(rgx, name):
for act in actions:
perform(p, act)
def add_moving_summary(v, *args):
"""
:param v: tensor or list of tensor to summary
:param args: tensors to summary
"""
if not isinstance(v, list):
v = [v]
v.extend(args)
for x in v:
tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, x)
def summary_moving_average():
""" Create a MovingAverage op and summary for all variables in
MOVING_SUMMARY_VARS_KEY.
:returns: a op to maintain these average.
"""
with tf.name_scope('EMA_summary'):
global_step_var = get_global_step_var()
with tf.name_scope(None):
averager = tf.train.ExponentialMovingAverage(
0.99, num_updates=global_step_var, name='EMA')
vars_to_summary = tf.get_collection(MOVING_SUMMARY_VARS_KEY)
avg_maintain_op = averager.apply(vars_to_summary)
for idx, c in enumerate(vars_to_summary):
# TODO assert scalar
name = re.sub('tower[p0-9]+/', '', c.op.name)
tf.scalar_summary(name, averager.average(c))
return avg_maintain_op
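# Usage sketch (illustrative, not from the original file): the regexes and
# summary types below are arbitrary examples of the (regex, [types]) pairs
# that add_param_summary() accepts.
def _example_summary_setup(cost, activations):
    add_param_summary([('.*/W', ['histogram', 'rms']),
                       ('.*/b', ['mean'])])
    add_moving_summary(cost)
    for act in activations:
        add_activation_summary(act)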
|
MattNolanLab/ei-attractor
|
grid_cell_model/simulations/007_noise/figures/paper/ee_connections_ei_flat/figure_drifts.py
|
Python
|
gpl-3.0
| 501 | 0.001996 |
#!/usr/bin/env python
from __future__ import absolute_import, print_function
from grid_cell_model.submitting import flagparse
import noisefigs
from noisefigs.env import NoiseEnvironment
import config_standard_gEE_3060 as config
parser = flagparse.FlagParser()
parser.add_flag('--bumpDriftSweep')
args = parser.parse_args()
env = NoiseEnvironment(user_config=config.get_config())
if args.bumpDriftSweep or args.all:
env.register_plotter(noisefigs.plotters.BumpDriftAtTimePlotter)
env.plot()
|
noironetworks/neutron
|
neutron/tests/unit/extensions/test_network_ip_availability.py
|
Python
|
apache-2.0
| 21,269 | 0 |
# Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants
import neutron.api.extensions as api_ext
import neutron.common.config as config
import neutron.extensions
import neutron.services.network_ip_availability.plugin as plugin_module
import neutron.tests.unit.db.test_db_base_plugin_v2 as test_db_base_plugin_v2
API_RESOURCE = 'network-ip-availabilities'
IP_AVAIL_KEY = 'network_ip_availability'
IP_AVAILS_KEY = 'network_ip_availabilities'
EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__)
PLUGIN_NAME = '%s.%s' % (plugin_module.NetworkIPAvailabilityPlugin.__module__,
plugin_module.NetworkIPAvailabilityPlugin.__name__)
class TestNetworkIPAvailabilityAPI(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
svc_plugins = {'plugin_name': PLUGIN_NAME}
super(TestNetworkIPAvailabilityAPI, self).setUp(
service_plugins=svc_plugins)
        self.plugin = plugin_module.NetworkIPAvailabilityPlugin()
ext_mgr = api_ext.PluginAwareExtensionManager(
EXTENSIONS_PATH, {"network-ip-availability": self.plugin}
)
app = config.load_paste_app('extensions_test_app')
self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
def _validate_availability(self, network, availability, expected_used_ips,
expected_total_ips=253):
        self.assertEqual(network['name'], availability['network_name'])
self.assertEqual(network['id'], availability['network_id'])
self.assertEqual(expected_used_ips, availability['used_ips'])
self.assertEqual(expected_total_ips, availability['total_ips'])
def _validate_from_availabilities(self, availabilities, wrapped_network,
expected_used_ips,
expected_total_ips=253):
network = wrapped_network['network']
availability = self._find_availability(availabilities, network['id'])
self.assertIsNotNone(availability)
self._validate_availability(network, availability,
expected_used_ips=expected_used_ips,
expected_total_ips=expected_total_ips)
def test_usages_query_list_with_fields_total_ips(self):
with self.network() as net:
with self.subnet(network=net):
# list by query fields: total_ips
params = 'fields=total_ips'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
availability = response[IP_AVAILS_KEY][0]
self.assertIn('total_ips', availability)
self.assertEqual(253, availability['total_ips'])
self.assertNotIn('network_id', availability)
def test_usages_query_show_with_fields_total_ips(self):
with self.network() as net:
with self.subnet(network=net):
network = net['network']
# Show by query fields: total_ips
params = ['total_ips']
request = self.new_show_request(API_RESOURCE,
network['id'],
fields=params)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
availability = response[IP_AVAIL_KEY]
self.assertIn('total_ips', availability)
self.assertEqual(253, availability['total_ips'])
self.assertNotIn('network_id', availability)
@staticmethod
def _find_availability(availabilities, net_id):
for ip_availability in availabilities:
if net_id == ip_availability['network_id']:
return ip_availability
def test_basic(self):
with self.network() as net:
with self.subnet(network=net):
network = net['network']
# Get ALL
request = self.new_list_request(API_RESOURCE, self.fmt)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 0)
# Get single via id
request = self.new_show_request(API_RESOURCE, network['id'])
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
usage = response[IP_AVAIL_KEY]
self._validate_availability(network, usage, 0)
def test_usages_multi_nets_subnets(self):
with self.network(name='net1') as n1,\
self.network(name='net2') as n2,\
self.network(name='net3') as n3:
# n1 should have 2 subnets, n2 should have none, n3 has 1
with self.subnet(network=n1) as subnet1_1, \
self.subnet(cidr='40.0.0.0/24', network=n3) as subnet3_1:
# Consume 3 ports n1, none n2, 2 ports on n3
with self.port(subnet=subnet1_1),\
self.port(subnet=subnet1_1),\
self.port(subnet=subnet1_1),\
self.port(subnet=subnet3_1),\
self.port(subnet=subnet3_1):
# Test get ALL
request = self.new_list_request(API_RESOURCE)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(3, len(response[IP_AVAILS_KEY]))
data = response[IP_AVAILS_KEY]
self._validate_from_availabilities(data, n1, 3, 253)
self._validate_from_availabilities(data, n2, 0, 0)
self._validate_from_availabilities(data, n3, 2, 253)
# Test get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
network['id'])
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
self._validate_availability(network,
response[IP_AVAIL_KEY], 3, 253)
def test_usages_multi_nets_subnets_sums(self):
with self.network(name='net1') as n1:
# n1 has 2 subnets
with self.subnet(network=n1) as subnet1_1, \
self.subnet(cidr='40.0.0.0/24', network=n1) as subnet1_2:
# Consume 3 ports n1: 1 on subnet 1 and 2 on subnet 2
with self.port(subnet=subnet1_1),\
self.port(subnet=subnet1_2),\
self.port(subnet=subnet1_2):
# Get ALL
request = self.new_list_request(API_R
|
immerrr/numpy
|
numpy/lib/twodim_base.py
|
Python
|
bsd-3-clause
| 26,858 | 0.000037 |
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
__all__ = ['diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri',
'triu', 'tril', 'vander', 'histogram2d', 'mask_indices',
'tril_indices', 'tril_indices_from', 'triu_indices',
'triu_indices_from',
]
from numpy.core.numeric import (
asanyarray, subtract, arange, zeros, greater_equal, multiply, ones,
asarray, where, dtype as np_dtype, less, int8, int16, int32, int64
)
from numpy.core import iinfo
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
        a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
    trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1,
|
simphony/simphony-lammps-md
|
edmsetup.py
|
Python
|
bsd-2-clause
| 1,237 | 0 |
import sys
import click
import os
import subprocess
from packageinfo import BUILD, VERSION, NAME
# The version of the buildcommon to checkout.
BUILDCOMMONS_VERSION = "v0.2"
def bootstrap_devenv():
try:
os.makedirs(".devenv")
    except OSError:
pass
if not os.path.exists(".devenv/buildrecipes-common"):
subprocess.check_call([
"git", "clone", "-b", BUILDCOMMONS_VERSION,
"http://github.com/simphony/buildrecipes-common.git",
".devenv/buildrecipes-common"
])
sys.path.insert(0, ".devenv/buildrecipes-common")
bootstrap_devenv()
import buildcommons as common # noqa
workspace = common.workspace()
common.edmenv_setup()
@click.group()
def cli():
pass
@cli.command()
def egg():
common.local_repo_to_edm_egg(".", name=NAME, version=VERSION, build=BUILD)
@cli.command()
def upload_egg():
egg_path = "endist/{NAME}-{VERSION}-{BUILD}.egg".format(
NAME=NAME,
VERSION=VERSION,
BUILD=BUILD)
click.echo("Uploading {} to EDM repo".format(egg_path))
common.upload_egg(egg_path)
click.echo("Done")
@cli.command()
def clean():
click.echo("Cleaning")
common.clean(["endist", ".devenv"])
cli()
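# Note (illustrative): the script is driven through the click group above,
# e.g. `python edmsetup.py egg`, `python edmsetup.py clean`, or
# `python edmsetup.py --help` to list the registered commands (command
# spelling may use dashes instead of underscores depending on the click
# version in use).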
|
jayclassless/tidypy
|
src/tidypy/tools/pyroma.py
|
Python
|
mit
| 4,228 | 0.000237 |
import logging
import os
import warnings
from ..util import SysOutCapture
from .base import Tool, Issue, ToolIssue
# Hacks to prevent pyroma from screwing up the logging system for everyone else
old_config = logging.basicConfig
try:
logging.basicConfig = lambda **k: None
from pyroma import projectdata, ratings
finally:
logging.basicConfig = old_config
# Hacks so we can get the messages of these tests without running them.
HACKS = (
('PythonVersion', '_major_version_specified', False),
('ValidREST', '_message', ''),
('ClassifierVerification', '_incorrect', []),
('Licensing', '_message', ''),
)
for clazz, attr, value in HACKS:
if hasattr(ratings, clazz):
setattr(getattr(ratings, clazz), attr, value)
TIDYPY_ISSUES = {
'NOT_CALLED': (
'SetupNotCalled',
'setup() was not invoked.',
),
'SCRIPT_FAIL': (
'SetupFailed',
'Execution of the setup module failed:\n%s',
),
'RST_ERROR': (
'RstProblem',
'The reStructuredText in your description generated errors:\n%s',
),
}
class PyromaIssue(Issue):
tool = 'pyroma'
class PyromaTool(Tool):
"""
Pyroma tests your project's packaging friendliness.
"""
@classmethod
def get_default_config(cls):
config = Tool.get_default_config()
config['filters'] = [
r'setup\.py$',
]
return config
@classmethod
def get_all_codes(cls):
return [
(test.__class__.__name__, test.message().strip())
for test in ratings.ALL_TESTS
] + list(TIDYPY_ISSUES.values())
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.disabled = self.config['disabled'][:]
if 'LicenseClassifier' in self.disabled:
self.disabled.append('LicenceClassifier')
if 'Licence' in self.disabled:
self.disabled.append('License')
def execute(self, finder):
issues = []
for filepath in finder.files(self.config['filters']):
dirname, _ = os.path.split(filepath)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with SysOutCapture() as capture:
try:
data = projectdata.get_data(dirname)
except RuntimeError:
err = capture.get_stderr()
if err:
issues.append(PyromaIssue(
TIDYPY_ISSUES['SCRIPT_FAIL'][0],
TIDYPY_ISSUES['SCRIPT_FAIL'][1] % (err,),
filepath,
))
else:
issues.append(PyromaIssue(
TIDYPY_ISSUES['NOT_CALLED'][0],
TIDYPY_ISSUES['NOT_CALLED'][1],
filepath,
))
continue
for test in ratings.ALL_TESTS:
name = test.__class__.__name__
if name in self.disabled:
continue
if test.test(data) is False:
issues.append(PyromaIssue(
name,
test.message(),
filepath,
))
err = capture.get_stderr()
if err:
if err.startswith('<string>:'):
issues.append(PyromaIssue(
TIDYPY_ISSUES['RST_ERROR'][0],
TIDYPY_ISSUES['RST_ERROR'][1] % (err,),
filepath,
))
else:
issues.append(ToolIssue(
err,
filepath,
))
return [
issue
for issue in issues
if issue.code not in self.disabled
]
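# Usage sketch (assumptions: the base Tool constructor takes its config dict
# and `finder` is a TidyPy finder object; neither signature is shown in this
# file).
def _example_pyroma_run(finder):
    config = PyromaTool.get_default_config()
    config.setdefault('disabled', [])
    tool = PyromaTool(config)
    return tool.execute(finder)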
|
Jaden-J/shape2ge
|
src/shapeobjects.py
|
Python
|
gpl-2.0
| 10,069 | 0.049558 |
###############################################################################
# Copyright (C) 2008 Johann Haarhoff <johann.haarhoff@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of Version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
###############################################################################
#
# Originally written:
# 2008 Johann Haarhoff, <johann.haarhoff@gmail.com>
# Modifications:
#
###############################################################################
#global modules
import shapelibc
import dbflibc
import sys
#my modules
from xmlwriter import * #AUTO_REMOVED by make
from vec import * #AUTO_REMOVED by make
def castSpecific(shpobj):
"""
if given a SHPObject, this will return a more
specific version like SHPPointObject depending
on the SHPType of the given object
"""
if shpobj._SHPType == shapelibc.SHPT_POINT:
obj = SHPPointObject()
obj.createFromObject(shpobj)
return obj
elif shpobj._SHPType == shapelibc.SHPT_ARCZ:
obj = SHPArcZObject()
obj.createFromObject(shpobj)
return obj
elif shpobj._SHPType == shapelibc.SHPT_ARC:
obj = SHPArcObject()
obj.createFromObject(shpobj)
return obj
elif shpobj._SHPType == shapelibc.SHPT_POLYGONZ:
obj = SHPPolygonZObject()
obj.createFromObject(shpobj)
return obj
elif shpobj._SHPType == shapelibc.SHPT_POLYGON:
obj = SHPPolygonObject()
obj.createFromObject(shpobj)
return obj
class WrongShapeObjectError(Exception):
"""
Thrown when trying to instantiate say a
SHPPointOPbject from file, and the file
returns a different type
"""
pass
class SHPObject():
def __init__(self,SHPType = shapelibc.SHPT_NULL,SHPId = -1,Verts = [[]],Label="",Desc = ""):
self._SHPType = SHPType
self._SHPId = SHPId
self._Verts = Verts
self._Label = Label
self._Desc = Desc
def createFromFile(self,filestream,shapenum):
"""
The filestream should already be opened
with shapelibc.open() before calling this
"""
shp = shapelibc.ShapeFile_read_object(filestream,shapenum)
SHPObject.__init__(self,shapelibc.SHPObject_type_get(shp),
shapelibc.SHPObject_id_get(shp),
shapelibc.SHPObject_vertices(shp))
def makeDescriptionFromFile(self,filestream,shapenum):
"""
The filestream should already be opened
with dbflibc.open() before calling this
"""
numfields = dbflibc.DBFFile_field_count(filestream)
for i in range(0,numfields):
field_name = str(dbflibc.DBFFile_field_info(filestream,i)[1]).upper()
field_data = str(dbflibc.DBFFile_read_attribute(filestream,shapenum,i)).lower()
self._Desc = self._Desc + "<b>" + field_name + ": </b>" + field_data + "<br>"
class SHPPointObject(SHPObject):
def __init__(self,SHPId = -1,Verts = [[]],Label="",Desc=""):
SHPObject.__init__(self,shapelibc.SHPT_POINT,SHPId,Verts,Label,Desc)
def createFromFile(self,filestream,shapenum):
SHPObject.createFromFile(self,filestream,shapenum)
if self._SHPType != shapelibc.SHPT_POINT:
raise WrongShapeObjectError()
def createFromObject(self,shpobject):
if shpobject._SHPType != shapelibc.SHPT_POINT:
raise WrongShapeObjectError()
SHPPointObject.__init__(self,shpobject._SHPId,shpobject._Verts,shpobject._Label,shpobject._Desc)
def toKML(self,out,styleUrl="",indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("Placemark")
kmlwriter.openElement("name")
if self._Label == "":
kmlwriter.addData(str(self._SHPId))
else:
kmlwriter.addData(str(self._Label))
kmlwriter.closeLast()
kmlwriter.openElement("styleUrl")
kmlwriter.addData(str(styleUrl))
kmlwriter.closeLast()
kmlwriter.openElement("description")
kmlwriter.addCData(self._Desc)
kmlwriter.closeLast()
kmlwriter.openElement("Point")
kmlwriter.openElement("coordinates")
for i,j in self._Verts:
kmlwriter.addData(str(i)+","+str(j)+",0 ")
kmlwriter.endDocument()
class SHPArcZObject(SHPObject):
def __init__(self,SHPId = -1,Verts = [[]],Label="",Desc=""):
SHPObject.__init__(self,shapelibc.SHPT_ARCZ,SHPId,Verts,Label,Desc)
def createFromFile(self,filestream,shapenum):
SHPObject.createFromFile(self,filestream,shapenum)
if self._SHPType != shapelibc.SHPT_ARCZ:
raise WrongShapeObjectError()
def createFromObject(self,shpobject):
if shpobject._SHPType != shapelibc.SHPT_ARCZ:
raise WrongShapeObjectError()
SHPArcZObject.__init__(self,shpobject._SHPId,shpobject._Verts,shpobject._Label,shpobject._Desc)
def toKML(self,out,styleUrl="",indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("Placemark")
kmlwriter.openElement("name")
if self._Label == "":
kmlwriter.addData(str(self._SHPId))
else:
kmlwriter.addData(str(self._Label))
kmlwriter.closeLast()
kmlwriter.openElement("styleUrl")
kmlwriter.addData(str(styleUrl))
kmlwriter.closeLast()
kmlwriter.openElement("description")
kmlwriter.addCData(self._Desc)
kmlwriter.closeLast()
kmlwriter.openElement("LineString")
kmlwriter.openElement("tessellate")
kmlwriter.addData("1")
kmlwriter.closeLast()
kmlwriter.openElement("coordinates")
#shapelibc does not populate _Verts properly,
#so we need to check for the Z coordinate
#even if this is an ArcZ
if len(self._Verts[0][0]) == 2:
#we only have x and y
for i,j in self._Verts[0]:
kmlwriter.addData(str(i)+","+str(j)+",0 ")
elif len(self._Verts[0][0]) == 3:
#we have x, y and z
for i,j,k in self._Verts[0]:
kmlwriter.addData(str(i)+","+str(j)+","+str(k)+" ")
elif len(self._Verts[0][0]) == 4:
#we have x,y,z and m
#I don't know what to do with m at this stage
for i,j,k,l in self._Verts[0]:
kmlwriter.addData(str(i)+","+str(j)+","+str(k)+" ")
|
kmlwriter.endDocument()
class SHPArcObject(SHPArcZObject):
    def __init__(self,SHPId = -1,Verts = [[]],Label="",Desc=""):
SHPObject.__init__(self,shapelibc.SHPT_ARC,SHPId,Verts,Label,Desc)
def createFromFile(self,filestream,shapenum):
SHPObject.createFromFile(self,filestream,shapenum)
if self._SHPType != shapelibc.SHPT_ARC:
raise WrongShapeObjectError()
def createFromObject(self,shpobject):
if shpobject._SHPType != shapelibc.SHPT_ARC:
raise WrongShapeObjectError()
SHPArcObject.__init__(self,shpobject._SHPId,shpobject._Verts,shpobject._Label,shpobject._Desc)
class SHPPolygonZObject(SHPObject):
def __init__(self,SHPId = -1,Verts = [[]],Label="",Desc=""):
SHPObject.__init__(self,shapelibc.SHPT_POLYGONZ,SHPId,Verts,Label,Desc)
def createFromFile(self,filestream,shapenum):
SHPObject.createFromFile(self,filestream,shapenum)
if self._SHPType != shapelibc.SHPT_POLYGONZ:
raise WrongShapeObjectError()
def createFromObject(self,shpobject):
if shpobject._SHPType != shapelibc.SHPT_POLYGONZ:
raise WrongShapeObjectError()
SHPPolygonZObject.__init__(self,shpobject._SHPId,shpobject._Verts,shpobject._Label,shpobject._Desc)
def toKML(self,out,styleUrl="",indentstr = '\t'):
kmlwriter = BetterXMLWriter(out,indentstr)
kmlwriter.openElement("Placemark")
kmlwriter.openElement("name")
if self._Label == "":
kmlwriter.addData(str(self._SHPId))
else:
kmlwriter.addData(str(self._Label))
kmlwriter.closeLast()
kmlwriter.openElement("styleUrl")
kmlwriter.addData(str(styleUrl))
kmlwriter.closeLast()
kmlwriter.openElement("description")
kmlwriter.addCData(self._Desc)
kmlwriter.closeLast()
kmlwriter.openElement("Polygon")
kmlwriter.openElement("extrude")
kmlwriter.addData("0")
kmlwriter.closeLast()
kmlwriter.openElement("tessellate")
kmlwriter.addData("1")
kmlwriter.closeLast()
#polygons may have multiple parts
#in t
|
scottcunningham/ansible
|
lib/ansible/executor/process/worker.py
|
Python
|
gpl-3.0
| 6,192 | 0.004037 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue
import multiprocessing
import os
import signal
import sys
import time
import traceback
HAS_ATFORK=True
try:
from Crypto.Random import atfork
except ImportError:
HAS_ATFORK=False
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.utils.debug import debug
__all__ = ['WorkerProcess']
class WorkerProcess(multiprocessing.Process):
'''
The worker thread class, which uses TaskExecutor to run tasks
read from a job queue and pushes results into a results queue
for reading later.
'''
def __init__(self, tqm, main_q, rslt_q, loader):
# takes a task queue manager as the sole param:
self._main_q = main_q
self._rslt_q = rslt_q
self._loader = loader
# dupe stdin, if we have one
self._new_stdin = sys.stdin
try:
fileno = sys.stdin.fileno()
if fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
except OSError, e:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
except ValueError:
# couldn't get stdin's fileno, so we just carry on
pass
super(WorkerProcess, self).__init__()
def run(self):
'''
Called when the process is started, and loops indefinitely
        until an error is encountered (typically an IOError from the
queue pipe being disconnected). During the loop, we attempt
to pull tasks off the job queue and run them, pushing the result
onto the results queue. We also remove the host from the blocked
hosts list, to signify that they are ready for their next task.
'''
if HAS_ATFORK:
|
atfork()
while True:
task = None
try:
if not self._main_q.empty():
debug("there's work to be done!")
|
(host, task, basedir, job_vars, play_context, shared_loader_obj) = self._main_q.get(block=False)
debug("got a task/handler to work on: %s" % task)
# because the task queue manager starts workers (forks) before the
                    # playbook is loaded, set the basedir of the loader inherited by
# this fork now so that we can find files correctly
self._loader.set_basedir(basedir)
# Serializing/deserializing tasks does not preserve the loader attribute,
# since it is passed to the worker during the forking of the process and
# would be wasteful to serialize. So we set it here on the task now, and
# the task handles updating parent/child objects as needed.
task.set_loader(self._loader)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
new_play_context = play_context.set_task_and_host_override(task=task, host=host)
# execute the task and build a TaskResult from the result
debug("running TaskExecutor() for %s/%s" % (host, task))
executor_result = TaskExecutor(host, task, job_vars, new_play_context, self._new_stdin, self._loader, shared_loader_obj).run()
debug("done running TaskExecutor() for %s/%s" % (host, task))
task_result = TaskResult(host, task, executor_result)
# put the result on the result queue
debug("sending task result")
self._rslt_q.put(task_result, block=False)
debug("done sending task result")
else:
time.sleep(0.1)
except queue.Empty:
pass
except (IOError, EOFError, KeyboardInterrupt):
break
except AnsibleConnectionFailure:
try:
if task:
task_result = TaskResult(host, task, dict(unreachable=True))
self._rslt_q.put(task_result, block=False)
except:
# FIXME: most likely an abort, catch those kinds of errors specifically
break
except Exception, e:
debug("WORKER EXCEPTION: %s" % e)
debug("WORKER EXCEPTION: %s" % traceback.format_exc())
try:
if task:
task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
self._rslt_q.put(task_result, block=False)
except:
# FIXME: most likely an abort, catch those kinds of errors specifically
break
debug("WORKER PROCESS EXITING")
|
ktan2020/legacy-automation
|
samples/misc/sel_google_search_phantomjs.py
|
Python
|
mit
| 1,101 | 0.00545 |
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
# Create a new instance of the IE driver
driver = webdriver.PhantomJS()
# go to the google home page
driver.get("http://www.google.com")
# find the element that's name attribute is q (the google search box)
inputElement = driver.find_element_by_name("q")
# type in the search
inputElement.send_keys("cheese!")
# submit the form (although google automatically searches now without submitting)
inputElement.submit()
# the page is ajaxy so the title is originally this:
print driver.title
driver.get_screenshot_as_file('screenshot.png')
try:
    # we have to wait for the page to refresh, the last thing that seems to be updated is the title
    WebDriverWait(driver, 10).until(EC.title_contains("cheese!"))
# You should see "cheese! - Google Search"
print driver.title
finally:
driver.quit()
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/converters/OpenMM.py
|
Python
|
gpl-2.0
| 7,397 | 0.001622 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""OpenMM structure I/O --- :mod:`MDAnalysis.converters.OpenMM`
================================================================
Read coordinates data from a
`OpenMM <http://docs.openmm.org/latest/api-python/generated/openmm.app.simulation.Simulation.html#openmm.app.simulation.Simulation>`_
:class:`openmm.app.simulation.Simulation` with :class:`OpenMMReader`
into a MDAnalysis Universe.
Also converts other objects within the
`OpenMM Application Layer <http://docs.openmm.org/latest/api-python/app.html>`_:
- `openmm.app.pdbfile.PDBFile <http://docs.openmm.org/latest/api-python/generated/openmm.app.pdbfile.PDBFile.html#openmm.app.pdbfile.PDBFile>`_
- `openmm.app.modeller.Modeller <http://docs.openmm.org/latest/api-python/generated/openmm.app.modeller.Modeller.html#openmm.app.modeller.Modeller>`_
- `openmm.app.pdbxfile.PDBxFile <http://docs.openmm.org/latest/api-python/generated/openmm.app.pdbxfile.PDBxFile.html#openmm.app.pdbxfile.PDBxFile>`_
Example
-------
OpenMM can read various file formats into OpenMM objects.
MDAnalysis can then convert some of these OpenMM objects into MDAnalysis Universe objects.
>>> import openmm.app as app
>>> import MDAnalysis as mda
>>> from MDAnalysis.tests.datafiles import PDBX
>>> pdbxfile = app.PDBxFile(PDBX)
>>> mda.Universe(pdbxfile)
<Universe with 60 atoms>
Classes
-------
.. autoclass:: OpenMMSimulationReader
:members:
.. autoclass:: OpenMMAppReader
:members:
"""
import numpy as np
from ..coordinates import base
class OpenMMSimulationReader(base.SingleFrameReaderBase):
"""Reader for OpenMM Simulation objects
.. versionadded:: 2.0.0
"""
format = "OPENMMSIMULATION"
units = {"time": "ps", "length": "nm", "velocity": "nm/ps",
"force": "kJ/(mol*nm)", "energy": "kJ/mol"}
@staticmethod
def _format_hint(thing):
"""Can this reader read *thing*?
"""
try:
from openmm.app import Simulation
except ImportError:
try: # pragma: no cover
from simtk.openmm.app import Simulation
except ImportError:
return False
else:
return isinstance(thing, Simulation)
def _read_first_frame(self):
self.n_atoms = self.filename.topology.getNumAtoms()
self.ts = self._mda_timestep_from_omm_context()
if self.convert_units:
self.convert_pos_from_native(self.ts._pos)
self.ts.triclinic_dimensions = self.convert_pos_from_native(
self.ts.triclinic_dimensions, inplace=False
)
self.ts.dimensions[3:] = _sanitize_box_angles(self.ts.dimensions[3:])
self.convert_velocities_from_native(self.ts._velocities)
self.convert_forces_from_native(self.ts._forces)
self.convert_time_from_native(self.ts.dt)
def _mda_timestep_from_omm_context(self):
""" Construct Timestep object from OpenMM context """
try:
import openmm.unit as u
except ImportError: # pragma: no cover
import simtk.unit as u
state = self.filename.context.getState(-1, getVelocities=True,
getForces=True, getEnergy=True)
n_atoms = self.filename.context.getSystem().getNumParticles()
ts = self._Timestep(n_atoms, **self._ts_kwargs)
ts.frame = 0
ts.data["time"] = state.getTime()._value
ts.data["potential_energy"] = (
            state.getPotentialEnergy().in_units_of(u.kilojoule/u.mole)
)
ts.data["kinetic_energy"] = (
state.getKineticEnergy().in_units_of(u.kilojoule/u.mole)
)
ts.triclinic_dimensions = state.getPeriodicBoxVectors(
asNumpy=True)._value
ts.dimensions[3:] = _sanitize_box_angles(ts.dimensions[3:])
ts.positions = state.getPositions(asNumpy=True)._value
ts.velocities = state.getVelocities(asNumpy=True)._value
        ts.forces = state.getForces(asNumpy=True)._value
return ts
class OpenMMAppReader(base.SingleFrameReaderBase):
"""Reader for OpenMM Application layer objects
See also `the object definition in the OpenMM Application layer <http://docs.openmm.org/latest/api-python/generated/openmm.app.simulation.Simulation.html#openmm.app.simulation.Simulation>`_
.. versionadded:: 2.0.0
"""
format = "OPENMMAPP"
units = {"time": "ps", "length": "nm"}
@staticmethod
def _format_hint(thing):
"""Can this reader read *thing*?
"""
try:
from openmm import app
except ImportError:
try: # pragma: no cover
from simtk.openmm import app
except ImportError:
return False
else:
return isinstance(thing, (app.PDBFile, app.Modeller,
app.PDBxFile))
def _read_first_frame(self):
self.n_atoms = self.filename.topology.getNumAtoms()
self.ts = self._mda_timestep_from_omm_app()
if self.convert_units:
self.convert_pos_from_native(self.ts._pos)
if self.ts.dimensions is not None:
self.ts.triclinic_dimensions = self.convert_pos_from_native(
self.ts.triclinic_dimensions, inplace=False
)
self.ts.dimensions[3:] = _sanitize_box_angles(self.ts.dimensions[3:])
def _mda_timestep_from_omm_app(self):
""" Construct Timestep object from OpenMM Application object """
omm_object = self.filename
n_atoms = omm_object.topology.getNumAtoms()
ts = self._Timestep(n_atoms, **self._ts_kwargs)
ts.frame = 0
if omm_object.topology.getPeriodicBoxVectors() is not None:
ts.triclinic_dimensions = np.array(
omm_object.topology.getPeriodicBoxVectors()._value
)
ts.dimensions[3:] = _sanitize_box_angles(ts.dimensions[3:])
ts.positions = np.array(omm_object.getPositions()._value)
return ts
def _sanitize_box_angles(angles):
""" Ensure box angles correspond to first quadrant
See `discussion on unitcell angles <https://github.com/MDAnalysis/mdanalysis/pull/2917/files#r620558575>`_
"""
inverted = 180 - angles
return np.min(np.array([angles, inverted]), axis=0)
|
vipmunot/HackerRank
|
Data Structures/Arrays/Sparse Arrays.py
|
Python
|
mit
| 249 | 0.02008 |
n = int(input())
arr = []
for i in range(n):
arr.append(input())
q = int(input())
for i in range(q):
query = input()
count = 0
for j in range(len(arr)):
if arr[j] == query:
count +=1
print(count)
| |
terhorst/psmcpp
|
smcpp/analysis/base.py
|
Python
|
gpl-3.0
| 6,269 | 0.001276 |
import numpy as np
import json
import sys
from .. import _smcpp, util, logging, data_filter
|
import smcpp.defaults
from smcpp.optimize.optimizers import SMCPPOptimizer, TwoPopulationOptimizer
from smcpp.optimize.plugins import analysis_saver, parameter_optimizer
logger = logging.getLogger(__name__)
from ..model import SMCModel, SMCTwoPopulationModel
_model_cls_d = {cls.__name__: cls for cls in (SMCModel, SMCTwoPopulationModel)}
class BaseAnalysis:
"Base class for analysis of population genetic data."
def __init__(self, files, args):
# Misc. parameter initialiations
self._args = args
if args.cores is not None:
_smcpp.set_num_threads(args.cores)
self._N0 = .5e-4 / args.mu # .0001 = args.mu * 2 * N0
self._theta = 2. * self._N0 * args.mu
logger.info("theta: %f", self._theta)
if args.r is not None:
self._rho = 2 * self._N0 * args.r
else:
self._rho = self._theta
assert np.all(np.isfinite([self._rho, self._theta]))
logger.info("rho: %f", self._rho)
self._penalty = 0.
self._niter = args.em_iterations
if args.unfold:
args.polarization_error = 0.
logger.warning(
"Using unfolded SFS. The user should verify "
"that the ancestral allele has been correctly "
"coded."
)
if args.polarization_error > 0.:
logger.debug("Polarization error p=%f", args.polarization_error)
# Load data and apply transformations to normalize
pipe = self._pipeline = data_filter.DataPipeline(files)
pipe.add_filter(load_data=data_filter.LoadData())
pipe.add_filter(data_filter.RecodeNonseg(cutoff=args.nonseg_cutoff))
pipe.add_filter(data_filter.Compress())
pipe.add_filter(data_filter.BreakLongSpans(cutoff=100000))
pipe.add_filter(data_filter.DropSmallContigs(100000))
pipe.add_filter(watterson=data_filter.Watterson())
pipe.add_filter(
mutation_counts=data_filter.CountMutations(
w=int(2e-3 * self._N0 / self._rho)
)
)
@property
def hidden_states(self):
return self._hs
@hidden_states.setter
def hidden_states(self, hs):
hs = np.array(hs)
self._hs = {pop: hs for pop in self.populations}
@property
def populations(self):
return self._pipeline["load_data"].populations
def _init_optimizer(self, outdir, base, algorithm, xtol, ftol, single):
self._optimizer = self._OPTIMIZER_CLS(self, algorithm, xtol, ftol, single)
if outdir:
self._optimizer.register_plugin(analysis_saver.AnalysisSaver(outdir, base))
def rescale(self, x):
return x / (2. * self._N0)
def __len__(self):
return sum(len(c) for c in self.contigs)
def _init_inference_manager(self, polarization_error, hs):
## Create inference object which will be used for all further calculations.
logger.debug("Creating inference manager...")
d = {}
max_n = {}
a = {}
self._ims = {}
for c in self.contigs:
d.setdefault(c.pid, []).append(c)
max_n.setdefault(c.pid, -1)
max_n[c.pid] = np.maximum(max_n[c.pid], c.n)
a.setdefault(c.pid, []).append(tuple(c.a))
for pid in d:
logger.debug("Creating inference manager for %s", pid)
data = [c.data for c in d[pid]]
if len(pid) == 1:
im = _smcpp.PyOnePopInferenceManager(max_n[pid], data, hs[pid[0]], pid, polarization_error)
else:
assert len(pid) == 2
s = set(a[pid])
assert len(s) == 1
im = _smcpp.PyTwoPopInferenceManager(
*(max_n[pid]), *s.pop(), data, hs[pid[0]], pid, polarization_error
)
im.model = self._model
im.theta = self._theta
im.rho = self._rho
im.alpha = self._alpha = 1
self._ims[pid] = im
# @property
# def _data(self):
# return [c.data for c in self.contigs]
def run(self, niter=None):
"Perform the analysis."
self._optimizer.run(niter or self._niter)
def Q(self):
"Value of Q() function in M-step."
qq = [self._ims[pop].Q(separate=True) for pop in self._ims]
qr = self._penalty * self.model.regularizer()
qq = np.sum(qq)
ret = qq - qr
logger.debug("reg: %s", util.format_ad(qr))
logger.debug("Q: %s", util.format_ad(ret))
return ret
def E_step(self):
"Perform E-step."
logger.info("Running E-step")
for pop in self._ims:
self._ims[pop].E_step()
logger.info("E-step completed")
def loglik(self, reg=True):
"Log-likelihood of data after most recent E-step."
ll = sum([im.loglik() for im in self._ims.values()])
if reg:
ll -= self._penalty * float(self.model.regularizer())
return ll
@property
def model(self):
return self._model
@model.setter
def model(self, m):
self._model = m
for im in self._ims.values():
im.model = m
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, a):
self._alpha = a
for im in self._ims.values():
im.alpha = a
@property
def rho(self):
return self._rho
@rho.setter
def rho(self, r):
self._rho = r
for im in self._ims.values():
im.rho = r
@property
def contigs(self):
return list(self._pipeline.results())
@property
def npop(self):
"The number of populations contained in this analysis."
return len(self.populations)
def dump(self, filename):
"Dump result of this analysis to :filename:."
d = {"theta": self._theta, "rho": self._rho, "alpha": self._alpha}
d["model"] = self.model.to_dict()
d["hidden_states"] = {k: list(v) for k, v in self.hidden_states.items()}
json.dump(d, open(filename + ".json", "wt"), sort_keys=True, indent=4)
|
praekeltfoundation/ndoh-hub
|
registrations/management/commands/upload_clinic_codes.py
|
Python
|
bsd-3-clause
| 2,626 | 0.001142 |
from csv import DictReader
from django.core.management.base import BaseCommand
from registrations.models import ClinicCode
class Command(BaseCommand):
help = (
"This command takes in a CSV with the columns: uid, code, facility, province,"
"and location, and creates/updates the cliniccodes in the database."
"This will only add or update, it will not remove"
)
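    # Example input rows (editor's illustration; all values are hypothetical):
    #   uid,code,facility,province,location
    #   AbCdEf12345,123456,Example Clinic,gp,"[28.034,-26.195]"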
def add_arguments(self, parser):
parser.add_argument("data_csv", type=str, help=("The CSV with the data in it"))
def normalise_location(self, location):
"""
Normalises the location from `[longitude,latitude]` to ISO6709
"""
def fractional_part(f):
if not float(f) % 1:
return ""
parts = f.split(".")
return f".{parts[1]}"
try:
longitude, latitude = location.strip("[]").split(",")
return (
f"{int(float(latitude)):+03d}{fractional_part(latitude)}"
f"{int(float(longitude)):+04d}{fractional_part(longitude)}"
"/"
)
except (AttributeError, ValueError, TypeError):
|
return None
def handle(self, *args, **kwargs):
updated = 0
|
created = 0
with open(kwargs["data_csv"]) as f:
reader = DictReader(f)
for row in reader:
_, new = ClinicCode.objects.update_or_create(
uid=row["uid"].strip(),
defaults={
"code": row["code"].strip(),
"value": row["code"].strip(),
"name": row["facility"].strip(),
"province": {
"ec": "ZA-EC",
"fs": "ZA-FS",
"gp": "ZA-GT",
"kz": "ZA-NL",
"lp": "ZA-LP",
"mp": "ZA-MP",
"nc": "ZA-NC",
"nw": "ZA-NW",
"wc": "ZA-WC",
}[row["province"].strip()[:2].lower()],
"location": self.normalise_location(row["location"].strip()),
},
)
if new:
created += 1
else:
updated += 1
self.success(f"Updated {updated} and created {created} clinic codes")
def log(self, level, msg):
self.stdout.write(level(msg))
def success(self, msg):
self.log(self.style.SUCCESS, msg)
|
sjug/perf-tests
|
verify/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 4,896 | 0.003881 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
args = parser.parse_args()
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
def get_refs():
refs = {}
for path in glob.glob(os.path.join(rootdir, "verify/boilerplate/boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except:
return False
data = f.read()
f.close()
extension = file_extension(filename)
ref = refs[extension]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
# Replace all occurrences of the regex "2016|2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_output', '.git', 'vendor']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
extension = file_extension(pathname)
if extension in extensions:
outfiles.append(pathname)
return outfiles
def get_dates():
years = datetime.datetime.now().year
return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
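# For illustration (editor's note): run in 2016 this builds the alternation
# '(2014|2015|2016)', which the "date" regex below uses to normalise years.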
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015,... till current year, company holder names can be anything
regexs["date"] = re.compile(get_dates())
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
|
akscram/lollipop-jsonschema
|
lollipop_jsonschema/jsonschema.py
|
Python
|
mit
| 5,661 | 0.00159 |
__all__ = [
'json_schema',
]
import lollipop.types as lt
import lollipop.validators as lv
from lollipop.utils import identity
from collections import OrderedDict
from .compat import iteritems
def find_validators(schema, validator_type):
return [validator
for validator in schema.validators
if isinstance(validator, validator_type)]
def json_schema(schema):
"""Convert Lollipop schema to JSON schema"""
js = OrderedDict()
if schema.name:
js['title'] = schema.name
if schema.description:
js['description'] = schema.description
any_of_validators = find_validators(schema, lv.AnyOf)
if any_of_validators:
        choices = set(any_of_validators[0].choices)
for validator in any_of_validators[1:]:
choices = choices.intersection(set(validator.choices))
if not choices:
raise ValueError('AnyOf constraints choices does not allow any values')
        js['enum'] = list(schema.dump(choice) for choice in choices)
return js
none_of_validators = find_validators(schema, lv.NoneOf)
if none_of_validators:
choices = set(none_of_validators[0].values)
for validator in none_of_validators[1:]:
choices = choices.union(set(validator.values))
if choices:
js['not'] = {'enum': list(schema.dump(choice) for choice in choices)}
if isinstance(schema, lt.Any):
pass
elif isinstance(schema, lt.String):
js['type'] = 'string'
length_validators = find_validators(schema, lv.Length)
if length_validators:
if any(v.min for v in length_validators) or \
any(v.exact for v in length_validators):
js['minLength'] = max(v.exact or v.min for v in length_validators)
if any(v.max for v in length_validators) or \
any(v.exact for v in length_validators):
js['maxLength'] = min(v.exact or v.max for v in length_validators)
regexp_validators = find_validators(schema, lv.Regexp)
if regexp_validators:
js['pattern'] = regexp_validators[0].regexp.pattern
elif isinstance(schema, lt.Number):
if isinstance(schema, lt.Integer):
js['type'] = 'integer'
else:
js['type'] = 'number'
range_validators = find_validators(schema, lv.Range)
if range_validators:
if any(v.min for v in range_validators):
js['minimum'] = max(v.min for v in range_validators if v.min)
if any(v.max for v in range_validators):
js['maximum'] = min(v.max for v in range_validators if v.max)
elif isinstance(schema, lt.Boolean):
js['type'] = 'boolean'
elif isinstance(schema, lt.List):
js['type'] = 'array'
js['items'] = json_schema(schema.item_type)
length_validators = find_validators(schema, lv.Length)
if length_validators:
if any(v.min for v in length_validators) or \
any(v.exact for v in length_validators):
js['minItems'] = min(v.exact or v.min for v in length_validators)
if any(v.max for v in length_validators) or \
any(v.exact for v in length_validators):
js['maxItems'] = min(v.exact or v.max for v in length_validators)
unique_validators = find_validators(schema, lv.Unique)
if unique_validators and any(v.key is identity for v in unique_validators):
js['uniqueItems'] = True
elif isinstance(schema, lt.Tuple):
js['type'] = 'array'
js['items'] = [json_schema(item_type) for item_type in schema.item_types]
elif isinstance(schema, lt.Object):
js['type'] = 'object'
js['properties'] = OrderedDict(
(k, json_schema(v.field_type))
for k, v in iteritems(schema.fields)
)
required = [
k
for k, v in iteritems(schema.fields)
if not isinstance(v.field_type, lt.Optional)
]
if required:
js['required'] = required
if schema.allow_extra_fields in [True, False]:
js['additionalProperties'] = schema.allow_extra_fields
elif isinstance(schema.allow_extra_fields, lt.Field):
field_type = schema.allow_extra_fields.field_type
if isinstance(field_type, lt.Any):
js['additionalProperties'] = True
else:
js['additionalProperties'] = json_schema(field_type)
elif isinstance(schema, lt.Dict):
js['type'] = 'object'
fixed_properties = schema.value_types \
if hasattr(schema.value_types, 'keys') else {}
properties = OrderedDict(
(k, json_schema(v))
for k, v in iteritems(fixed_properties)
)
if properties:
js['properties'] = properties
required = [
k
for k, v in iteritems(fixed_properties)
if not isinstance(v, lt.Optional)
]
if required:
js['required'] = required
if hasattr(schema.value_types, 'default'):
js['additionalProperties'] = json_schema(schema.value_types.default)
elif isinstance(schema, lt.Constant):
js['const'] = schema.value
elif isinstance(schema, lt.Optional):
js.update(json_schema(schema.inner_type))
default = schema.load_default()
if default:
js['default'] = schema.inner_type.dump(default)
elif hasattr(schema, 'inner_type'):
js.update(json_schema(schema.inner_type))
return js
|
tensorflow/moonlight
|
moonlight/staves/staffline_distance.py
|
Python
|
apache-2.0
| 10,559 | 0.00483 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements staffline distance estimation.
The staffline distance is the vertical distance between consecutive lines in a
staff, which is assumed to be uniform for a single staff on a scanned music
score. The staffline thickness is the vertical height of each staff line, which
is assumed to be uniform for the entire page.
Uses the algorithm described in [1], which creates a histogram of possible
staffline distance and thickness values for the entire image, based on the
vertical run-length encoding [2]. Each consecutive pair of black and white runs
contributes to the staffline distance histogram (because they may be the
staffline followed by an unobstructed space, or vice versa). We then take the
argmax of the histogram, and find candidate staff line runs. These runs must be
before or after another run, such that the sum of the run lengths is the
detected staffline distance. Then the black run is considered to be an actual
staff line, and its length contributes to the staffline thickness histogram.
Although we use a single staffline distance value for staffline thickness
detection, we may detect multiple distinct peaks in the histogram. We then run
staff detection using each distinct peak value, to detect smaller staves with an
unusual size, e.g. ossia parts [3].
[1] Cardoso, Jaime S., and Ana Rebelo. "Robust staffline thickness and distance
estimation in binary and gray-level music scores." 20th International
Conference on Pattern Recognition (ICPR). IEEE, 2010.
[2] https://en.wikipedia.org/wiki/Run-length_encoding
[3] https://en.wikipedia.org/wiki/Ossia
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from moonlight.util import run_length
from moonlight.util import segments
# The size of the histograms. Normal values for the peak are around 20 for
# staffline distance, and 2-3 for staffline thickness.
_MAX_STAFFLINE_DISTANCE_THICKNESS_VALUE = 256
# The minimum number of votes for a staffline distance bin. We expect images to
# be a reasonable size (> 100x100), and want to ensure we exclude images that
# don't contain any staves.
_MIN_STAFFLINE_DISTANCE_SCORE = 10000
# The maximum allowed number of unique staffline distances. If more staffline
# distances are detected, return an empty list instead.
_MAX_ALLOWED_UNIQUE_STAFFLINE_DISTANCES = 3
_STAFFLINE_DISTANCE_INVALIDATE_DISTANCE = 1
_STAFFLINE_THICKNESS_INVALIDATE_DISTANCE = 1
_PEAK_CUTOFF = 0.5
def _single_peak(values, relative_cutoff, minval, invalidate_distance):
"""Takes a single peak if it is high enough compared to all other peaks.
Args:
values: 1D tensor of values to take the peaks on.
relative_cutoff: The fraction of the highest peak which all other peaks
should be below.
minval: The peak should have at least this value.
invalidate_distance: Exclude values that are up to invalidate_distance away
from the peak.
Returns:
The index of the single peak in `values`, or -1 if there is not a single
peak that satisfies `relative_cutoff`.
"""
relative_cutoff = tf.convert_to_tensor(relative_cutoff, tf.float32)
# argmax is safe because the histogram is always non-empty.
peak = tf.to_int32(tf.argmax(values))
# Take values > minval away from the peak.
other_values = tf.boolean_mask(
values,
tf.greater(
tf.abs(tf.range(tf.shape(values)[0]) - peak), invalidate_distance))
should_take_peak = tf.logical_and(
tf.greater_equal(values[peak], minval),
# values[peak] * relative_cutoff must be >= other_values.
tf.reduce_all(
tf.greater_equal(
tf.to_float(values[peak]) * relative_cutoff,
tf.to_float(other_values))))
return tf.cond(should_take_peak, lambda: peak, lambda: -1)
def _estimate_staffline_distance(columns, lengths):
"""Estimates the staffline distances of a music score.
Args:
columns: 1D array. The column indices of each vertical run.
lengths: 1D array. The length of each consecutive vertical run.
Returns:
A 1D tensor of possible staffline distances in the image.
"""
with tf.name_scope('estimate_staffline_distance'):
run_pair_lengths = lengths[:-1] + lengths[1:]
keep_pair = tf.equal(columns[:-1], columns[1:])
staffline_distance_histogram = tf.bincount(
tf.boolean_mask(run_pair_lengths, keep_pair),
# minlength required to avoid errors on a fully white image.
minlength=_MAX_STAFFLINE_DISTANCE_THICKNESS_VALUE,
maxlength=_MAX_STAFFLINE_DISTANCE_THICKNESS_VALUE)
peaks = segments.peaks(
staffline_distance_histogram,
minval=_MIN_STAFFLINE_DISTANCE_SCORE,
invalidate_distance=_STAFFLINE_DISTANCE_INVALIDATE_DISTANCE)
def do_filter_peaks():
"""Process the peaks if they are non-empty.
Returns:
The filtered peaks. Peaks below the cutoff when compared to the highest
peak are removed. If the peaks are invalid, then an empty list is
returned.
"""
histogram_size = tf.shape(staffline_distance_histogram)[0]
peak_values = tf.to_float(tf.gather(staffline_distance_histogram, peaks))
max_value = tf.reduce_max(peak_values)
allowed_peaks = tf.greater_equal(peak_values,
max_value * tf.constant(_PEAK_CUTOFF))
# Check if there are too many detected staffline distances, and we should
# return an empty list.
allowed_peaks &= tf.less_equal(
tf.reduce_sum(tf.to_int32(allowed_peaks)),
_MAX_ALLOWED_UNIQUE_STAFFLINE_DISTANCES)
# Check if any values sufficiently far away from the peaks are too high.
# This means the peaks are not sharp enough and we should return an empty
# list.
far_from_peak = tf.greater(
tf.reduce_min(
tf.abs(tf.range(histogram_size)[None, :] - peaks[:, None]),
axis=0), _STAFFLINE_DISTANCE_INVALIDATE_DISTANCE)
allowed_peaks &= tf.less(
tf.to_float(
tf.reduce_max(
tf.boolean_mask(staffline_distance_histogram,
far_from_peak))),
max_value * tf.constant(_PEAK_CUTOFF))
return tf.boolean_mask(peaks, allowed_peaks)
return tf.cond(
tf.greater(tf.shape(peaks)[0], 0), do_filter_peaks,
lambda: tf.identity(peaks))
def _estimate_staffline_thickness(columns, values, lengths, staffline_distance):
"""Estimates the staffline thickness of a music score.
Args:
columns: 1D array. The column indices of each consecutive vertical run.
values: 1D array. The value (0 or 1) of each vertical run.
lengths: 1D array. The length of each vertical run.
    staffline_distance: A 1D tensor of the possible staffline distances in the
      image. One of the distances may be chosen arbitrarily.
  Returns:
    A scalar tensor with the staffline thickness for the entire page, or -1 if
it could not be estimated (staffline_distance is empty, or there are not
enough runs to estimate the staffline thickness).
"""
with tf.name_scope('estimate_staffline_thickness'):
def do_estimate():
"""Compute the thickness if distance detection was successful."""
run_pair_lengths = lengths[:-1] + lengths[1:]
# Use the smallest staffline distance to estimate the staffline thickness.
keep_pair = tf.logical_and(
tf.equal(columns[:-1], columns[1:]),
tf.equal(run_pair_lengths, staffline_distance[0]))
run_pair_lengths = tf.boo
|
mastizada/kuma
|
kuma/core/tests/__init__.py
|
Python
|
mpl-2.0
| 6,376 | 0.000471 |
from django.conf import settings, UserSettingsHolder
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.test.client import Client
from django.utils.functional import wraps
from django.utils.importlib import import_module
import constance.config
from constance.backends import database as constance_database
from nose import SkipTest
from nose.tools import eq_
import test_utils
from ..exceptions import FixtureMissingError
from ..urlresolvers import split_path, reverse
get = lambda c, v, **kw: c.get(reverse(v, **kw), follow=True)
post = lambda c, v, data={}, **kw: c.post(reverse(v, **kw), data, follow=True)
def attrs_eq(received, **expected):
"""Compares received's attributes with expected's kwargs."""
for k, v in expected.iteritems():
eq_(v, getattr(received, k))
def get_user(username='testuser'):
"""Return a django user or raise FixtureMissingError"""
try:
return User.objects.get(username=username)
except User.DoesNotExist:
raise FixtureMissingError(
'Username "%s" not found. You probably forgot to import a'
' users fixture.' % username)
class overrider(object):
"""
See http://djangosnippets.org/snippets/2437/
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def enable(self):
pass
def disable(self):
pass
class override_constance_settings(overrider):
"""Decorator / context manager to override constance settings and defeat
its caching."""
def enable(self):
self.old_cache = constance_database.db_cache
constance_database.db_cache = None
        self.old_settings = dict((k, getattr(constance.config, k))
for k in dir(constance.config))
for k, v in self.options.items():
constance.config._backend.set(k, v)
def disable(self):
for k, v in self.old_settings.items():
            constance.config._backend.set(k, v)
constance_database.db_cache = self.old_cache
class override_settings(overrider):
"""Decorator / context manager to override Django settings"""
def enable(self):
self.old_settings = settings._wrapped
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.old_settings
def mock_lookup_user():
return {u'confirmed': True,
u'country': u'us',
u'created-date': u'12/8/2013 8:05:55 AM',
u'email': u'testuser@test.com',
u'format': u'H',
u'lang': u'en-US',
u'master': True,
u'newsletters': [],
u'pending': False,
u'status': u'ok',
u'token': u'cdaa9e5d-2023-5f59-974d-83f6a29514ec'}
class SessionAwareClient(Client):
"""
Just a small override to patch the session property to be able to
use the sessions.
"""
def _session(self):
"""
Obtains the current session variables.
Backported the else clause from Django 1.7 to make sure there
is a session available during tests.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
else:
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
return {}
session = property(_session)
class LocalizingMixin(object):
def request(self, **request):
"""Make a request, but prepend a locale if there isn't one already."""
# Fall back to defaults as in the superclass's implementation:
path = request.get('PATH_INFO', self.defaults.get('PATH_INFO', '/'))
locale, shortened = split_path(path)
if not locale:
request['PATH_INFO'] = '/%s/%s' % (settings.LANGUAGE_CODE,
shortened)
return super(LocalizingMixin, self).request(**request)
class LocalizingClient(LocalizingMixin, SessionAwareClient):
"""Client which prepends a locale so test requests can get through
LocaleURLMiddleware without resulting in a locale-prefix-adding 301.
Otherwise, we'd have to hard-code locales into our tests everywhere or
{mock out reverse() and make LocaleURLMiddleware not fire}.
"""
# If you use this, you might also find the force_locale=True argument to
# kuma.core.urlresolvers.reverse() handy, in case you need to force locale
# prepending in a one-off case or do it outside a mock request.
class KumaTestCase(test_utils.TestCase):
client_class = SessionAwareClient
localizing_client = False
skipme = False
@classmethod
def setUpClass(cls):
if cls.skipme:
raise SkipTest
if cls.localizing_client:
cls.client_class = LocalizingClient
super(KumaTestCase, cls).setUpClass()
def get_messages(self, request):
# Django 1.4 RequestFactory requests can't be used to test views that
# call messages.add (https://code.djangoproject.com/ticket/17971)
# FIXME: HACK from http://stackoverflow.com/q/11938164/571420
messages = FallbackStorage(request)
request._messages = messages
return messages
class SkippedTestCase(KumaTestCase):
skipme = True
|
angelapper/edx-platform
|
openedx/core/djangoapps/user_api/api.py
|
Python
|
agpl-3.0
| 35,762 | 0.002489 |
import copy
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django_countries import countries
import accounts
import third_party_auth
from edxmako.shortcuts import marketing_link
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.helpers import FormDescription
from openedx.features.enterprise_support.api import enterprise_customer_for_request
from student.forms import get_registration_extension_form
from student.models import UserProfile
def get_password_reset_form():
"""Return a description of the password reset form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("password_change_request"))
# Translators: This label appears above a field on the password reset
# form meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the password reset form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
# Translators: These instructions appear on the password reset form,
# immediately below a field meant to hold the user's email address.
email_instructions = _(u"The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": accounts.EMAIL_MIN_LENGTH,
"max_length": accounts.EMAIL_MAX_LENGTH,
}
)
return form_desc
def get_login_session_form():
"""Return a description of the login form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_login_session"))
# Translators: This label appears above a field on the login form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the login form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
# Translators: These instructions appear on the login form, immediately
# below a field meant to hold the user's email address.
email_instructions = _("The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": accounts.EMAIL_MIN_LENGTH,
"max_length": accounts.EMAIL_MAX_LENGTH,
}
)
# Translators: This label appears above a field on the login form
    # meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"max_length": accounts.PASSWORD_MAX_LENGTH,
}
)
form_desc.add_field(
"remember",
field_type="checkbox",
label=_("Remember me"),
default=False,
required=False,
)
return form_desc
class RegistrationFormFactory(object):
"""HTTP end-points for creating a new
|
user. """
DEFAULT_FIELDS = ["email", "name", "username", "password"]
EXTRA_FIELDS = [
"confirm_email",
"first_name",
"last_name",
"city",
"state",
"country",
"gender",
"year_of_birth",
"level_of_education",
"company",
"title",
"mailing_address",
"goals",
"honor_code",
"terms_of_service",
"profession",
"specialty",
]
def _is_field_visible(self, field_name):
"""Check whether a field is visible based on Django settings. """
return self._extra_fields_setting.get(field_name) in ["required", "optional"]
def _is_field_required(self, field_name):
"""Check whether a field is required based on Django settings. """
return self._extra_fields_setting.get(field_name) == "required"
def __init__(self):
# Backwards compatibility: Honor code is required by default, unless
# explicitly set to "optional" in Django settings.
self._extra_fields_setting = copy.deepcopy(configuration_helpers.get_value('REGISTRATION_EXTRA_FIELDS'))
if not self._extra_fields_setting:
self._extra_fields_setting = copy.deepcopy(settings.REGISTRATION_EXTRA_FIELDS)
self._extra_fields_setting["honor_code"] = self._extra_fields_setting.get("honor_code", "required")
# Check that the setting is configured correctly
for field_name in self.EXTRA_FIELDS:
if self._extra_fields_setting.get(field_name, "hidden") not in ["required", "optional", "hidden"]:
msg = u"Setting REGISTRATION_EXTRA_FIELDS values must be either required, optional, or hidden."
raise ImproperlyConfigured(msg)
# Map field names to the instance method used to add the field to the form
self.field_handlers = {}
valid_fields = self.DEFAULT_FIELDS + self.EXTRA_FIELDS
for field_name in valid_fields:
handler = getattr(self, "_add_{field_name}_field".format(field_name=field_name))
self.field_handlers[field_name] = handler
field_order = configuration_helpers.get_value('REGISTRATION_FIELD_ORDER')
if not field_order:
field_order = settings.REGISTRATION_FIELD_ORDER or valid_fields
# Check that all of the valid_fields are in the field order and vice versa, if not set to the default order
if set(valid_fields) != set(field_order):
field_order = valid_fields
self.field_order = field_order
def get_registration_form(self, request):
"""Return a description of the registration form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
This is especially important for the registration form,
since different edx-platform installations might
collect different demographic information.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Arguments:
request (HttpRequest)
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_registration"))
self._apply_third_party_auth_overrides(request, form_desc)
# Custom form fields can be added via the form set in settings.REGISTRATION_EXTENSION_FORM
custom_form = get_registration_extension_form()
if custom_form:
# Default fields are always required
for field_name in self.DEFAULT_FIELDS:
self.field_handlers[field_name](form_desc, required=True)
for field_name, field in custom_form.fields.items():
restrictions = {}
if getattr(field, 'max_length', None):
restrictions['max_length'] = field.max_length
if getattr(field, 'min_length', None):
|
skyostil/tracy
|
src/generator/Cheetah/Tests/unittest_local_copy.py
|
Python
|
mit
| 34,313 | 0.003934 |
#!/usr/bin/env python
""" This is a hacked version of PyUnit that extends its reporting capabilities
with optional meta data on the test cases. It also makes it possible to
separate the standard and error output streams in TextTestRunner.
It's a hack rather than a set of subclasses because a) Steve had used double
underscore private attributes for some things I needed access to, and b) the
changes affected so many classes that it was easier just to hack it.
The changes are in the following places:
TestCase:
- minor refactoring of __init__ and __call__ internals
- added some attributes and methods for storing and retrieving meta data
_TextTestResult
- refactored the stream handling
- incorporated all the output code from TextTestRunner
- made the output of FAIL and ERROR information more flexible and
incorporated the new meta data from TestCase
- added a flag called 'explain' to __init__ that controls whether the new '
explanation' meta data from TestCase is printed along with tracebacks
TextTestRunner
- delegated all output to _TextTestResult
- added 'err' and 'explain' to the __init__ signature to match the changes
in _TextTestResult
TestProgram
- added -e and --explain as flags on the command line
-- Tavis Rudd <tavis@redonions.net> (Sept 28th, 2001)
- _TestTextResult.printErrorList(): print blank line after each traceback
-- Mike Orr <mso@oz.net> (Nov 11, 2002)
TestCase methods copied from unittest in Python 2.3:
- .assertAlmostEqual(first, second, places=7, msg=None): to N decimal places.
- .failIfAlmostEqual(first, second, places=7, msg=None)
-- Mike Orr (Jan 5, 2004)
Below is the original docstring for unittest.
---------------------------------------------------------------------------
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
    class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEquals((1 + 2), 3)
self.assertEquals(0 + 1, 1)
        def testMultiply(self):
self.assertEquals((0 * 10), 0)
self.assertEquals((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://pyunit.sourceforge.net/
Copyright (c) 1999, 2000, 2001 Steve Purcell
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
""
|
"
__author__ = "Steve Purcell"
__email__ = "stephen_purcell at yahoo dot com"
__revision__ = "$Revision: 1.1 $"[11:-2]
##################################################
## DEPENDENCIES ##
import os
import re
import string
import sys
import time
import traceback
import types
import pprint
##################################################
## CONSTANTS & GLOBALS
try:
True,False
except NameError:
True, Fals
|
e = (1==1),(1==0)
##############################################################################
# Test framework core
##############################################################################
class TestResult:
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is a
tuple of values as returned by sys.exc_info().
"""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.shouldStop = 0
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun = self.testsRun + 1
def stopTest(self, test):
"Called when the given test has been run"
pass
def addError(self, test, err):
"Called when an error has occurred"
self.errors.append((test, err))
def addFailure(self, test, err):
"Called when a failure has occurred"
self.failures.append((test, err))
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = 1
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(self.__class__, self.testsRun, len(self.errors),
len(self.failures))
class TestCase:
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# the name of the fixture. Used for displaying meta data about the test
name = None
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._setupTestMethod()
self._setupMetaData()
def _setupTestMethod(self):
try:
self._testMethod = getattr(self, self._testMethodName)
except AttributeError:
raise ValueError, "no such test method in %s: %s" % \
(self.__class__, self._testMethodName)
## meta data methods
def _setupMetaData(self):
"""Setup the default meta data for the test case:
- id: self.__class__.__name__ + testMethodName OR self.name + testMethodName
- description: 1st line of Class docstring + 1st line of method docstring
|
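A minimal sketch of how the hacked runner described in the docstring above might be driven. The `name` class attribute and the `err`/`explain` runner arguments are taken from that docstring; the module name, the `stream` keyword and the `makeSuite` call are assumptions for illustration only.

import sys
import unittest_local_copy as unittest  # hypothetical import name for this hacked module

class WidgetTest(unittest.TestCase):
    name = 'Widget sanity checks'  # fixture name, surfaced in the meta-data output

    def testDefaultSize(self):
        """A widget should start out 50x50."""
        self.assertEqual((50, 50), (50, 50))

if __name__ == '__main__':
    # Keep standard and error output on separate streams and print the extra
    # 'explanation' meta data along with any tracebacks.
    runner = unittest.TextTestRunner(stream=sys.stdout, err=sys.stderr, explain=True)
    runner.run(unittest.makeSuite(WidgetTest))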
bzero/bitex
|
apps/api_receive/api_receive_application.py
|
Python
|
gpl-3.0
| 4,247 | 0.014834 |
import ssl
import logging
import logging.handlers
import tornado.ioloop
import tornado.web
import sys
from tornado import httpclient
from functools import partial
from sqlalchemy import create_engine, func
from sqlalchemy.orm import scoped_session, sessionmaker
from create_receive_handler import ReceiveHandler
from wallet_notify_handler import WalletNotifyHandler
from block_notify_handler import BlockNotifyHandler
from authproxy import AuthServiceProxy
class ApiReceiveApplication(tornado.web.Application):
def __init__(self, options, instance_name):
self.options = options
self.instance_name = instance_name
handlers = [
(r"/api/receive", ReceiveHandler),
(r"/api/walletnotify/(?P<txid>[^\/]+)", WalletNotifyHandler),
(r"/api/blocknotify/(?P<hash>[^\/]+)", BlockNotifyHandler),
]
settings = dict(
cookie_secret='cookie_secret'
)
tornado.web.Application.__init__(self, handlers, **settings)
input_log_file_handler = logging.handlers.TimedRotatingFileHandler( self.options.log, when='MIDNIGHT')
formatter = logging.Formatter('%(asctime)s - %(message)s')
input_log_file_handler.setFormatter(formatter)
self.bitcoind = AuthServiceProxy(self.options.rpc_url )
self.paytxfee = self.bitcoind.getinfo()['paytxfee']
self.replay_logger = logging.getLogger(self.instance_name)
self.replay_logger.setLevel(logging.DEBUG)
self.replay_logger.addHandler(input_log_file_handler)
self.replay_logger.info('START')
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
self.replay_logger.addHandler(ch)
from models import Base, db_bootstrap
engine = create_engine( self.options.db_engine, echo=self.options.db_echo)
Base.metadata.create_all(engine)
self.db_session = scoped_session(sessionmaker(bind=engine))
db_bootstrap(self.db_session)
self.log_start_data()
def invoke_callback_url(self, forwarding_address):
url = forwarding_address.get_callback_url()
self.log('EXECUTE', 'curl ' + url)
context = ssl._create_unverified_context()
http_client = httpclient.AsyncHTTPClient(defaults=dict(ssl_options=context))
http_client.fetch(url, partial(self.on_handle_callback_url, forwarding_address.id ))
def on_handle_callback_url(self, forwarding_address_id, response ):
|
from models import ForwardingAddress
forwarding_address = ForwardingAddress.get_by_id(self.db_session, forwarding_address_id)
if response.error:
self.log('ERROR', str(response.error))
forwarding_address.callback_number_of_errors += 1
self.db_session.add(forwarding_address)
self.db_session.commit()
else:
if response.body == '*ok*':
forwarding_address.is_confirm
|
ed_by_client = True
self.db_session.add(forwarding_address)
self.db_session.commit()
def log(self, command, key, value=None):
#if len(logging.getLogger().handlers):
# logging.getLogger().handlers = [] # workaround to avoid stdout logging from the root logger
log_msg = command + ',' + key
if value:
try:
log_msg += ',' + value
except Exception,e :
try:
log_msg += ',' + str(value)
except Exception,e :
try:
log_msg += ',' + unicode(value)
except Exception,e :
log_msg += ', [object]'
self.replay_logger.info( log_msg )
def log_start_data(self):
self.log('PARAM','BEGIN')
self.log('PARAM','port' ,self.options.port)
self.log('PARAM','log' ,self.options.log)
self.log('PARAM','db_echo' ,self.options.db_echo)
self.log('PARAM','db_engine' ,self.options.db_engine)
self.log('PARAM','rpc_url' ,self.options.rpc_url)
self.log('PARAM','END')
from models import ForwardingAddress
fwd_address_list = self.db_session.query(ForwardingAddress)
for fwd_address in fwd_address_list:
self.log('DB_ENTITY', 'FORWARDING_ADDRESS', fwd_address)
bitcoin_info = self.bitcoind.getinfo()
self.log('INFO', 'BITCOIND_GETINFO', str(bitcoin_info))
def clean_up(self):
pass
|
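A minimal sketch of how ApiReceiveApplication might be served. The option names are the ones logged by log_start_data() above; the HTTPServer wiring and the default instance name are assumptions.

import tornado.httpserver
import tornado.ioloop

def run_server(options, instance_name='api_receive'):
    # Build the application, bind it to the configured port, then block on the IOLoop.
    application = ApiReceiveApplication(options, instance_name)
    server = tornado.httpserver.HTTPServer(application)
    server.listen(options.port)  # options.port is among the values logged by log_start_data()
    try:
        tornado.ioloop.IOLoop.instance().start()
    finally:
        application.clean_up()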
jsmesami/naovoce
|
src/fruit/migrations/0003_added_indexes.py
|
Python
|
bsd-3-clause
| 702 | 0.002849 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.ti
|
mezone
class Migration(migrations.Migration):
dependencies = [
('fruit', '0002_fruit_cover_image'),
]
operations = [
migrations.AlterField(
model_name='fruit',
|
name='created',
field=models.DateTimeField(verbose_name='created', db_index=True, editable=False, default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='fruit',
name='deleted',
field=models.BooleanField(verbose_name='deleted', db_index=True, default=False),
),
]
|
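For context, a sketch of the Fruit model fields these AlterField operations converge on, inferred from the migration itself rather than copied from the project's fruit/models.py:

from django.db import models
from django.utils import timezone

class Fruit(models.Model):
    created = models.DateTimeField('created', db_index=True, editable=False,
                                   default=timezone.now)
    deleted = models.BooleanField('deleted', db_index=True, default=False)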
debomatic/debomatic
|
docs/conf.py
|
Python
|
gpl-3.0
| 1,509 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2021 Luca Falavigna
#
# Author: Luca Falavigna <dktrkranz@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Deb-o-Matic documentation build configuration file
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Deb-o-Matic'
copyright = '2007-2021, Luca Falavigna'
version = '0.25'
release = '0.25'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_use_index = True
htmlhelp_basename = 'Deb-o-Maticdoc'
latex_documents = [
('index', 'Deb-o-Matic.tex', 'Deb-o-Matic Documentation',
'Luca Falavigna', 'manual', 'True')]
latex_elements =
|
{
'classoptions': ',oneside',
'babel': '\\usepackage[english]{babel}'}
man_pages = [
('index', 'deb-o-matic', 'Deb-o-Matic Doc
|
umentation',
['Luca Falavigna'], 1)]
|
saltstack/salt
|
tests/pytests/functional/states/conftest.py
|
Python
|
apache-2.0
| 178 | 0 |
import pytest
|
@pytest.fixture(scope="module")
def states(loaders):
return loaders.states
@pytest.fixture(scope="module")
def modules(loaders):
return loa
|
ders.modules
|
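A sketch of how a functional test in this directory might consume those fixtures. The specific module and state calls, and the shape of the state return, are assumptions based on typical Salt functional tests rather than code from this repository.

def test_ping(modules):
    # Execution modules are exposed as attributes of the `modules` loader.
    assert modules.test.ping() is True


def test_managed_file(states, tmp_path):
    # State modules are exposed the same way on the `states` loader.
    target = tmp_path / "testfile.txt"
    ret = states.file.managed(name=str(target), contents="hello")
    assert ret.result is True
    assert target.is_file()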
ooici/marine-integrations
|
mi/dataset/parser/flord_l_wfp_sio_mule.py
|
Python
|
bsd-2-clause
| 8,191 | 0.021121 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.flord_l_wfp_sio_mule
@file marine-integrations/mi/dataset/parser/flord_l_wfp_sio_mule.py
@author Maria Lutz
@brief Parser for the flord_l_wfp_sio_mule dataset driver
Release notes:
Initial Release
"""
__author__ = 'Maria Lutz'
__license__ = 'Apache 2.0'
import re
import struct
import ntplib
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle, DataParticleKey
from mi.core.exceptions import SampleException, DatasetParserException, UnexpectedDataException
from mi.dataset.parser.sio_mule_common import SioMuleParser, SIO_HEADER_MATCHER
from mi.dataset.parser.WFP_E_file_common import HEADER_BYTES, STATUS_BYTES, STATUS_BYTES_AUGMENTED, STATUS_START_MATCHER
E_HEADER_REGEX = b'(\x00\x01\x00{5,5}\x01\x00{7,7}\x01)([\x00-\xff]{8,8})' # E header regex for global sites
E_HEADER_MATCHER = re.compile(E_HEADER_REGEX)
E_GLOBAL_SAMPLE_BYTES = 30
class DataParticleType(BaseEnum):
SAMPLE = 'flord_l_wfp_instrument'
class FlordLWfpSioMuleParserDataParticleKey(BaseEnum):
# params collected for the flord_l_wfp_instrument stream
RAW_SIGNAL_CHL = 'raw_signal_chl'
RAW_SIGNAL_BETA = 'raw_signal_beta' # corresponds to 'ntu' from E file
RAW_INTERNAL_TEMP = 'raw_internal_temp'
WFP_TIMESTAMP = 'wfp_timestamp'
class FlordLWfpSioMuleParserDataParticle(DataParticle):
_data_particle_type = DataParticleType.SAMPLE
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
@throws SampleException If there is a problem with sample creation
"""
fields_prof = struct.unpack('>I f f f f f h h h', self.raw_data)
result = [self._encode_value(FlordLWfpSioMuleParserDataParticleKey.RAW_SIGNAL_CHL, fields_prof[6], int),
self._encode_value(FlordLWfpSioMuleParserDataParticleKey.RAW_SIGNAL_BETA, fields_prof[7], int),
self._encode_value(FlordLWfpSioMuleParserDataParticleKey.RAW_INTERNAL_TEMP, fields_prof[8], int),
self._encode_value(FlordLWfpSioMuleParserDataParticleKey.WFP_TIMESTAMP, fields_prof[0], int)]
return result
class FlordLWfpSioMuleParser(SioMuleParser):
def __init__(self,
config,
state,
stream_handle,
state_callback,
publish_callback,
exception_callback,
*args, **kwargs):
super(FlordLWfpSioMuleParser, self).__init__(config,
stream_handle,
state,
self.sieve_function,
state_callback,
publish_callback,
exception_callback,
*args,
**kwargs)
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If
it is a valid data piece, build a particle, update the position and
timestamp. Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
parsing, plus the state. An empty list of nothing was parsed.
"""
result_particles = []
(timestamp, chunk) = self._chunker.get_next_data()
while (chunk != None):
# Parse/match the SIO header
sio_header_match = SIO_HEADER_MATCHER.match(chunk)
end_of_header = sio_header_match.end(0)
sample_count = 0
if sio_header_match.group(1) == 'WE':
log.trace('read_state: %s', self._read_state)
# Parse/match the E file header
e_header_match = E_HEADER_MATCHER.search(chunk[end_of_header:end_of_header+HEADER_BYTES])
if e_header_match:
payload = chunk[end_of_header+HEADER_BYTES:-1] # '-1' to remove the '\x03' end-of-record marker
data_split = self.we_split_function(payload)
if data_split:
for ii in range(0,len(data_split)):
e_record = payload[data_split[ii][0]:data_split[ii][1]]
if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):
fields = struct.unpack('>I', e_record[0:4])
self._timestamp = ntplib.system_to_ntp_time(float(fields[0]))
if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
sample = self._extract_sample(FlordLWfpSioMuleParserDataParticle,
None,
e_record,
self._timestamp)
if sample:
# create particle
result_particles.append(sample)
sample_count += 1
else:
self._exception_callback(UnexpectedDataException("Found unexpected data."))
else: # no e header match
self._exception_callback(UnexpectedDataException("Found unexpected data."))
self._chunk_sample_count.append(sample_count)
(timestamp, chunk) = self._chunker.get_next_data()
return result_particles
def we_split_function(self, raw_data):
"""
Sort through the raw data to identify new blocks of data that need processing.
"""
form_list = []
"""
The Status messages can have an optional 2 bytes on the end, and since the
rest of the data consists of relatively unformatted packed binary records,
detecting the presence of that optional 2 bytes can be difficult. The only
pattern we have to detect is the STATUS_START field ( 4 bytes FF FF FF F[A-F]).
We peel this apart by parsing backwards, using the end-of-record as an
additional anchor point.
"""
parse_end_point = len(raw_data)
while parse_end_point > 0:
# look for a status message at postulated message header position
header_start = STATUS_BYTES_AUGMENTED
# look for an augmented status
if STATUS_START_MATCHER.match(raw_data[parse_end_point-STATUS_BYTES_AUGMENTED:parse_end_point]):
# A hit for the status message at the augmented offset
# NOTE, we don't need the status messages and only deliver a stream of
# samples to build_parsed_values
parse_end_point = parse_end_point-STATUS_BYTES_AUGMENTED
# check if this is an unaugmented status
elif STATUS_START_MATCHER.match(raw_data[parse_end_point-STATUS_BYTES:parse_end_point]):
# A hit for the status message at the unaugmented offset
# NOTE: same as above
parse_end_point = parse_end_point-STATUS_BYTES
else:
# assume if not a stat that hit above, we have a sample. Mis-parsing will result
# in extra bytes at the end and a sample exception.
form_list.append((parse_end_point-E_GLOBAL_SAMPLE_BYTES, parse_e
|
nd_point))
parse_end_point = parse_end_point-E_GLOBAL_SAMPLE_BYTES
# if the remaining bytes are less than data sample bytes, all we might have left is a status sample
|
if parse_end_point != 0 and parse_end_point < STATUS_BYTES and parse_end_point < E_GLOBAL_SAMPLE_BYTES and parse_end_point < STATUS_BYTES_AUGMENTED:
self._exception_callback(UnexpectedDataException("Error sieving WE data, inferred sample/status alignment incorrect"))
return_list = []
return return_list
# Because we parsed this backwards, we need to reverse the list to deliver the data in the correct order
return_list = form_list[::-1]
log.debug("returning we sieve/split list %s", return_list)
return return_list
|
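A quick standalone sanity check (not part of the driver) that the unpack format used in _build_parsed_values matches the 30-byte record size the sieve assumes, and that the field indices line up with the particle keys:

import struct

E_FORMAT = '>I f f f f f h h h'         # format used by _build_parsed_values
assert struct.calcsize(E_FORMAT) == 30  # 4 + 5*4 + 3*2 bytes == E_GLOBAL_SAMPLE_BYTES

# Pack a fake E record and confirm the indices picked out above:
# [0] wfp timestamp, [6] raw_signal_chl, [7] raw_signal_beta, [8] raw_internal_temp.
record = struct.pack(E_FORMAT, 1400000000, 0.0, 0.0, 0.0, 0.0, 0.0, 120, 230, 510)
fields = struct.unpack(E_FORMAT, record)
assert (fields[6], fields[7], fields[8]) == (120, 230, 510)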