repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars)
---|---|---|---|---|---|---|---|---
pavel-odintsov/ru_open_statistics | helpers/helperUnicode.py | Python | gpl-2.0 | 3,890 | 0.001028 |
# Copyright (C) 2013, Stefan Schwarzer
# See the file LICENSE for licensing terms.
"""
tool.py - helper code
"""
from __future__ import unicode_literals
import compat as compat
__all__ = ["same_string_type_as", "as_bytes", "as_unicode",
"as_default_string"]
# Encoding to convert between byte string and unicode string. This is
# a "lossless" encoding: Strings can be encoded/decoded back and forth
# without information loss or causing encoding-related errors. The
# `ftplib` module under Python 3 also uses the "latin1" encoding
# internally. It's important to use the same encoding here, so that users who
# used `ftplib` to create FTP items with non-ASCII characters can access them
# in the same way with ftputil.
LOSSLESS_ENCODING = "utf-8"
def same_string_type_as(type_source, content_source):
"""
Return a string of the same type as `type_source` with the content
from `content_source`.
If the `type_source` and `content_source` don't have the same
type, use `LOSSLESS_ENCODING` above to encode or decode, whatever
operation is needed.
"""
if (
isinstance(type_source, compat.bytes_type) and
isinstance(content_source, compat.unicode_type)):
return content_source.encode(LOSSLESS_ENCODING)
elif (
isinstance(type_source, compat.unicode_type) and
isinstance(content_source, compat.bytes_type)):
return content_source.decode(LOSSLESS_ENCODING)
else:
return content_source
def as_bytes(string):
"""
Return the argument `string` converted to a byte string if it's a
unicode string. Otherwise just return the string.
"""
return same_string_type_as(b"", string)
def as_unicode(string):
"""
Return the argument `string` converted to a unicode string if it's
a byte string. Otherwise just return the string.
"""
return same_string_type_as("", string)
def as_default_string(string):
"""
    Return the argument `string` converted to the default string
type for the Python version. For unicode strings,
`LOSSLESS_ENCODING` is used for encoding or decoding.
"""
return same_string_type_as(compat.default_string_type(), string)
def encode_if_unicode(string, encoding):
"""
Return the string `string`, encoded with `encoding` if `string` is
a unicode string. Otherwise return `string` unchanged.
"""
    if isinstance(string, compat.unicode_type):
        return string.encode(encoding)
else:
return string
def recursive_str_to_unicode(target):
"""
    Recursively convert all strings in dicts, tuples, and lists to unicode.
"""
pack_result = []
if isinstance(target, dict):
level = {}
for key, val in target.iteritems():
ukey = recursive_str_to_unicode(key)
uval = recursive_str_to_unicode(val)
level[ukey] = uval
pack_result.append(level)
elif isinstance(target, list):
level = []
for leaf in target:
uleaf = recursive_str_to_unicode(leaf)
level.append(uleaf)
pack_result.append(level)
elif isinstance(target, tuple):
level = []
for leaf in target:
uleaf = recursive_str_to_unicode(leaf)
level.append(uleaf)
pack_result.append(tuple(level))
elif isinstance(target, str):
return as_unicode(target)
else:
return target
result = pack_result.pop()
return result
################################################################################
# Testing
if __name__ == '__main__':
test_obj = {str('myList'): [str('inList1'), str('inList2')],
str('myTuple'): (str('inTuple1'), str('inTuple2')),
str('mystr'): str('text'),
str('myint'): 99}
print repr(test_obj)
print repr(recursive_str_to_unicode(test_obj))
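    # Hedged extra checks (illustrative, not from the original file): the
    # byte/unicode helpers round-trip plain ASCII regardless of the encoding.
    print repr(as_bytes(u'abc'))    # -> 'abc'
    print repr(as_unicode('abc'))   # -> u'abc'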
chris2727/BeastBot | src/inc/modules/suggest.py | Python | gpl-3.0 | 1,611 | 0.003724 |
from inc import *
modFunc.addCommand('suggest', 'suggest', 'suggest')
modFunc.addCommand('sug', 'suggest', 'suggest')
modFunc.addCommand('issue', 'suggest', 'suggest')
modFunc.addCommand('sug-read', 'suggest', 'read')
modFunc.addCommand('sug-clear', 'suggest', 'clear')
def suggest(line, irc):
message, username, msgto = ircFunc.ircMessage(line)
combinedMsg = ' '.join(message[1:])
numArgs = len(message) - 1
if numArgs > 0 and combinedMsg.strip() != "":
f = open('suggestions.txt' , 'a')
        f.write(username + ': ' + combinedMsg + '\n')  # newline so read() can iterate one suggestion per line
f.close()
ircFunc.ircSay(username, '%s, thank you for your suggestion... It has been documented and will be reviewed. :)' % username, irc)
else:
ircFunc.ircSay(username, 'You didnt even suggest anything... :/ Command usage is : !suggest <suggestion goes here>', irc)
def read(line, irc):
message, username, msgto = ircFunc.ircMessage(line)
    if username in configFunc.getBotConf('botadmins').split(" "):
        if (ircFunc.isRegged(username, irc)):
            with open('suggestions.txt') as sugfile:
                print 'in with'
for sugline in sugfile:
ircFunc.ircSay(msgto, sugline, irc)
def clear(line, irc):
message, username, msgto = ircFunc.ircMessage(line)
if username.lower() in configFunc.getBotConf('botadmins').split(" "):
if (ircFunc.isRegged(username, irc)):
f = open('suggestions.txt', 'w')
f.write('Suggestions:' + '\n')
f.close()
ircFunc.ircSay(username, 'Suggestions Cleared.....', irc)
enjaz/enjaz | studentguide/migrations/0006_tag_image.py | Python | agpl-3.0 | 446 | 0.002242 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('studentguide', '0005_add_studentguide_clubs'),
]
operations = [
migrations.AddField(
model_name='tag',
name='image',
field=models.FileField(null=True, upload_to=b'studentguide/tags/', blank=True),
),
]
lennax/util | util/atom_data.py | Python | gpl-3.0 | 6,616 | 0.000151 |
# Copyright 2013-2015 Lenna X. Peterson. All rights reserved.
from .meta import classproperty
class AtomData(object):
# Maximum ASA for each residue
# from Miller et al. 1987, JMB 196: 641-656
total_asa = {
'A': 113.0,
'R': 241.0,
'N': 158.0,
'D': 151.0,
'C': 140.0,
'Q': 189.0,
'E': 183.0,
'G': 85.0,
'H': 194.0,
'I': 182.0,
'L': 180.0,
'K': 211.0,
'M': 204.0,
'F': 218.0,
'P': 143.0,
'S': 122.0,
'T': 146.0,
'W': 259.0,
'Y': 229.0,
'V': 160.0,
}
@classmethod
def is_surface(cls, resn, asa, total_asa=None, cutoff=0.1):
"""Return True if ratio of residue ASA to max ASA >= cutoff"""
if total_asa is None:
total_asa = cls.total_asa
resn = resn.upper()
if len(resn) == 3:
resn = cls.three_to_one[resn]
return float(asa) / total_asa[resn] >= cutoff
three_to_full = {
'Val': 'Valine', 'Ile': 'Isoleucine', 'Leu': 'Leucine',
'Glu': 'Glutamic acid', 'Gln': 'Glutamine',
'Asp': 'Aspartic acid', 'Asn': 'Asparagine', 'His': 'Histidine',
'Trp': 'Tryptophan', 'Phe': 'Phenylalanine', 'Tyr': 'Tyrosine',
'Arg': 'Arginine', 'Lys': 'Lysine',
'Ser': 'Serine', 'Thr': 'Threonine',
'Met': 'Methionine', 'Ala': 'Alanine',
'Gly': 'Glycine', 'Pro': 'Proline', 'Cys': 'Cysteine'}
three_to_one = {
'VAL': 'V', 'ILE': 'I', 'LEU': 'L', 'GLU': 'E', 'GLN': 'Q',
'ASP': 'D', 'ASN': 'N', 'HIS': 'H', 'TRP': 'W', 'PHE': 'F', 'TYR': 'Y',
'ARG': 'R', 'LYS': 'K', 'SER': 'S', 'THR': 'T', 'MET': 'M', 'ALA': 'A',
'GLY': 'G', 'PRO': 'P', 'CYS': 'C'}
one_to_three = {o: t for t, o in three_to_one.iteritems()}
@classproperty
def one_to_full(cls):
"""
This can't see three_to_full unless explicitly passed because
        dict comprehensions create their own local scope
"""
return {o: cls.three_to_full[t.title()] for t, o in cls.three_to_one.iteritems()}
res_atom_list = dict(
        ALA=['C', 'CA', 'CB', 'N', 'O'],
ARG=['C', 'CA', 'CB', 'CD', 'CG', 'CZ', 'N', 'NE', 'NH1', 'NH2', 'O'],
ASN=['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'],
ASP=['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'],
CYS=['C', 'CA', 'CB', 'N', 'O', 'SG'],
GLN=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'NE2', 'O', 'OE1'],
GLU=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O', 'OE1', 'OE2'],
GLY=['C', 'CA', 'N', 'O'],
HIS=['C', 'CA', 'CB', 'CD2', 'CE1', 'CG', 'N', 'ND1', 'NE2', 'O'],
ILE=['C', 'CA', 'CB', 'CD1', 'CG1', 'CG2', 'N', 'O'],
LEU=['C', 'CA', 'CB', 'CD1', 'CD2', 'CG', 'N', 'O'],
LYS=['C', 'CA', 'CB', 'CD', 'CE', 'CG', 'N', 'NZ', 'O'],
MET=['C', 'CA', 'CB', 'CE', 'CG', 'N', 'O', 'SD'],
PHE=['C', 'CA', 'CB', 'CD1', 'CD2',
'CE1', 'CE2', 'CG', 'CZ', 'N', 'O'],
PRO=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O'],
SER=['C', 'CA', 'CB', 'N', 'O', 'OG'],
THR=['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'],
TRP=['C', 'CA', 'CB', 'CD1', 'CD2', 'CE2',
'CE3', 'CG', 'CH2', 'CZ2', 'CZ3', 'N', 'NE1', 'O'],
TYR=['C', 'CA', 'CB', 'CD1', 'CD2',
'CE1', 'CE2', 'CG', 'CZ', 'N', 'O', 'OH'],
VAL=['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O'],
)
all_chi = dict(
chi1=dict(
ARG=['N', 'CA', 'CB', 'CG'],
ASN=['N', 'CA', 'CB', 'CG'],
ASP=['N', 'CA', 'CB', 'CG'],
CYS=['N', 'CA', 'CB', 'SG'],
GLN=['N', 'CA', 'CB', 'CG'],
GLU=['N', 'CA', 'CB', 'CG'],
HIS=['N', 'CA', 'CB', 'CG'],
ILE=['N', 'CA', 'CB', 'CG1'],
LEU=['N', 'CA', 'CB', 'CG'],
LYS=['N', 'CA', 'CB', 'CG'],
MET=['N', 'CA', 'CB', 'CG'],
PHE=['N', 'CA', 'CB', 'CG'],
PRO=['N', 'CA', 'CB', 'CG'],
SER=['N', 'CA', 'CB', 'OG'],
THR=['N', 'CA', 'CB', 'OG1'],
TRP=['N', 'CA', 'CB', 'CG'],
TYR=['N', 'CA', 'CB', 'CG'],
VAL=['N', 'CA', 'CB', 'CG1'],
),
chi2=dict(
ARG=['CA', 'CB', 'CG', 'CD'],
ASN=['CA', 'CB', 'CG', 'OD1'],
ASP=['CA', 'CB', 'CG', 'OD1'],
GLN=['CA', 'CB', 'CG', 'CD'],
GLU=['CA', 'CB', 'CG', 'CD'],
HIS=['CA', 'CB', 'CG', 'ND1'],
ILE=['CA', 'CB', 'CG1', 'CD1'],
LEU=['CA', 'CB', 'CG', 'CD1'],
LYS=['CA', 'CB', 'CG', 'CD'],
MET=['CA', 'CB', 'CG', 'SD'],
PHE=['CA', 'CB', 'CG', 'CD1'],
PRO=['CA', 'CB', 'CG', 'CD'],
TRP=['CA', 'CB', 'CG', 'CD1'],
TYR=['CA', 'CB', 'CG', 'CD1'],
),
chi3=dict(
ARG=['CB', 'CG', 'CD', 'NE'],
GLN=['CB', 'CG', 'CD', 'OE1'],
GLU=['CB', 'CG', 'CD', 'OE1'],
LYS=['CB', 'CG', 'CD', 'CE'],
MET=['CB', 'CG', 'SD', 'CE'],
),
chi4=dict(
ARG=['CG', 'CD', 'NE', 'CZ'],
LYS=['CG', 'CD', 'CE', 'NZ'],
),
chi5=dict(
ARG=['CD', 'NE', 'CZ', 'NH1'],
),
)
alt_chi = dict(
chi1=dict(
VAL=['N', 'CA', 'CB', 'CG2'],
),
chi2=dict(
ASP=['CA', 'CB', 'CG', 'OD2'],
LEU=['CA', 'CB', 'CG', 'CD2'],
PHE=['CA', 'CB', 'CG', 'CD2'],
TYR=['CA', 'CB', 'CG', 'CD2'],
),
)
chi_atoms = dict(
ARG=set(['CB', 'CA', 'CG', 'NE', 'N', 'CZ', 'NH1', 'CD']),
ASN=set(['CB', 'CA', 'N', 'CG', 'OD1']),
ASP=set(['CB', 'CA', 'N', 'CG', 'OD1', 'OD2']),
CYS=set(['CB', 'CA', 'SG', 'N']),
GLN=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
GLU=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
HIS=set(['ND1', 'CB', 'CA', 'CG', 'N']),
ILE=set(['CG1', 'CB', 'CA', 'CD1', 'N']),
LEU=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
LYS=set(['CB', 'CA', 'CG', 'CE', 'N', 'NZ', 'CD']),
MET=set(['CB', 'CA', 'CG', 'CE', 'N', 'SD']),
PHE=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
PRO=set(['CB', 'CA', 'N', 'CG', 'CD']),
SER=set(['OG', 'CB', 'CA', 'N']),
THR=set(['CB', 'CA', 'OG1', 'N']),
TRP=set(['CB', 'CA', 'CG', 'CD1', 'N']),
TYR=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
VAL=set(['CG1', 'CG2', 'CB', 'CA', 'N']),
)
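# A hedged usage sketch (the ASA values below are made up for illustration):
#   >>> AtomData.is_surface('ALA', 80.0)   # 80.0 / 113.0 ~ 0.71 >= 0.1
#   True
#   >>> AtomData.is_surface('A', 5.0)      # 5.0 / 113.0 ~ 0.04 < 0.1
#   False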
ichi23de5/ichi_Repo | property/models/inspection.py | Python | gpl-3.0 | 2,328 | 0.007732 |
# -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
class Inspection(models.Model):
_name = 'property.inspection'
_order = 'date desc'
_inherit = ['mail.thread', 'ir.needaction_mixin']
property_id = fields.Many2one('property', string='Property ID', required=True, readonly=True)
### inspection ###
date = fields.Date(string='Date', required=True,)
inspector_id = fields.Many2one('res.users', string='Inspector')
act_type = fields.Selection([
('inspect', 'Tenken'),
('routine_inspection', 'Teikitenken'),
('change', 'Koukan'),
('repair', 'Syuri'),
('coordinate', 'Tyousei'),
('others', 'Other'),],
string='Act type')
inspection_note = fields.Text(string='Note')
    product_memo = fields.Text(string='product_memo', help='Koukan sita kiki wo kaitene')
### request ###
request_id = fields.Many2one('property.inspection.request', string='Request')
    request_date = fields.Date(string='request_date', related='request_id.date', readonly=True)
requester_name = fields.Char(string='requester_name', related='request_id.partner_id.name', readonly=True)
request_note = fields.Text(string='request_note', related='request_id.request_note', readonly=True)
responder_name = fields.Char(string='responder_name', related='request_id.user_id.name', readonly=True)
### ###
state = fields.Selection([
('ongoing', 'Taioutyu'),
('arranging', 'Tehaityu'),
('finishing', 'Kanryo'),],
string='state')
class InspectionRequest(models.Model):
_name = 'property.inspection.request'
_order = 'date desc'
date = fields.Date(string='Date', required=True, copy=False,)
partner_id = fields.Many2one('res.partner', string='partner_id',)
request_note = fields.Text(string='request_note',)
user_id = fields.Many2one('res.users', string='user_id', required=True, help='hosyu no irai wo uketahitoy')
@api.model
def create(self, vals):
if vals.get('name', 'New') == 'New':
vals['name'] = self.env['ir.sequence'].next_by_code('inspection.request') or 'New'
result = super(InspectionRequest, self).create(vals)
return result
fengren/python_koans | python2/koans/about_string_manipulation.py | Python | mit | 2,781 | 0.000719 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual("The values are one and 2", string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual("The values are DOH, doh, doh and DOH!", string)
def test_any_python_expression_may_be_interpolated(self):
import math # import a standard python module with math functions
decimal_places = 4
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5), \
decimal_places)
self.assertEqual("The square root of 5 is 2.2361", string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("let", string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("a", string[1])
def test_single_characters_can_be_represented_by_integers(self):
        self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
        words = string.split()
self.assertEqual(["Sausage", "Egg", "Cheese"], words)
def test_strings_can_be_split_with_different_patterns(self):
import re # import python regular expression library
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertEqual(["the", "rain", "in", "spain"], words)
# `pattern` is a Python regular expression pattern which matches
# ',' or ';'
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual('\\n', string)
self.assertEqual(2, len(string))
# Useful in regular expressions, file paths, URLs, etc.
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual("Now is the time", ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual('Guido', 'guido'.capitalize())
self.assertEqual('GUIDO', 'guido'.upper())
self.assertEqual('timbot', 'TimBot'.lower())
self.assertEqual('Guido Van Rossum', 'guido van rossum'.title())
self.assertEqual('tOtAlLy AwEsOmE', 'ToTaLlY aWeSoMe'.swapcase())
saahil/MSSegmentation | dice.py | Python | gpl-2.0 | 1,199 | 0.010008 |
import numpy
from PIL import Image
import sys
#if len(sys.argv) != 3:
# sys.exit('usage: dice.py path_to_segmented_image path_to_ground_truth_image')
pairs = [['/home/ognawala/data/PatientMS-R/20140120T143753/20140120T143753_annotated_rf.png', '/home/ognawala/data/Patient-Mask/20140120T143753-mask.png'], ['/home/ognawala/data/PatientMS-R/20140120T150515/20140120T150515_annotated_rf.png', '/home/ognawala/data/Patient-Mask/20140120T150515-mask.png']]
# intersection set
n_aib = 0
#individual markings
n_y = 0
n_truth = 0
for p in pairs:
y = Image.open(p[0])
y = numpy.array(y, dtype='uint8')
print p[0]
print y.shape
truth_im = Image.open(p[1])
truth_y = numpy.array(truth_im, dtype='uint8')
    print p[1]
print truth_y.shape
# flatten arrays
truth_y = truth_y.flatten()
y = y.flatten()
print truth_y.shape
print y.shape
for i in range(len(y)):
# both marked?
if y[i]==200 and truth_y[i]==0:
n_aib += 1
# y marked
if y[i]==200:
n_y += 1
# truth marked
if truth_y[i]==0:
n_truth += 1
dice = float(2*n_aib)/float(n_y+n_truth)
print dice
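# Hedged aside (not in the original script): the per-pixel loop can be
# vectorized with numpy. This sketch recomputes the overlap for the last
# image pair only, with the same label conventions (segmentation == 200,
# ground truth == 0).
seg_mask = (y == 200)
gt_mask = (truth_y == 0)
dice_last_pair = 2.0 * numpy.logical_and(seg_mask, gt_mask).sum() / (seg_mask.sum() + gt_mask.sum())
print dice_last_pair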
kambysese/mne-python | tutorials/preprocessing/plot_10_preprocessing_overview.py | Python | bsd-3-clause | 11,702 | 0 |
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-overview:
Overview of artifact detection
==============================
This tutorial covers the basics of artifact detection, and introduces the
artifact detection tools available in MNE-Python.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`:
"""
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # just use a fraction of data for speed here
###############################################################################
# What are artifacts?
# ^^^^^^^^^^^^^^^^^^^
#
# Artifacts are parts of the recorded signal that arise from sources other than
# the source of interest (i.e., neuronal activity in the brain). As such,
# artifacts are a form of interference or noise relative to the signal of
# interest. There are many possible causes of such interference, for example:
#
# - Environmental artifacts
# - Persistent oscillations centered around the `AC power line frequency`_
# (typically 50 or 60 Hz)
# - Brief signal jumps due to building vibration (such as a door slamming)
# - Electromagnetic field noise from nearby elevators, cell phones, the
# geomagnetic field, etc.
#
# - Instrumentation artifacts
# - Electromagnetic interference from stimulus presentation (such as EEG
#     sensors picking up the field generated by unshielded headphones)
#   - Continuous oscillations at specific frequencies used by head position
# indicator (HPI) coils
# - Random high-amplitude fluctuations (or alternatively, constant zero
# signal) in a single channel due to sensor malfunction (e.g., in surface
# electrodes, poor scalp contact)
#
# - Biological artifacts
# - Periodic `QRS`_-like signal patterns (especially in magnetometer
# channels) due to electrical activity of the heart
# - Short step-like deflections (especially in frontal EEG channels) due to
# eye movements
# - Large transient deflections (especially in frontal EEG channels) due to
# blinking
# - Brief bursts of high frequency fluctuations across several channels due
# to the muscular activity during swallowing
#
# There are also some cases where signals from within the brain can be
# considered artifactual. For example, if a researcher is primarily interested
# in the sensory response to a stimulus, but the experimental paradigm involves
# a behavioral response (such as button press), the neural activity associated
# with planning and executing the button press could be considered an
# artifact relative to signal of interest (i.e., the evoked sensory response).
#
# .. note::
# Artifacts of the same genesis may appear different in recordings made by
# different EEG or MEG systems, due to differences in sensor design (e.g.,
# passive vs. active EEG electrodes; axial vs. planar gradiometers, etc).
#
#
# What to do about artifacts
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# There are 3 basic options when faced with artifacts in your recordings:
#
# 1. *Ignore* the artifact and carry on with analysis
# 2. *Exclude* the corrupted portion of the data and analyze the remaining data
# 3. *Repair* the artifact by suppressing artifactual part of the recording
# while (hopefully) leaving the signal of interest intact
#
# There are many different approaches to repairing artifacts, and MNE-Python
# includes a variety of tools for artifact repair, including digital filtering,
# independent components analysis (ICA), Maxwell filtering / signal-space
# separation (SSS), and signal-space projection (SSP). Separate tutorials
# demonstrate each of these techniques for artifact repair. Many of the
# artifact repair techniques work on both continuous (raw) data and on data
# that has already been epoched (though not necessarily equally well); some can
# be applied to `memory-mapped`_ data while others require the data to be
# copied into RAM. Of course, before you can choose any of these strategies you
# must first *detect* the artifacts, which is the topic of the next section.
#
#
# Artifact detection
# ^^^^^^^^^^^^^^^^^^
#
# MNE-Python includes a few tools for automated detection of certain artifacts
# (such as heartbeats and blinks), but of course you can always visually
# inspect your data to identify and annotate artifacts as well.
#
# We saw in :ref:`the introductory tutorial <tut-overview>` that the example
# data includes :term:`SSP projectors <projector>`, so before we look at
# artifacts let's set aside the projectors in a separate variable and then
# remove them from the :class:`~mne.io.Raw` object using the
# :meth:`~mne.io.Raw.del_proj` method, so that we can inspect our data in its
# original, raw state:
ssp_projectors = raw.info['projs']
raw.del_proj()
###############################################################################
# Low-frequency drifts
# ~~~~~~~~~~~~~~~~~~~~
#
# Low-frequency drifts are most readily detected by visual inspection using the
# basic :meth:`~mne.io.Raw.plot` method, though it is helpful to plot a
# relatively long time span and to disable channel-wise DC shift correction.
# Here we plot 60 seconds and show all the magnetometer channels:
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, n_channels=len(mag_channels),
remove_dc=False)
###############################################################################
# Low-frequency drifts are readily removed by high-pass filtering at a fairly
# low cutoff frequency (the wavelength of the drifts seen above is probably
# around 20 seconds, so in this case a cutoff of 0.1 Hz would probably suppress
# most of the drift).
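# A hedged illustration (not applied in the rest of this tutorial): a one-line
# high-pass filter on a copy of the data, so `raw` itself stays unfiltered.
raw_highpass = raw.copy().filter(l_freq=0.1, h_freq=None)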
#
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line artifacts are easiest to see on plots of the spectrum, so we'll
# use :meth:`~mne.io.Raw.plot_psd` to illustrate.
fig = raw.plot_psd(tmax=np.inf, fmax=250, average=True)
# add some arrows at 60 Hz and its harmonics:
for ax in fig.axes[1:]:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
ax.arrow(x=freqs[idx], y=psds[idx] + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
###############################################################################
# Here we see narrow frequency peaks at 60, 120, 180, and 240 Hz — the power
# line frequency of the USA (where the sample data was recorded) and its 2nd,
# 3rd, and 4th harmonics. Other peaks (around 25 to 30 Hz, and the second
# harmonic of those) are probably related to the heartbeat, which is more
# easily seen in the time domain using a dedicated heartbeat detection function
# as described in the next section.
#
#
# Heartbeat artifacts (ECG)
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# MNE-Python includes a dedicated function
# :func:`~mne.preprocessing.find_ecg_events` in the :mod:`mne.preprocessing`
# submodule, for detecting heartbeat artifacts from either dedicated ECG
# channels or from magnetometers (if no ECG channel is present). Additionally,
# the function :func:`~mne.preprocessing.create_ecg_epochs` will call
# :func:`~mne.preprocessing.find_ecg_events` under the hood, and use the
# resulting events array to extract epochs centered around the detected
# heartbeat artifacts. Here we create those epochs, then show an image plot of
# the detected ECG artifacts along with the average ERF across artifacts. We'll
# show all three channel types, even though EEG channels are less strongly
# affected by heartbeat artifacts:
# sphinx_gallery_thumbnail_number = 4
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)
ecg_epochs.plot_image(combine='mean')
###############################################################################
# The horizontal streaks in the magnetometer image plot reflect the fact that
# the heartbeat ar
kirpit/couchbasekit | couchbasekit/fields.py | Python | mit | 6,692 | 0.002241 |
#! /usr/bin/env python
"""
couchbasekit.fields
~~~~~~~~~~~~~~~~~~~
:website: http://github.com/kirpit/couchbasekit
:copyright: Copyright 2013, Roy Enjoy <kirpit *at* gmail.com>, see AUTHORS.txt.
:license: MIT, see LICENSE.txt for details.
* :class:`couchbasekit.fields.CustomField`
* :class:`couchbasekit.fields.ChoiceField`
* :class:`couchbasekit.fields.EmailField`
* :class:`couchbasekit.fields.PasswordField`
"""
import re
from abc import ABCMeta
class CustomField(object):
"""The abstract custom field to be extended by all other field classes.
.. note::
You can also create your own custom field types by implementing this
class. All you have to do is to assign your final (that is calculated
and ready to be saved) value to the :attr:`value` property. Please
note that it should also accept unicode raw values, which are fetched
and returned from couchbase server. See :class:`PasswordField` source
code as an example.
Please contribute back if you create a generic and useful custom field.
"""
__metaclass__ = ABCMeta
_value = None
def __init__(self):
raise NotImplementedError()
def __repr__(self):
return repr(self.value)
def __eq__(self, other):
if type(other) is type(self) and other.value==self.value:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def value(self):
"""Property to
|
be used when saving a custom field into
:class:`couchbasekit.document.Document` instance.
:returns: The value to be saved for t
|
he field within
:class:`couchbasekit.document.Document` instances.
:rtype: mixed
"""
if self._value is None:
raise ValueError("%s's 'value' is not set." % type(self).__name__)
return self._value
@value.setter
def value(self, value):
"""Propery setter that should be used to assign final (calculated)
value.
"""
self._value = value
class ChoiceField(CustomField):
"""The custom field to be used for multi choice options such as gender,
    static category list etc. This class can't be used directly; it has to be
extended by your choice list class. Thankfully, it's just easy::
class Gender(ChoiceField):
CHOICES = {
'M': 'Male',
'F': 'Female',
}
and all you have to do is to pass the current value to create your choice
object:
>>> choice = Gender('F')
>>> choice.value
'F'
>>> choice.text
'Female'
:param choice: The choice value.
:type choice: basestring
"""
__metaclass__ = ABCMeta
CHOICES = {}
def __eq__(self, other):
if super(ChoiceField, self).__eq__(other) and other.CHOICES==self.CHOICES:
return True
return False
def __init__(self, choice):
if not isinstance(self.CHOICES, dict) or not len(self.CHOICES):
raise AttributeError("ChoiceFields must have dictionary 'CHOICES' "
"attribute and cannot be empty.")
if choice not in self.CHOICES:
raise ValueError("Default choice for %s must be "
"within the 'CHOICES' attribute."
% type(self).__name__)
self.value = choice
@property
def text(self):
"""Returns the text of the current choice, object property.
:rtype: unicode
"""
return self.CHOICES.get(self.value)
def iteritems(self):
return self.CHOICES.iteritems()
# stolen from django email validator:
EMAIL_RE = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
# quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"'
r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)$)' # domain
r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3)
class EmailField(CustomField):
"""The custom field to be used for email addresses and intended to validate
them as well.
:param email: Email address to be saved.
:type email: basestring
"""
def __init__(self, email):
if not self.is_valid(email):
raise ValueError("Email address is invalid.")
self.value = email
@staticmethod
def is_valid(email):
"""Email address validation method.
:param email: Email address to be saved.
:type email: basestring
:returns: True if email address is correct, False otherwise.
:rtype: bool
"""
if isinstance(email, basestring) and EMAIL_RE.match(email):
return True
return False
class PasswordField(CustomField):
"""The custom field to be used for password types.
It encrypts the raw passwords on-the-fly and depends on
`py-bcrypt` library for such encryption.
:param password: Raw or encrypted password value.
:type password: unicode
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
LOG_ROUNDS = 12
def __init__(self, password):
if not isinstance(password, basestring):
raise ValueError("Password must be a string or unicode.")
# do the encryption if raw password provided
if not password.startswith(('$2a$', '$2y$')):
bcrypt = self.get_bcrypt()
password = bcrypt.hashpw(password, bcrypt.gensalt(self.LOG_ROUNDS))
self.value = password
@staticmethod
def get_bcrypt():
"""Returns the `py-bcrypt` library for internal usage.
:returns: `py-bcrypt` package.
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
try: import bcrypt
except ImportError:
raise ImportError("PasswordField requires 'py-bcrypt' "
"library to hash the passwords.")
else: return bcrypt
def check_password(self, raw_password):
"""Validates the given raw password against the intance's encrypted one.
:param raw_password: Raw password to be checked against.
:type raw_password: unicode
:returns: True if comparison was successful, False otherwise.
:rtype: bool
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
bcrypt = self.get_bcrypt()
return bcrypt.hashpw(raw_password, self.value)==self.value
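if __name__ == '__main__':
    # Hedged, illustrative demo only: EmailField needs no extra dependency,
    # while PasswordField requires the optional `py-bcrypt` package.
    print EmailField.is_valid('user@example.com')   # True
    pwd = PasswordField('secret')
    print pwd.check_password('secret')              # True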
esikachev/scenario | sahara/plugins/mapr/plugin.py | Python | apache-2.0 | 2,850 | 0 |
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sahara.i18n import _
import sahara.plugins.mapr.versions.version_handler_factory as vhf
import sahara.plugins.provisioning as p
class MapRPlugin(p.ProvisioningPluginBase):
title = 'MapR Hadoop Distribution'
description = _('The MapR Distribution provides a full Hadoop stack that'
' includes the MapR File System (MapR-FS), MapReduce,'
' a complete Hadoop ecosystem, and the MapR Control System'
' user interface')
def _get_handler(self, hadoop_version):
return vhf.VersionHandlerFactory.get().get_handler(hadoop_version)
def get_title(self):
return MapRPlugin.title
def get_description(self):
return MapRPlugin.description
def get_versions(self):
return vhf.VersionHandlerFactory.get().get_versions()
def get_node_processes(self, hadoop_version):
return self._get_handler(hadoop_version).get_node_processes()
def get_configs(self, hadoop_version):
return self._get_handler(hadoop_version).get_configs()
def configure_cluster(self, cluster):
self._get_handler(cluster.hadoop_version).configure_cluster(cluster)
def start_cluster(self, cluster):
self._get_handler(cluster.hadoop_version).start_cluster(cluster)
def validate(self, cluster):
self._get_handler(cluster.hadoop_version).validate(cluster)
def validate_scaling(self, cluster, existing, additional):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.validate_scaling(cluster, existing, additional)
def scale_cluster(self, cluster, instances):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.scale_cluster(cluster, instances)
def decommission_nodes(self, cluster, instances):
v_handler = self._get_handler(cluster.hadoop_version)
        v_handler.decommission_nodes(cluster, instances)
def get_edp_engine(self, cluster, job_type):
v_handler = self._get_handler(cluster.hadoop_version)
return v_handler.get_edp_engine(cluster, job_type)
def get_open_ports(self, node_group):
v_handler = self._get_handler(node_group.cluster.hadoop_version)
return v_handler.get_open_ports(node_group)
matburt/ansible | lib/ansible/module_utils/openstack.py | Python | gpl-3.0 | 4,528 | 0.006846 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
def openstack_argument_spec():
# DEPRECATED: This argument spec is only used for the deprecated old
# OpenStack modules. It turns out that modern OpenStack auth is WAY
# more complex than this.
# Consume standard OpenStack environment variables.
# This is mainly only useful for ad-hoc command line operation as
# in playbooks one would assume variables would be used appropriately
OS_AUTH_URL=os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
OS_PASSWORD=os.environ.get('OS_PASSWORD', None)
OS_REGION_NAME=os.environ.get('OS_REGION_NAME', None)
OS_USERNAME=os.environ.get('OS_USERNAME', 'admin')
OS_TENANT_NAME=os.environ.get('OS_TENANT_NAME', OS_USERNAME)
spec = dict(
login_username = dict(default=OS_USERNAME),
auth_url = dict(default=OS_AUTH_URL),
region_name = dict(default=OS_REGION_NAME),
availability_zone = dict(default=None),
)
if OS_PASSWORD:
spec['login_password'] = dict(default=OS_PASSWORD)
else:
spec['login_password'] = dict(required=True)
if OS_TENANT_NAME:
spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
else:
spec['login_tenant_name'] = dict(required=True)
return spec
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
ret = []
    for (k, v) in addresses.iteritems():
if key_name and k == key_name:
ret.extend([addrs['addr'] for addrs in v])
else:
for interface_spec in v:
if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
ret.append(interface_spec['addr'])
return ret
def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None),
        auth_type=dict(default=None),
auth=dict(default=None, no_log=True),
region_name=dict(default=None),
availability_zone=dict(default=None),
verify=dict(default=True, aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
key=dict(default=None, no_log=True),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
api_timeout=dict(default=None, type='int'),
endpoint_type=dict(
default='public', choices=['public', 'internal', 'admin']
)
)
spec.update(kwargs)
return spec
def openstack_module_kwargs(**kwargs):
ret = {}
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
if key in ret:
ret[key].extend(kwargs[key])
else:
ret[key] = kwargs[key]
return ret
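# A hedged usage sketch (the `name` option is illustrative; AnsibleModule
# comes from ansible.module_utils.basic in a real module):
#
#     argument_spec = openstack_full_argument_spec(
#         name=dict(required=True),
#     )
#     module_kwargs = openstack_module_kwargs(
#         mutually_exclusive=[['auth', 'cloud']],
#     )
#     module = AnsibleModule(argument_spec, **module_kwargs)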
raycarnes/account-financial-tools | account_journal_always_check_date/__openerp__.py | Python | agpl-3.0 | 2,081 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Journal Always Check Date module for OpenERP
# Copyright (C) 2013-2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Journal Always Check Date',
'version': '0.1',
'category': 'Accounting & Finance',
'license': 'AGPL-3',
'summary': 'Option Check Date in Period always active on journals',
'description': """
Check Date in Period always active on Account Journals
======================================================
This module:
* activates the 'Check Date in Period' option on all existing account journals,
* enable the 'Check Date in Period' option on new account journals,
* prevent users from deactivating the 'Check Date in Period' option.
So this module is an additional security for countries where, on an account
move, the date must be inside the period.
Please contact Alexis de Lattre from Akretion <alexis.delattre@akretion.com>
for any help or question about this module.
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['account'],
'data': [],
'installable': True,
'active': False,
}
crimoniv/odoo-module-tools | repository_management/vcs_wrapper/vcs_wrapper.py | Python | agpl-3.0 | 1,918 | 0 |
# -*- coding: utf-8 -*-
# © 2016 Cristian Moncho <cristian.moncho@diagram.es>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
_logger = logging.getLogger(__name__)
_LOADED_VCS = []
def load_vcs(vcs):
vcs = vcs.lower()
modname = 'vcs.%s' % (vcs,)
clsname = vcs.title().replace('_', '')
try:
mod = getattr(__import__(modname, globals(), locals(), [], -1), vcs)
return getattr(mod, clsname)
except AttributeError:
raise Exception(
'Wrapper not found: from %s import %s' % (modname, clsname))
# [TODO] Automatically detect *.py files in 'vcs' folder
for vcs in ('git', 'bzr', 'hg', 'svn'):
try:
_LOADED_VCS.append((vcs, load_vcs(vcs)))
except Exception as e:
_logger.warning('Unable to load "%s" module: %s', vcs, e)
_logger.debug('Enabled VCS: %s', ', '.join(t[0] for t in _LOADED_VCS))
class VcsWrapper(object):
""" Version Control System Wrapper. """
def __new__(cls, vcs, path, **kwargs):
if not vcs:
vcs = cls._guess_vcs(path)
try:
            return dict(_LOADED_VCS)[vcs](path, **kwargs)
except KeyError:
raise Exception('Unknown repository structure in %s' % (path,))
@classmethod
def available_vcs(cls):
return zip(*_LOADED_VCS)[0] if _LOADED_VCS else ()
@classmethod
def from_source(cls, vcs, path, source, branch=None, **kwargs):
res = cls(vcs, path)
res.init(source, branch=branch, **kwargs)
res.load()
return res
@classmethod
def from_dir(cls, vcs, path, **kwargs):
res = cls(vcs, path)
res.load(**kwargs)
return res
@staticmethod
def _guess_vcs(path):
""" Inspect the given path and search which VCS wrapper needs. """
for vcs, cls in _LOADED_VCS:
if cls.is_repo(path):
return vcs
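# A hedged usage sketch (the path and URL below are placeholders, not from
# the source):
#
#     # clone through whichever backend handles git
#     repo = VcsWrapper.from_source('git', '/tmp/addons', 'https://example.com/repo.git')
#     # or wrap an existing checkout and let the wrapper guess the VCS
#     repo = VcsWrapper.from_dir(None, '/tmp/addons')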
DavidAndreev/indico | indico/modules/events/timetable/models/entries.py | Python | gpl-3.0 | 13,696 | 0.001168 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import timedelta
from sqlalchemy import DDL
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.base import NEVER_SET, NO_VALUE
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime, PyIntEnum
from indico.core.db.sqlalchemy.util.models import populate_one_to_one_backrefs
from indico.util.date_time import overlaps
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
from indico.util.struct.enum import TitledIntEnum
from indico.util.i18n import _
class TimetableEntryType(TitledIntEnum):
__titles__ = [None, _("Session Block"), _("Contribution"), _("Break")]
# entries are uppercase since `break` is a keyword...
SESSION_BLOCK = 1
CONTRIBUTION = 2
BREAK = 3
def _make_check(type_, *cols):
all_cols = {'session_block_id', 'contribution_id', 'break_id'}
required_cols = all_cols & set(cols)
forbidden_cols = all_cols - required_cols
criteria = ['{} IS NULL'.format(col) for col in sorted(forbidden_cols)]
criteria += ['{} IS NOT NULL'.format(col) for col in sorted(required_cols)]
condition = 'type != {} OR ({})'.format(type_, ' AND '.join(criteria))
return db.CheckConstraint(condition, 'valid_{}'.format(type_.name.lower()))
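# For example (a hedged expansion of the helper above, assuming the enum
# formats as its integer value): _make_check(TimetableEntryType.BREAK, 'break_id')
# yields a constraint named 'valid_break' whose condition is roughly
#   type != 3 OR (contribution_id IS NULL AND session_block_id IS NULL AND break_id IS NOT NULL)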
class TimetableEntry(db.Model):
__tablename__ = 'timetable_entries'
@declared_attr
def __table_args__(cls):
return (db.Index('ix_timetable_entries_start_dt_desc', cls.start_dt.desc()),
_make_check(TimetableEntryType.SESSION_BLOCK, 'session_block_id'),
_make_check(TimetableEntryType.CONTRIBUTION, 'contribution_id'),
_make_check(TimetableEntryType.BREAK, 'break_id'),
db.CheckConstraint("type != {} OR parent_id IS NULL".format(TimetableEntryType.SESSION_BLOCK),
'valid_parent'),
{'schema': 'events'})
id = db.Column(
db.Integer,
primary_key=True
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
parent_id = db.Column(
db.Integer,
db.ForeignKey('events.timetable_entries.id'),
index=True,
nullable=True,
)
session_block_id = db.Column(
db.Integer,
db.ForeignKey('events.session_blocks.id'),
index=True,
unique=True,
nullable=True
)
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
unique=True,
nullable=True
)
break_id = db.Column(
db.Integer,
db.ForeignKey('events.breaks.id'),
index=True,
unique=True,
nullable=True
)
type = db.Column(
PyIntEnum(TimetableEntryType),
nullable=False
)
start_dt = db.Column(
UTCDateTime,
nullable=False
)
event_new = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'timetable_entries',
order_by=lambda: TimetableEntry.start_dt,
cascade='all, delete-orphan',
lazy='dynamic'
)
)
session_block = db.relationship(
'SessionBlock',
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
contribution = db.relationship(
'Contribution',
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
break_ = db.relationship(
'Break',
cascade='all, delete-orphan',
single_parent=True,
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
children = db.relationship(
'TimetableEntry',
order_by='TimetableEntry.start_dt',
lazy=True,
backref=db.backref(
'parent',
remote_side=[id],
lazy=True
)
)
# relationship backrefs:
# - parent (TimetableEntry.children)
@property
def object(self):
if self.type == TimetableEntryType.SESSION_BLOCK:
return self.session_block
elif self.type == TimetableEntryType.CONTRIBUTION:
return self.contribution
elif self.type == TimetableEntryType.BREAK:
return self.break_
@object.setter
def object(self, value):
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.breaks import Break
self.session_block = self.contribution = self.break_ = None
if isinstance(value, SessionBlock):
self.session_block = value
elif isinstance(value, Contribution):
self.contribution = value
elif isinstance(value, Break):
self.break_ = value
elif value is not None:
raise TypeError('Unexpected object: {}'.format(value))
@hybrid_property
def duration(self):
return self.object.duration if self.object is not None else None
@duration.setter
def duration(self, value):
self.object.duration = value
@duration.expression
def duration(cls):
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.breaks import Break
return db.case({
TimetableEntryType.SESSION_BLOCK.value:
db.select([SessionBlock.duration])
.where(SessionBlock.id == cls.session_block_id)
.correlate_except(SessionBlock)
.as_scalar(),
TimetableEntryType.CONTRIBUTION.value:
db.select([Contribution.duration])
.where(Contribution.id == cls.contribution_id)
.correlate_except(Contribution)
.as_scalar(),
TimetableEntryType.BREAK.value:
db.select([Break.duration])
.where(Break.id == cls.break_id)
.correlate_except(Break)
.as_scalar(),
}, value=cls.type)
@hybrid_property
def end_dt(self):
if self.start_dt is None or self.duration is None:
return None
return self.start_dt + self.duration
@end_dt.expression
def end_dt(cls):
return cls.start_dt + cls.duration
@property
def session_siblings(self):
if self.type == TimetableEntryType.SESSION_BLOCK:
return [x for x in self.siblings
if x.session_block and x.session_block.session == self.session_block.session]
elif self.parent:
return self.siblings
else:
return []
@property
def siblings(self):
from indico.modules.events.timetable.util import get_top_level_entries, get_nested_entries
tzinfo = self.event_ne
dbrgn/mopidy | tests/mpd/test_translator.py | Python | apache-2.0 | 6,809 | 0 |
from __future__ import absolute_import, unicode_literals
import unittest
from mopidy.internal import path
from mopidy.models import Album, Artist, Playlist, TlTrack, Track
from mopidy.mpd import translator
class TrackMpdFormatTest(unittest.TestCase):
track = Track(
uri='a uri',
artists=[Artist(name='an artist')],
name='a name',
album=Album(
name='an album', num_tracks=13,
artists=[Artist(name='an other artist')]),
track_no=7,
composers=[Artist(name='a composer')],
performers=[Artist(name='a performer')],
genre='a genre',
date='1977-01-01',
disc_no=1,
comment='a comment',
length=137000,
)
def setUp(self): # noqa: N802
self.media_dir = '/dir/subdir'
path.mtime.set_fake_time(1234567)
def tearDown(self): # noqa: N802
path.mtime.undo_fake()
def test_track_to_mpd_format_for_empty_track(self):
# TODO: this is likely wrong, see:
# https://github.com/mopidy/mopidy/issues/923#issuecomment-79584110
result = translator.track_to_mpd_format(Track())
self.assertIn(('file', ''), result)
self.assertIn(('Time', 0), result)
        self.assertIn(('Artist', ''), result)
self.assertIn(('Title', ''), result)
self.assertIn(('Album', ''), result)
self.assertIn(('Track', 0), result)
self.assertNotIn(('Date', ''), result)
self.assertEqual(len(result), 6)
def test_track_to_mpd_format_with_position(self):
result = translator.track_to_mpd_format(Track(), position=1)
self.assertNotIn(('Pos', 1), result)
def test_track_to_mpd_format_with_tlid(self):
result = translator.track_to_mpd_format(TlTrack(1, Track()))
self.assertNotIn(('Id', 1), result)
def test_track_to_mpd_format_with_position_and_tlid(self):
result = translator.track_to_mpd_format(
TlTrack(2, Track()), position=1)
self.assertIn(('Pos', 1), result)
self.assertIn(('Id', 2), result)
def test_track_to_mpd_format_for_nonempty_track(self):
result = translator.track_to_mpd_format(
TlTrack(122, self.track), position=9)
self.assertIn(('file', 'a uri'), result)
self.assertIn(('Time', 137), result)
self.assertIn(('Artist', 'an artist'), result)
self.assertIn(('Title', 'a name'), result)
self.assertIn(('Album', 'an album'), result)
self.assertIn(('AlbumArtist', 'an other artist'), result)
self.assertIn(('Composer', 'a composer'), result)
self.assertIn(('Performer', 'a performer'), result)
self.assertIn(('Genre', 'a genre'), result)
self.assertIn(('Track', '7/13'), result)
self.assertIn(('Date', '1977-01-01'), result)
self.assertIn(('Disc', 1), result)
self.assertIn(('Pos', 9), result)
self.assertIn(('Id', 122), result)
self.assertNotIn(('Comment', 'a comment'), result)
self.assertEqual(len(result), 14)
def test_track_to_mpd_format_with_last_modified(self):
track = self.track.replace(last_modified=995303899000)
result = translator.track_to_mpd_format(track)
self.assertIn(('Last-Modified', '2001-07-16T17:18:19Z'), result)
def test_track_to_mpd_format_with_last_modified_of_zero(self):
track = self.track.replace(last_modified=0)
result = translator.track_to_mpd_format(track)
keys = [k for k, v in result]
self.assertNotIn('Last-Modified', keys)
def test_track_to_mpd_format_musicbrainz_trackid(self):
track = self.track.replace(musicbrainz_id='foo')
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_TRACKID', 'foo'), result)
def test_track_to_mpd_format_musicbrainz_albumid(self):
album = self.track.album.replace(musicbrainz_id='foo')
track = self.track.replace(album=album)
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_ALBUMID', 'foo'), result)
def test_track_to_mpd_format_musicbrainz_albumartistid(self):
artist = list(self.track.artists)[0].replace(musicbrainz_id='foo')
album = self.track.album.replace(artists=[artist])
track = self.track.replace(album=album)
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_ALBUMARTISTID', 'foo'), result)
def test_track_to_mpd_format_musicbrainz_artistid(self):
artist = list(self.track.artists)[0].replace(musicbrainz_id='foo')
track = self.track.replace(artists=[artist])
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_ARTISTID', 'foo'), result)
def test_concat_multi_values(self):
artists = [Artist(name='ABBA'), Artist(name='Beatles')]
translated = translator.concat_multi_values(artists, 'name')
self.assertEqual(translated, 'ABBA;Beatles')
def test_concat_multi_values_artist_with_no_name(self):
artists = [Artist(name=None)]
translated = translator.concat_multi_values(artists, 'name')
self.assertEqual(translated, '')
def test_concat_multi_values_artist_with_no_musicbrainz_id(self):
artists = [Artist(name='Jah Wobble')]
translated = translator.concat_multi_values(artists, 'musicbrainz_id')
self.assertEqual(translated, '')
def test_track_to_mpd_format_with_stream_title(self):
result = translator.track_to_mpd_format(self.track, stream_title='foo')
self.assertIn(('Name', 'a name'), result)
self.assertIn(('Title', 'foo'), result)
def test_track_to_mpd_format_with_empty_stream_title(self):
result = translator.track_to_mpd_format(self.track, stream_title='')
self.assertIn(('Name', 'a name'), result)
self.assertIn(('Title', ''), result)
def test_track_to_mpd_format_with_stream_and_no_track_name(self):
track = self.track.replace(name=None)
result = translator.track_to_mpd_format(track, stream_title='foo')
self.assertNotIn(('Name', ''), result)
self.assertIn(('Title', 'foo'), result)
class PlaylistMpdFormatTest(unittest.TestCase):
def test_mpd_format(self):
playlist = Playlist(tracks=[
Track(track_no=1), Track(track_no=2), Track(track_no=3)])
result = translator.playlist_to_mpd_format(playlist)
self.assertEqual(len(result), 3)
def test_mpd_format_with_range(self):
playlist = Playlist(tracks=[
Track(track_no=1), Track(track_no=2), Track(track_no=3)])
result = translator.playlist_to_mpd_format(playlist, 1, 2)
self.assertEqual(len(result), 1)
self.assertEqual(dict(result[0])['Track'], 2)
dilawar/moose-full | moose-examples/snippets/funcReacLotkaVolterra.py | Python | gpl-2.0 | 5,043 | 0.029942 |
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import moose
runtime = 50.0
def makeModel():
# create container for model
model = moose.Neutral( 'model' )
harmonic = moose.CubeMesh( '/model/harmonic' )
harmonic.volume = 1e-15
lotka = moose.CubeMesh( '/model/lotka' )
lotka.volume = 1e-15
# create molecules and reactions
x = moose.Pool( '/model/lotka/x' )
y = moose.Pool( '/model/lotka/y' )
z = moose.BufPool( '/model/lotka/z' ) # Dummy molecule.
xreac = moose.Reac( '/model/lotka/xreac' )
yreac = moose.Reac( '/model/lotka/yreac' )
xrate = moose.Function( '/model/lotka/xreac/func' )
yrate = moose.Function( '/model/lotka/yreac/func' )
# Parameters
alpha = 1.0
beta = 1.0
gamma = 1.0
delta = 1.0
k = 1.0
x.nInit = 2.0
y.nInit = 1.0
z.nInit = 0.0
xrate.x.num = 1
yrate.x.num = 1
xrate.expr = "x0 * " + str( beta ) + " - " + str( alpha )
yrate.expr = str( gamma ) + " - x0 * " + str( delta )
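    # With the parameter values above these expressions evaluate to
    # "x0 * 1.0 - 1.0" and "1.0 - x0 * 1.0"; x0 is connected below to y.n for
    # xrate and to x.n for yrate, so Kf = beta*y - alpha and gamma - delta*x.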
xreac.Kf = k
yreac.Kf = k
xreac.Kb = 0
yreac.Kb = 0
# connect them up for reactions
moose.connect( y, 'nOut', xrate.x[0], 'input' )
moose.connect( x, 'nOut', yrate.x[0], 'input' )
moose.connect( xrate, 'valueOut', xreac, 'setNumKf' )
moose.connect( yrate, 'valueOut', yreac, 'setNumKf' )
    moose.connect( xreac, 'sub', x, 'reac' )
moose.connect( xreac, 'prd', z, 'reac' )
moose.connect( yreac, 'sub', y, 'reac' )
moose.connect( yreac, 'prd', z, 'reac' )
# Create the output tables
    graphs = moose.Neutral( '/model/graphs' )
xplot = moose.Table2 ( '/model/graphs/x' )
yplot = moose.Table2 ( '/model/graphs/y' )
# connect up the tables
moose.connect( xplot, 'requestOut', x, 'getN' );
moose.connect( yplot, 'requestOut', y, 'getN' );
def main():
"""
The funcReacLotkaVolterra example shows how to use function objects
as part of differential equation systems in the framework of the MOOSE
kinetic solvers. Here the system is set up explicitly using the
    scripting; in normal use one would expect to use SBML.
In this example we set up a Lotka-Volterra system. The equations
are readily expressed as a pair of reactions each of whose rate is
governed by a function::
x' = x( alpha - beta.y )
y' = -y( gamma - delta.x )
This translates into two reactions::
x ---> z Kf = beta.y - alpha
y ---> z Kf = gamma - delta.x
Here z is a dummy molecule whose concentration is buffered to zero.
The model first runs using default Exponential Euler integration.
This is not particularly accurate even with a small timestep.
The model is then converted to use the deterministic Kinetic solver
Ksolve. This is accurate and faster.
    Note that we cannot use the stochastic GSSA solver for this system, as it
cannot handle a reaction term whose rate keeps changing.
"""
makeModel()
for i in range( 11, 18 ):
moose.setClock( i, 0.001 )
moose.setClock( 18, 0.1 )
moose.reinit()
moose.start( runtime ) # Run the model
# Iterate through all plots, dump their contents to data.plot.
for x in moose.wildcardFind( '/model/graphs/#' ):
#x.xplot( 'scriptKineticModel.plot', x.name )
t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec
pylab.plot( t, x.vector, label=x.name )
pylab.ylim( 0, 2.5 )
pylab.title( "Exponential Euler solution. Note slight error buildup" )
pylab.legend()
pylab.figure()
compt = moose.element( '/model/lotka' )
ksolve = moose.Ksolve( '/model/lotka/ksolve' )
stoich = moose.Stoich( '/model/lotka/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = '/model/lotka/##'
moose.reinit()
moose.start( runtime ) # Run the model
for i in range( 11, 18 ):
moose.setClock( i, 0.1 )
for x in moose.wildcardFind( '/model/graphs/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec
pylab.plot( t, x.vector, label=x.name )
pylab.ylim( 0, 2.5 )
pylab.title( "Runge-Kutta solution." )
pylab.legend()
pylab.show()
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
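# --- Editor's sketch (not part of the original snippet) ----------------------
# The ODEs quoted in main()'s docstring, integrated with plain forward Euler as
# a cross-check on the reaction-based MOOSE model above. Parameter names and
# initial values mirror those used in makeModel(); the helper is illustrative
# only and is never called.
def euler_lotka_volterra(alpha=1.0, beta=1.0, gamma=1.0, delta=1.0,
                         x0=2.0, y0=1.0, dt=0.001, t_end=50.0):
    xs, ys = [x0], [y0]
    for _ in range(int(t_end / dt)):
        x, y = xs[-1], ys[-1]
        xs.append(x + dt * x * (alpha - beta * y))       # x' = x( alpha - beta.y )
        ys.append(y + dt * (-y) * (gamma - delta * x))   # y' = -y( gamma - delta.x )
    return numpy.array(xs), numpy.array(ys)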
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/xblock/runtime/blockstore_field_data.py
|
Python
|
agpl-3.0
| 16,343 | 0.003243 |
"""
Key-value store that holds XBlock field data read out of Blockstore
"""
from collections import namedtuple
from weakref import WeakKeyDictionary
import logging
from xblock.exceptions import InvalidScopeError, NoSuchDefinition
from xblock.fields import Field, BlockScope, Scope, UserScope, Sentinel
from xblock.field_data import FieldData
from openedx.core.djangoapps.xblock.learning_context.manager import get_learning_context_impl
from openedx.core.djangolib.blockstore_cache import (
get_bundle_version_files_cached,
get_bundle_draft_files_cached,
)
log = logging.getLogger(__name__)
ActiveBlock = namedtuple('ActiveBlock', ['olx_hash', 'changed_fields'])
DELETED = Sentinel('DELETED') # Special value indicating a field was reset to its default value
CHILDREN_INCLUDES = Sentinel('CHILDREN_INCLUDES') # Key for a pseudo-field that stores the XBlock's children info
MAX_DEFINITIONS_LOADED = 100 # How many of the most recently used XBlocks' field data to keep in memory at max.
class BlockInstanceUniqueKey(object):
"""
An empty object used as a unique key for each XBlock instance, see
get_weak_key_for_block() and BlockstoreFieldData._get_active_block(). Every
XBlock instance will get a unique one of these keys, even if they are
otherwise identical. Its purpose is similar to `id(block)`.
"""
def get_weak_key_for_block(block):
"""
Given an XBlock instance, return an object with the same lifetime as the
block, suitable as a key to hold block-specific data in a WeakKeyDictionary.
"""
# We would like to make the XBlock instance 'block' itself the key of
# BlockstoreFieldData.active_blocks, so that we have exactly one entry per
# XBlock instance in memory, and they'll each be automatically freed by the
# WeakKeyDictionary as needed. But because XModules implement
# __eq__() in a way that reads all field values, just attempting to use
# the block as a dict key here will trigger infinite recursion. So
# instead we key the dict on an arbitrary object,
# block key = BlockInstanceUniqueKey() which we create here. That way
# the weak reference will still cause the entry in the WeakKeyDictionary to
# be freed automatically when the block is no longer needed, and we
# still get one entry per XBlock instance.
if not hasattr(block, '_field_data_key_obj'):
block._field_data_key_obj = BlockInstanceUniqueKey() # pylint: disable=protected-access
return block._field_data_key_obj # pylint: disable=protected-access
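# --- Editor's illustrative sketch (not part of the original module) ----------
# A stripped-down version of the pattern described in the comments above: cache
# entries are keyed on a per-instance sentinel object, so the WeakKeyDictionary
# entry dies with its owner even when the owner defines an expensive or
# recursive __eq__/__hash__. All names below are made up for illustration.
class _SentinelKey(object):
    """Unique, hashable stand-in for one owner instance."""
_sentinel_store = WeakKeyDictionary()
def _sentinel_key_for(owner):
    """Return (creating on first use) the sentinel key tied to `owner`."""
    if not hasattr(owner, '_sentinel_key'):
        owner._sentinel_key = _SentinelKey()
    return owner._sentinel_key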
def get_olx_hash_for_definition_key(def_key):
"""
Given a BundleDefinitionLocator, which identifies a specific version of an
OLX file, return the hash of the OLX file as given by the Blockstore API.
"""
if def_key.bundle_version:
# This is referring to an immutable file (BundleVersions are immutable so this can be aggressively cached)
files_list = get_bundle_version_files_cached(def_key.bundle_uuid, def_key.bundle_version)
else:
# This is referring to a draft OLX file which may be recently updated:
files_list = get_bundle_draft_files_cached(def_key.bundle_uuid, def_key.draft_name)
for entry in files_list:
if entry.path == def_key.olx_path:
return entry.hash_digest
raise NoSuchDefinition("Could not load OLX file for key {}".format(def_key))
class BlockstoreFieldData(FieldData):
"""
An XBlock FieldData implementation that reads XBlock field data directly out
of Blockstore.
It requires that every XBlock have a BundleDefinitionLocator as its
"definition key", since the BundleDefinitionLocator is what specifies the
OLX file path and version to use.
Within Blockstore there is no mechanism for setting different field values
at the usage level compared to the definition level, so we treat
usage-scoped fields identically to definition-scoped fields.
"""
def __init__(self):
"""
Initialize this BlockstoreFieldData instance.
"""
# loaded definitions: a dict where the key is the hash of the XBlock's
# olx file (as stated by the Blockstore API), and the values is the
# dict of field data as loaded from that OLX file. The field data dicts
# in this should be considered immutable, and never modified.
self.loaded_definitions = {}
# Active blocks: this holds the field data *changes* for all the XBlocks
# that are currently in memory being used for something. We only keep a
# weak refe
|
rence so that the memory will be freed when the XBlock is no
# longer needed (e.g. at the end of a request)
# The
|
key of this dictionary is an ID object owned by the XBlock itself
# (see _get_active_block()) and the value is an ActiveBlock object
# (which holds olx_hash and changed_fields)
self.active_blocks = WeakKeyDictionary()
super(BlockstoreFieldData, self).__init__() # lint-amnesty, pylint: disable=super-with-arguments
def _getfield(self, block, name):
"""
Return the field with the given `name` from `block`.
If the XBlock doesn't have such a field, raises a KeyError.
"""
# First, get the field from the class, if defined
block_field = getattr(block.__class__, name, None)
if block_field is not None and isinstance(block_field, Field):
return block_field
# Not in the class, so name really doesn't name a field
raise KeyError(name)
def _check_field(self, block, name):
"""
Given a block and the name of one of its fields, check that we will be
able to read/write it.
"""
if name == CHILDREN_INCLUDES:
return # This is a pseudo-field used in conjunction with BlockstoreChildrenData
field = self._getfield(block, name)
if field.scope in (Scope.children, Scope.parent): # lint-amnesty, pylint: disable=no-else-raise
# This field data store is focused on definition-level field data, and children/parent is mostly
# relevant at the usage level. Scope.parent doesn't even seem to be used?
raise NotImplementedError("Setting Scope.children/parent is not supported by BlockstoreFieldData.")
else:
if field.scope.user != UserScope.NONE:
raise InvalidScopeError("BlockstoreFieldData only supports UserScope.NONE fields")
if field.scope.block not in (BlockScope.DEFINITION, BlockScope.USAGE):
raise InvalidScopeError(
"BlockstoreFieldData does not support BlockScope.{} fields".format(field.scope.block)
)
# There is also BlockScope.TYPE but we don't need to support that;
# it's mostly relevant as Scope.preferences(UserScope.ONE, BlockScope.TYPE)
# Which would be handled by a user-aware FieldData implementation
def _get_active_block(self, block):
"""
Get the ActiveBlock entry for the specified block, creating it if
necessary.
"""
key = get_weak_key_for_block(block)
if key not in self.active_blocks:
self.active_blocks[key] = ActiveBlock(
olx_hash=get_olx_hash_for_definition_key(block.scope_ids.def_id),
changed_fields={},
)
return self.active_blocks[key]
def get(self, block, name):
"""
Get the given field value from Blockstore
If the XBlock has been making changes to its fields, the value will be
in self._get_active_block(block).changed_fields[name]
Otherwise, the value comes from self.loaded_definitions which is a dict
of OLX file field data, keyed by the hash of the OLX file.
"""
self._check_field(block, name)
entry = self._get_active_block(block)
if name in entry.changed_fields:
value = entry.changed_fields[name]
if value == DELETED:
raise KeyError # KeyError means use the default value, since this field was deliberately set to default
return
|
bertrandF/DictionaryDB
|
db.py
|
Python
|
gpl-2.0
| 8,296 | 0.008799 |
#!/usr/bin/python3.4
#############################################################################
#
# Dictionnary DB managing script. Add/Del/Search definitions
# Copyright (C) 2014 bertrand
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#############################################################################
###############
### Imports ###
import sys
import psycopg2 as PSQL
import textwrap as txtwrp
#####################
### Configuration ###
config = {
'VERSION_MAJOR' : '0',
'VERSION_MINOR' : '1',
'dbname' : 'bertrand',
'user' : 'bertrand'
}
#############
### USAGE ###
def usage():
print("Tool to insert/remove entries in the dicotionnnary.")
print("Version: " + config['VERSION_MAJOR'] + "." + config['VERSION_MINOR'])
print("Usage: " + sys.argv[0] + " <command> <options>")
print("")
print("Commands:")
print(" add Add definition
|
to dictionnary.")
print(" del Remove definition from dictionnary.")
print(" help Print general help or command specific help.")
print(" search Search definit
|
ion in dictionnary.")
print("")
###########
### ADD ###
def add():
argc = len(sys.argv)
if argc < 3:
__help_cmd(sys.argv[1])
return
req = {
'fields' : '',
'name' : '',
'def' : '',
'url' : ''
}
i=2
while i < argc:
if sys.argv[i] == "-d":
i += 1
req['def'] = sys.argv[i]
elif sys.argv[i] == "-f":
i += 1
req['fields'] = sys.argv[i]
elif sys.argv[i] == '-n':
i += 1
req['name'] = sys.argv[i]
elif sys.argv[i] == "-u":
i += 1
req['url'] = sys.argv[i]
else:
print("Unknown option '" + sys.argv[i] + "'")
__help_cmd(sys.argv[1])
return
i += 1
if req['fields'] == '':
print("Please specify fields with option '-f'.")
__help_cmd(sys.argv[1])
return
elif req['name'] == '':
print("Please specify fields with option '-f'.")
__help_cmd(sys.argv[1])
return
elif req['def'] == '':
print("Please specify definition with option '-d'.")
__help_cmd(sys.argv[1])
return
conn = PSQL.connect("dbname=" + config['dbname'] + " user=" + config['user'])
cur = conn.cursor()
req = cur.mogrify("INSERT INTO dico (fields,name,def,url) VALUES (%s, %s, %s, %s)",
("{" + req['fields'] + "}", req['name'], req['def'], req['url']))
print(req)
cur.execute(req)
conn.commit()
cur.close()
conn.close()
###########
### DEL ###
def delete():
try:
defid = sys.argv[2]
except IndexError:
print("Missing argument.")
__help_cmd(sys.argv[1])
return
conn = PSQL.connect("dbname=" + config['dbname'] + " user=" + config['user'])
cur = conn.cursor()
req = cur.mogrify("DELETE FROM dico WHERE id=%s", (defid,))
print(req)
cur.execute(req)
conn.commit()
cur.close()
conn.close()
#####################
### HELP COMMANDS ###
def help_cmd():
try:
cmd = sys.argv[2]
except:
cmd = ''
__help_cmd(cmd)
def __help_cmd(cmd):
if cmd == '' :
usage()
elif cmd == "add" :
print("Command '" + cmd + "': Add definition to dictionnary.")
print("Usage: " + sys.argv[0] + " " + cmd + " <options>")
print("")
print("Options:")
print(" -d <str> Definition.")
print(" -f <str,str,..> List of fields.")
print(" -n <str> Name of the entry")
print(" -u <url> One url to a more complete definition.")
print("")
elif cmd == "del" :
print("Command '" + cmd + "': Delete definition from dictionnary.")
print("Usage: " + sys.argv[0] + " " + cmd + " <id>")
print("")
print("Param:")
print(" id ID of the definition to delete.")
print("")
elif cmd == "help" :
print("Command '" + cmd + "': Print help.")
print("Usage: " + sys.argv[0] + " " + cmd + " [command]")
print("")
print("Giving NO 'command' this will print the general help.")
print("Giving 'command' this will print the command specific help. ")
print("")
elif cmd == "search" :
print("Command '" + cmd + "': Search definition in dictionnary.")
print("Usage: " + sys.argv[0] + " " + cmd + " <options>")
print("")
print("Options:")
print(" -a Print all definitions in the table.")
print(" -f <str,str,...> Print definitions matching the set of given fields.")
print(" -i <id> Print definition matching the given ID.")
print(" -n <str> Print definition mathing the given entry name.")
print("")
else:
print("Unknown command: '" + cmd + "'")
usage()
##############
### SEARCH ###
def search():
try:
opt = sys.argv[2]
except IndexError:
__help_cmd(sys.argv[1])
return
else:
if not opt in ('-a', '-f', '-i', '-n'):
print("Unknown option '" + sys.argv[2] + "'")
__help_cmd(sys.argv[1])
return
conn = PSQL.connect("dbname=" + config['dbname'] + " user=" + config['user'])
cur = conn.cursor()
try:
if opt == "-a":
req = cur.mogrify("SELECT id,fields,name,def,url FROM dico")
elif opt == "-f":
optarg = sys.argv[3]
req = __search_build_req_fields(optarg.split(','))
elif opt == '-i':
optarg = sys.argv[3]
req = cur.mogrify("SELECT id,fields,name,def,url FROM dico WHERE id=%s", (optarg,))
elif opt == "-n":
optarg = sys.argv[3]
req = cur.mogrify("SELECT id,fields,name,def,url FROM dico WHERE name=%s", (optarg,))
except IndexError:
print("Missing argument.")
__help_cmd(sys.argv[1])
else:
print(req)
cur.execute(req)
print_rows(cur.fetchall())
conn.commit()
finally:
cur.close()
conn.close()
def __search_build_req_fields(fields):
# How do you like your SQL injection?
# I like mine crispy and with a python '+' ;)
# http://initd.org/psycopg/docs/usage.html
# http://xkcd.com/327/
# That will do for now ...
req = "SELECT id,fields,name,def,url FROM dico WHERE "
req += "'" + fields[0] + "'=ANY(fields)"
for f in fields[1:]:
req += " OR '" + f + "'=ANY(fields)"
return req
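# --- Editor's sketch (not part of the original script) -----------------------
# A parameterized variant of the query built above: field values are bound via
# psycopg2 placeholders rather than concatenated into the SQL string, removing
# the injection risk the comments joke about. The helper name is made up and is
# not wired into search().
def __search_build_req_fields_safe(cur, fields):
    clause = " OR ".join(["%s=ANY(fields)"] * len(fields))
    req = "SELECT id,fields,name,def,url FROM dico WHERE " + clause
    return cur.mogrify(req, tuple(fields))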
###################################
### PRINT PSQL REQUESTS RESULTS ###
def print_rows(rows):
for row in rows:
print("---------------------")
print("ID : ", row[0])
__print_row_wrapped("FIELDS : ", row[1])
__print_row_wrapped("NAME : ", row[2])
__print_row_wrapped("DEF : ", row[3])
__print_row_wrapped("URL : ", row[4])
print("")
def __print_row_wrapped(label, value):
labellen = len(label)
wrapped = txtwrp.wrap(value)
print(label, wrapped[0])
for i in range(1, len(wrapped)):
print(' ' * labellen, wrapped[i])
############
### MAIN ###
commands = {
'add' : add,
'del' : delete,
'help' : help_cmd,
'search' : search
}
try:
cmd = sys.argv[1]
except IndexError:
print("Unknown co
|
HorizonXP/python-react-router
|
react_router/render.py
|
Python
|
mit
| 5,105 | 0.003918 |
import os
import sys
import json
from optional_django import staticfiles
from optional_django.serializers import JSONEncoder
from optional_django.safestring import mark_safe
from optional_django import six
from js_host.function import Function
from js_host.exceptions import FunctionError
from react.render import RenderedComponent
from react.exceptions import ComponentSourceFileNotFound
from react.exceptions import ReactRenderingError
from react_router.conf import settings
from react_router.templates import MOUNT_JS
from react_router.bundle import bundle_component
from webpack.compiler import WebpackBundle
class RouteRenderedComponent(RenderedComponent):
def get_client_asset(self):
client_asset = None
bundled_component = self.get_bundle()
assets = bundled_component.get_assets()
for asset in assets:
if asset['path'] == self.path_to_source:
client_asset = asset
break
return client_asset
def get_var(self):
client_asset = self.get_client_asset()
if client_asset:
return 'client'
raise Exception("Client asset not found.")
def render_js(self):
client_asset = self.get_client_asset()
if client_asset:
client_bundle = mark_safe(WebpackBundle.render_tag(client_asset['url']))
return mark_safe(
'\n{bundle}\n<script>\n{mount_js}\n</script>\n'.format(
bundle=client_bundle,
mount_js=self.render_mount_js(),
)
)
def render_mount_js(self):
return mark_safe(
MOUNT_JS.format(
var=self.get_var(),
props=self.serialized_props or 'null',
container_id=self.get_container_id()
)
)
class RouteRedirect(object):
def __init__(self, pathname, query = None, state = None, *args, **kwargs):
self.path = pathname
self.query = query
if state and 'nextPathname' in state:
self.nextPath = state['nextPathname']
else:
self.nextPath = None
if self.path is None:
raise ReactRenderingError("No path returned for redirection.")
super(RouteRedirect, self).__init__(*args, **kwargs)
@property
def url(self):
if self.query:
return "%s?next=%s&%s" % (self.path, self.nextPath, self.query)
else:
return "%s?next=%s" % (self.path, self.nextPath)
class RouteNotFound(object):
def __init__(self, *args, **kwargs):
super(RouteNotFound, self).__init__(*args, **kwargs)
js_host_function = Function(settings.JS_HOST_FUNCTION)
def render_route(
# Rendering options
path, # path to routes file
client_path, # path to client routes file
request, # pass in request object
props=None,
to_static_markup=None,
# Bundling options
bundle=None,
tr
|
anslate=None,
# Prop handling
json_encoder=None
):
if not os.path.isabs(path):
abs_path = staticfiles.find(path)
if not abs_path:
raise ComponentSourceFileNotFound(path)
path = abs_path
if not os.path.exists(path):
raise ComponentSourceFileNotFound(path)
if not os.path.isabs(client_path):
abs_client_path = staticfiles.find(client_path)
if not abs_clien
|
t_path:
raise ComponentSourceFileNotFound(client_path)
client_path = abs_client_path
if not os.path.exists(client_path):
raise ComponentSourceFileNotFound(client_path)
bundled_component = None
import re
client_re = re.compile(r"client-(?:\w*\d*).js",re.IGNORECASE)
server_re = re.compile(r"server-(?:\w*\d*).js",re.IGNORECASE)
if bundle or translate:
bundled_component = bundle_component(path, client_path, translate=translate)
assets = bundled_component.get_assets()
for asset in assets:
m = client_re.search(asset['name'])
if m:
client_path = asset['path']
m = server_re.search(asset['name'])
if m:
path = asset['path']
if json_encoder is None:
json_encoder = JSONEncoder
if props is not None:
serialized_props = json.dumps(props, cls=json_encoder)
else:
serialized_props = None
try:
location = {
'pathname': request.path,
'query': request.GET.dict()
}
cbData = json.loads(js_host_function.call(
path=path,
location=location,
serializedProps=serialized_props,
toStaticMarkup=to_static_markup
))
except FunctionError as e:
raise six.reraise(ReactRenderingError, ReactRenderingError(*e.args), sys.exc_info()[2])
if cbData['match']:
return RouteRenderedComponent(cbData['markup'], client_path, props, serialized_props, bundled_component, to_static_markup)
else:
if cbData['redirectInfo']:
return RouteRedirect(**cbData['redirectInfo'])
else:
return RouteNotFound()
|
attibalazs/nltk-examples
|
7.6_Relation_Extraction.py
|
Python
|
mit
| 332 | 0.006024 |
import nltk
import re
import pprint
def main():
    IN = re.compile(r'.*\bin\b(?!\b.+ing)')  # "X in Y", skipping cases where "in" is followed by a gerund, e.g. "success in supervising"
for doc in nltk.corpus.ieer.parsed_docs('NYT_1998031
|
5'):
for rel in nltk.sem.extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern=IN):
|
print nltk.sem.relextract.rtuple(rel)
if __name__ == "__main__":
main()
|
eeyorkey/ipac
|
tasks/__init__.py
|
Python
|
gpl-2.0
| 42 | 0.02381 |
__all__ =
|
["pval_task", "annotation_tas
|
k"]
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/extraction.py
|
Python
|
gpl-3.0
| 4,798 | 0.002293 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Pavel Korshunov <Pavel.Korshunov@idiap.ch>
# Tue 22 Sep 17:21:35 CEST 2015
#
# Copyright (C) 2012-2015 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import math
import numpy
logger = logging.getLogger("bob.bio.spear")
def zeromean_unitvar_norm(data, mean, std):
""" Normalized the data with zero mean and unit variance. Mean and variance are in numpy.ndarray format"""
return numpy.divide(data - mean, std)
def calc_mean(c0, c1=[]):
""" Calculates the mean of the data."""
i
|
f c1 != []:
return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.0
else:
return
|
numpy.mean(c0, 0)
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
"""
@param c0
@param c1
@param nonStdZero if the std was zero, convert to one. This will avoid a zero division
"""
def calc_mean_std(c0, c1=[], nonStdZero=False):
""" Calculates both the mean of the data. """
mi = calc_mean(c0, c1)
std = calc_std(c0, c1)
if nonStdZero:
std[std == 0] = 1
return mi, std
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
"""Trim the spectrogram to remove silent head/tails from the speech sample.
Keep all remaining frames or either speech or non-speech only
@param: filter_frames: the value is either 'silence_only' (keep the speech, remove everything else),
'speech_only' (only keep the silent parts), 'trim_silence' (trim silent heads and tails),
or 'no_filter' (no filter is applied)
"""
if not features.size:
raise ValueError(
"vad_filter_features(): data sample is empty, no features extraction is possible"
)
vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
features = numpy.asarray(features, dtype=numpy.float64)
features = numpy.reshape(features, (vad_labels.shape[0], -1))
# logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
# print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
# first, take the whole thing, in case there are problems later
filtered_features = features
# if VAD detection worked on this sample
if vad_labels is not None and filter_frames != "no_filter":
        # make sure the size of VAD labels and spectrogram length match
if len(vad_labels) == len(features):
# take only speech frames, as in VAD speech frames are 1 and silence are 0
(speech,) = numpy.nonzero(vad_labels)
silences = None
if filter_frames == "silence_only":
# take only silent frames - those for which VAD gave zeros
(silences,) = numpy.nonzero(vad_labels == 0)
if len(speech):
nzstart = speech[0] # index of the first non-zero
nzend = speech[-1] # index of the last non-zero
if filter_frames == "silence_only": # extract only silent frames
# take only silent frames in-between the speech
silences = silences[silences > nzstart]
silences = silences[silences < nzend]
filtered_features = features[silences, :]
elif filter_frames == "speech_only":
filtered_features = features[speech, :]
else: # when we take all
filtered_features = features[
nzstart : nzend + 1, :
] # numpy slicing is a non-closed interval [)
else:
logger.error(
"vad_filter_features(): VAD labels should be the same length as energy bands"
)
logger.info(
"vad_filter_features(): filtered_features shape: %s",
str(filtered_features.shape),
)
return filtered_features
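# --- Editor's usage sketch (not part of the original module) -----------------
# Exercising the filter modes described in the docstring above on made-up toy
# data: the labels mark frames 2, 3 and 5 as speech.
if __name__ == "__main__":
    toy_labels = numpy.array([0, 0, 1, 1, 0, 1, 0])
    toy_feats = numpy.arange(7 * 3, dtype=numpy.float64).reshape(7, 3)
    trimmed = vad_filter_features(toy_labels, toy_feats, "trim_silence")  # frames 2..5
    speech = vad_filter_features(toy_labels, toy_feats, "speech_only")    # frames 2, 3, 5
    print(trimmed.shape, speech.shape)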
|
kayhayen/Nuitka
|
nuitka/nodes/CoroutineNodes.py
|
Python
|
apache-2.0
| 4,498 | 0.001112 |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes for coroutine objects and their creations.
Coroutines are turned into normal functions that create generator objects,
whose implementation lives here. The creation itself also lives here.
"""
from .ExpressionBases import ExpressionChildHavingBase
from .FunctionNodes import ExpressionFunctionEntryPointBase
class ExpressionMakeCoroutineObject(ExpressionChildHavingBase):
kind = "EXPRESSION_MAKE_COROUTINE_OBJECT"
named_child = "coroutine_ref"
__slots__ = ("variable_closure_traces",)
def __init__(self, coroutine_ref, source_ref):
assert coroutine_ref.getFunctionBody().isExpressionCoroutineObjectBody()
ExpressionChildHavingBase.__init__(
self, value=coroutine_ref, source_ref=source_ref
)
self.variable_closure_traces = None
def getDetailsForDisplay(self):
return {"coroutine": self.subnode_coroutine_ref.getFunctionBody().getCodeName()}
def computeExpression(self, trace_collection):
self.variable_closure_traces = []
for (
closure_variable
) in self.subnode_coroutine_ref.getFunctionBody().getClosureVariables():
trace = trace_collection.getVariableCurrentTrace(closure_variable)
trace.addNameUsage()
self.variable_closure_traces.append((closure_variable, trace))
# TODO: Coroutine body may know something too.
return self, None, None
@staticmethod
def mayRaiseException(exception_type):
return False
@staticmethod
def mayHaveSideEffects():
return False
def getClosureVariableVersions(self):
return self.variable_closure_traces
class ExpressionCoroutineObjectBody(ExpressionFunctionEntryPoi
|
ntBase):
kind = "EXPRESSION_COROUTINE_OBJECT_BODY"
__slots__ = ("qualname_setup", "needs_generator_return_exit")
def __init__(self, provider, name, code_object, flags, auto_release, source_ref):
ExpressionFunctionEntryPointBase.__init__(
self,
provider=provider,
name=name,
|
code_object=code_object,
code_prefix="coroutine",
flags=flags,
auto_release=auto_release,
source_ref=source_ref,
)
self.needs_generator_return_exit = False
self.qualname_setup = None
def getFunctionName(self):
return self.name
def markAsNeedsGeneratorReturnHandling(self, value):
self.needs_generator_return_exit = max(self.needs_generator_return_exit, value)
def needsGeneratorReturnHandling(self):
return self.needs_generator_return_exit == 2
def needsGeneratorReturnExit(self):
return bool(self.needs_generator_return_exit)
@staticmethod
def needsCreation():
return False
@staticmethod
def isUnoptimized():
return False
class ExpressionAsyncWait(ExpressionChildHavingBase):
kind = "EXPRESSION_ASYNC_WAIT"
named_child = "expression"
__slots__ = ("exception_preserving",)
def __init__(self, expression, source_ref):
ExpressionChildHavingBase.__init__(
self, value=expression, source_ref=source_ref
)
self.exception_preserving = False
@staticmethod
def isExpressionAsyncWait():
return True
def computeExpression(self, trace_collection):
        # TODO: Might be predictable based on awaitable analysis or for constants.
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
class ExpressionAsyncWaitEnter(ExpressionAsyncWait):
kind = "EXPRESSION_ASYNC_WAIT_ENTER"
class ExpressionAsyncWaitExit(ExpressionAsyncWait):
kind = "EXPRESSION_ASYNC_WAIT_EXIT"
|
xunmengfeng/engine
|
sky/tools/skypy/url_mappings.py
|
Python
|
bsd-3-clause
| 878 | 0.009112 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
class URLMappings(object):
def __init__(self, src_root, build_dir):
self.mappings = {
'dart:mojo.internal': os.path.join(src_root, 'mojo/public/dart/sdk_ext/interna
|
l.dart'),
'dart:sky': os.path.join(build_dir, 'gen/sky/bindings/dart_sky.dart'),
'dart:sky.internals': os.path.join(src_root, 'sky/engine/bindings/sky_internals.dart'),
'dart:sky_builtin_natives': os.path.join(src_root, 'sky/engine/bindings/builtin_natives.dart'),
}
self.packages_root = os.path.join(build_dir, 'gen/dart-pkg/packages')
@property
def as_args(self):
return map(lambda item: '--url-mapping=%s,%s'
|
% item, self.mappings.items())
|
practo/federation
|
manage.py
|
Python
|
mit
| 1,415 | 0.002827 |
import urllib
from flask import url_for
from f
|
lask_script import Manager, Server, Shell, Command
from config.app import create_app
from config.db
|
import db
from config.initializers.newrelic_monitoring import NewrelicMonitoring
from federation_api.people.model import Person
manager = Manager(create_app)
server = Server(host='0.0.0.0', port=1786)
NewrelicMonitoring(manager.app())
manager.add_command('runserver', server)
def _make_context():
models = [Person]
models = {model.__name__: model for model in models}
return dict(app=create_app(), db=db, **models)
manager.add_command('shell', Shell(make_context=_make_context))
class Routes(Command):
def run(self):
output = []
app = manager.app
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
# FIXME: Results in http://<host_name>:<host_port>/<blueprint_mount>/<endpoint>g
url = url_for(rule.endpoint, **options)
line = urllib.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods,
url))
output.append(line)
for line in sorted(output):
print(line)
manager.add_command('routes', Routes())
if __name__ == '__main__':
manager.run()
|
Spiderlover/Toontown
|
toontown/effects/Fireworks.py
|
Python
|
mit
| 9,697 | 0.004744 |
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from pandac.PandaModules import *
import random
from FireworkGlobals import *
colors = {WHITE: Vec4(1, 1, 1, 1),
RED: Vec4(1, 0.2, 0.2, 1),
BLUE: Vec4(0.2, 0.2, 1, 1),
YELLOW: Vec4(1, 1, 0.2, 1),
GREEN: Vec4(0.2, 1, 0.2, 1),
PINK: Vec4(1, 0.5, 0.5, 1),
PEACH: Vec4(0.9, 0.6, 0.4, 1),
PURPLE: Vec4(1, 0.1, 1, 1),
CYAN: Vec4(0.2, 1, 1, 1)}
textures = {SNOWFLAKE: 'phase_8/models/props/snowflake_treasure',
MUSICNOTE: 'phase_6/models/props/music_treasure',
FLOWER: 'phase_8/models/props/flower_treasure',
ICECREAM: 'phase_4/models/props/icecream',
STARFISH: 'phase_6/models/props/starfish_treasure',
ZZZ: 'phase_8/models/props/zzz_treasure'}
fireworkId = 0
def getNextSequenceName(name):
global fireworkId
fireworkId += 1
return '%s-%s' % (name, fireworkId)
def getColor(colorIndex):
return colors.get(colorIndex)
def getTexture(textureIndex):
return loader.loadModel(textures.get(textureIndex))
def shootFirework(style, x = 0, y = 0, z = 0, colorIndex1 = 0, colorIndex2 = 0, amp = 10):
func = style2shootFunc.get(style)
color1 = getColor(colorIndex1)
if style is CIRCLESPRITE:
color2 = getTexture(colorIndex2)
else:
color2 = getColor(colorIndex2)
if func and color1 and color2:
return func(x, y, z, color1, color2, amp)
def shootFireworkRing(x, y, z, color1, color2, amp):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SparkleParticleRenderer')
p0.setEmitter('RingEmitter')
p0.setPoolSize(100)
p0.setBirthRate(0.01)
p0.setLitterSize(100)
p0.setLitterSpread(0)
p0.factory.setLifespanBase(1.5)
p0.factory.setLifespanSpread(0.5)
p0.factory.setMassBase(1.0)
p0.factory.setMassSpread(0.0)
p0.factory.setTerminalVelocityBase(20.0)
p0.factory.setTerminalVelocitySpread(2.0)
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
p0.renderer.setUserAlpha(1.0)
p0.renderer.setCenterColor(color1)
p0.renderer.setEdgeColor(color2)
p0.renderer.setBirthRadius(0.3)
p0.renderer.setDeathRadius(0.3)
p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
p0.emit
|
ter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(0)
p0.emitter.setAmplitudeSpread(0)
f0 = ForceGroup.ForceGroup('gravity')
force0 = LinearSourceForce(Point3(x, y, z), LinearDistanceForce.FTONEOVERR, 0.1, 1.1 * amp, 1)
force0.setActive(1)
f0.addForc
|
e(force0)
force1 = LinearSinkForce(Point3(x, y, z), LinearDistanceForce.FTONEOVERR, 0.5, 2.0 * amp, 1)
force1.setActive(1)
f0.addForce(force1)
f.addForceGroup(f0)
p0.emitter.setRadius(4.0)
f.addParticles(p0)
f.setPos(x, y, z)
f.setHpr(0, random.random() * 180, random.random() * 180)
sfx = loader.loadSfx('phase_4/audio/sfx/firework_distance_03.ogg')
sfx.setVolume(0.7)
t = Sequence(Func(f.start, render, render), Func(sfx.play), Wait(0.5), Func(p0.setBirthRate, 3), Wait(1.5), Func(f.cleanup), name=getNextSequenceName('shootFireworkRing'))
t.start()
def shootFireworkRocket(x, y, z, color1, color2, amp):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SparkleParticleRenderer')
p0.setEmitter('SphereVolumeEmitter')
p0.setPoolSize(110)
p0.setBirthRate(0.01)
p0.setLitterSize(2)
p0.setLitterSpread(0)
p0.factory.setLifespanBase(0.4)
p0.factory.setLifespanSpread(0.1)
p0.factory.setMassBase(1.0)
p0.factory.setMassSpread(0.0)
p0.factory.setTerminalVelocityBase(400.0)
p0.factory.setTerminalVelocitySpread(0.0)
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
p0.renderer.setUserAlpha(1.0)
p0.renderer.setCenterColor(color1)
p0.renderer.setEdgeColor(color2)
p0.renderer.setBirthRadius(0.6)
p0.renderer.setDeathRadius(0.6)
p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(amp)
p0.emitter.setAmplitudeSpread(0.0)
p0.emitter.setRadius(0.3)
f.addParticles(p0)
gravityForceGroup = ForceGroup.ForceGroup('gravity')
force0 = LinearVectorForce(Vec3(0.0, 0.0, -10.0), 1.0, 0)
force0.setActive(1)
gravityForceGroup.addForce(force0)
f.addForceGroup(gravityForceGroup)
f.setPos(x, y, z)
sfxName = random.choice(('phase_4/audio/sfx/firework_whistle_01.ogg', 'phase_4/audio/sfx/firework_whistle_02.ogg'))
sfx = loader.loadSfx(sfxName)
sfx.setVolume(0.4)
t = Sequence(Func(f.start, render, render), Func(sfx.play), LerpPosInterval(f, 2.0, Vec3(x, y, z + 20 * amp), blendType='easeInOut'), Func(p0.setBirthRate, 3), Wait(0.5), Func(f.cleanup), name=getNextSequenceName('shootFirework'))
t.start()
def shootPop(x, y, z, color1, color2, amp):
sfxName = random.choice(('phase_4/audio/sfx/firework_distance_01.ogg', 'phase_4/audio/sfx/firework_distance_02.ogg', 'phase_4/audio/sfx/firework_distance_03.ogg'))
sfx = loader.loadSfx(sfxName)
t = Sequence(Func(sfx.play), Wait(3), name=getNextSequenceName('shootFireworkRocket'))
t.start()
def shootFireworkCircle(x, y, z, color1, color2, amp):
return shootFireworkCircleGeneric(x, y, z, color1, color2, amp, 100)
def shootFireworkCircleLarge(x, y, z, color1, color2, amp):
return shootFireworkCircleGeneric(x, y, z, color1, color2, amp * 1.5, 200)
def shootFireworkCircleSmall(x, y, z, color1, color2, amp):
return shootFireworkCircleGeneric(x, y, z, color1, color2, amp * 0.5, 50)
def shootFireworkCircleGeneric(x, y, z, color1, color2, amp, poolSize):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SparkleParticleRenderer')
p0.setEmitter('SphereVolumeEmitter')
p0.setPoolSize(poolSize)
p0.setBirthRate(0.01)
p0.setLitterSize(poolSize)
p0.factory.setLifespanBase(2.0)
p0.factory.setLifespanSpread(0.5)
p0.factory.setTerminalVelocityBase(400.0)
p0.factory.setTerminalVelocitySpread(40.0)
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
p0.renderer.setUserAlpha(1.0)
p0.renderer.setCenterColor(color1)
p0.renderer.setEdgeColor(color1)
p0.renderer.setBirthRadius(0.4)
p0.renderer.setDeathRadius(0.6)
p0.renderer.setLifeScale(SparkleParticleRenderer.SPSCALE)
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitudeSpread(0.1)
p0.emitter.setAmplitude(amp)
p0.emitter.setRadius(0.1)
f.addParticles(p0)
circleForceGroup = ForceGroup.ForceGroup('gravity')
force1 = LinearSinkForce(Point3(x, y, z - 100), LinearDistanceForce.FTONEOVERRSQUARED, 2.0, 0.3 * amp * 0.1, 1)
force1.setActive(1)
circleForceGroup.addForce(force1)
f.addForceGroup(circleForceGroup)
f.setPos(x, y, z)
sfxName = random.choice(('phase_4/audio/sfx/firework_explosion_01.ogg', 'phase_4/audio/sfx/firework_explosion_02.ogg', 'phase_4/audio/sfx/firework_explosion_03.ogg'))
sfx = loader.loadSfx(sfxName)
sfx.setVolume(0.7)
t = Sequence(Func(f.start, render, render), Func(sfx.play), Wait(0.5), Func(p0.setBirthRate, 3), Wait(0.5), Func(p0.renderer.setCenterColor, color2), Func(p0.renderer.setEdgeColor, color2), Wait(1.5), Func(f.cleanup), name=getNextSequenceName('shootFireworkCircle'))
t.start()
def shootFireworkCircleSprite(x, y, z, color, texture, amp):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SpriteParticleRenderer')
p0.setEmitter('SphereVolumeEmitter')
p0.setPoolSize(100)
p0.setBirthRate(0.01)
p0.setLitterSize(100)
p0.factory.setLifespanBase(2.0)
p0.factory.setLifespanSpread(0.5)
p0.factory.setTerminalVelocityBase(400.0)
p0.factory.setTerminalVelocitySpread(40.0)
p0.rende
|
draperlaboratory/user-ale
|
demo/dashboard/files/twisted_app.py
|
Python
|
apache-2.0
| 7,791 | 0.005391 |
#
# Copyright 2016 The Charles Stark Draper Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from twisted.web.server import Site
from twisted.web.static import File
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.application import service, internet
from twisted.application.app import startApplication
import os
import sys
import logging
from logging import config
import logging.handlers
import argparse
import simplejson
parser = argparse.ArgumentParser(description='Export incoming JSON logs to a specified file.')
parser.add_argument('-c', '--config', type=str, help='Configuration file path.')
parser.add_argument('-p', '--port', type=int, default=80, help='Port for the TCP server to run on.')
parser.add_argument('-l', '--log-directory', type=str, help='Directory in which to output log files.')
parser.add_argument('-f', '--filename', type=str, default="xdata", help='Specify filename to store logs.')
parser.add_argument('--allow-origin', type=str,\
help='List of string URLs to allow Cross-Origin requests from.', nargs='*')
arguments = parser.parse_known_args()[0]
valid_keys = set(['port', 'log_directory', 'filename', 'allow_origin'])
if arguments.config is not None:
with open(arguments.config, 'r') as config_file:
settings = simplejson.loads(config_file.read())
else:
settings = vars(arguments)
settings = { key: settings[key] for key in settings if key in valid_keys }
if 'port' not in settings:
settings['port'] = 80
if 'log_directory' not in settings or settings['log_directory'] is None:
print 'Missing required config parameter log_directory.'
sys.exit(1)
if os.path.exists(settings['log_directory']):
if not os.access(settings['log_directory'], os.W_OK):
print 'Insufficient permissions to write to log directory %s' % settings['log_directory']
sys.exit(1)
else:
try:
os.makedirs(settings['log_directory'])
except:
print 'Unable to create log directory %s' % settings['log_directory']
sys.exit(1)
# logging configuration
LOG_SETTINGS = {
'version': 1,
'handlers': {
settings['filename'] + '-js': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'xdata',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-js.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
# Deprecated
settings['filename'] + '-v2': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'xdata',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-v2.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
settings['filename'] + '-v3': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'xdata',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-v3.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
settings['filename'] + '-error': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'detailed',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-error.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
},
'formatters': {
'xdata': {
'format': '%(message)s',
},
'detailed': {
'format': '%(asctime)s %(module)-17s line:%(lineno)-4d ' \
'%(levelname)-8s %(message)s',
},
'email': {
'format': 'Timestamp: %(asctime)s\nModule: %(module)s\n' \
'Line: %(lineno)d\nMessage: %(message)s',
},
},
'loggers': {
settings['filename'] + '-js': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-js',]
},
settings['filename'] + '-v2': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-v2',]
},
settings['filename'] + '-v3': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-v3',]
},
settings['filename'] + '-error': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-error',]
},
}
}
config.dictConfig(LOG_SETTINGS)
logger_js = logging.getLogger(settings['filename'] + '-js')
logger = logging.getLogger(settings['filename'] + '-v2')
loggerv3 = logging.getLogger(settings['filename'] + '-v3')
logger_err = logging.getLogger(settings['filename'] + '-error')
wf_dict = {
0: 'WF_OTHER',
1: 'WF_DEFINE',
2: 'WF_GETDATA',
3: 'WF_EXPLORE',
4: 'WF_CR
|
EATE',
5: 'WF_ENRICH',
6: 'WF_TRANSFORM'
}
def get_allow_origin(request):
if 'allow_origin' not in settings or settings['allow_origin'] is None:
return '*'
elif isinstance(settings['allow_origin'], list):
origin = request.getHeader('Origin')
return 'null' if origin not in settings['allow_origin'] else origin
else:
return settings['allow_origin']
def log_json(data):
if ('useraleVersion' in
|
data) and (data ['useraleVersion'].split('.')[0] == '4'):
logger_js.info(simplejson.dumps(data))
elif ('useraleVersion' in data) and (data['useraleVersion'].split('.')[0] == '3'):
loggerv3.info(simplejson.dumps(data))
elif ('parms' in data) and ('wf_state' in data['parms']):
data['wf_state_longname'] = wf_dict[data['parms']['wf_state']]
logger.info(simplejson.dumps(data))
class Logger(Resource):
def render_OPTIONS(self, request):
request.setHeader('Access-Control-Allow-Origin', get_allow_origin(request))
request.setHeader('Access-Control-Allow-Methods', 'POST')
request.setHeader('Access-Control-Allow-Headers', 'x-prototype-version,x-requested-with,Content-Type')
request.setHeader('Access-Control-Max-Age', 2520) # 42 hours
return ''
def render_POST(self, request):
request.setHeader('Access-Control-Allow-Origin', get_allow_origin(request))
request.setHeader('Access-Control-Allow-Methods', 'POST')
request.setHeader('Access-Control-Allow-Headers', 'x-prototype-version,x-requested-with,Content-Type')
request.setHeader('Access-Control-Max-Age', 2520) # 42 hours
data = simplejson.loads(request.content.getvalue())
try:
if isinstance(data, list):
for datum in data:
log_json(datum)
else:
log_json(data)
except Exception as e:
logger_err.error(e)
return ''
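# --- Editor's sketch (not part of the original module) -----------------------
# What a client submission to the /send_log resource above might look like. The
# host, port and payload are illustrative, and the helper is never called here;
# it assumes the `requests` package is available on the client side.
def _example_send_log(host='localhost', port=80):
    import requests
    payload = [{'useraleVersion': '3.1.0', 'activity': 'click'}]
    return requests.post('http://%s:%d/send_log' % (host, port),
                         data=simplejson.dumps(payload),
                         headers={'Content-Type': 'application/json'})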
root = Resource()
root.putChild('send_log', Logger())
# wrap the root resource in a site and expose it as a TCP service on the configured port
tmp_service = internet.TCPServer(settings['port'], Site(root))
application = service.Application('User-ALE')
# attach the service to its parent application
tmp_service.setServiceParent(application)
startApplication(application, 0)
reactor.run()
|
mnubo/kubernetes-py
|
kubernetes_py/models/v1/DeleteOptions.py
|
Python
|
apache-2.0
| 3,140 | 0.002866 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import json
import yaml
from kubernetes_py.utils import is_valid_string
class DeleteOptions(object):
"""
http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_deleteoptions
"""
def __init__(self):
super(DeleteOptions, self).__init__()
# TODO(froch): add support for the below.
# self._preconditions = None
self._kind = "DeleteOptions"
self._api_version = "v1"
self._grace_period_seconds = 0
self._orphan_dependents = False
# ------------------------------------------------------------------------------------- kind
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, k=0):
if not is_valid_string(k):
raise SyntaxError("DeleteOptions: kind: [ {0} ] is invalid.".format(k))
self._kind = k
# ------------------------------------------------------------------------------------- apiVersion
@property
def api_version(self):
return self._api_versi
|
on
@api_version.setter
def api_version(self, v=0):
if not is_valid_string(v):
raise SyntaxError("DeleteOptions: api_version: [ {0} ] is invalid.".format(v))
        self._api_version = v
#
|
------------------------------------------------------------------------------------- grace period seconds
@property
def grace_period_seconds(self):
return self._grace_period_seconds
@grace_period_seconds.setter
def grace_period_seconds(self, secs=0):
if not isinstance(secs, int):
raise SyntaxError("DeleteOptions: grace_period_seconds: [ {0} ] is invalid.".format(secs))
self._grace_period_seconds = secs
# ------------------------------------------------------------------------------------- orphanDependents
@property
def orphan_dependents(self):
return self._orphan_dependents
@orphan_dependents.setter
def orphan_dependents(self, orphan=False):
if not isinstance(orphan, bool):
raise SyntaxError("DeleteOptions: orphan_dependents: [ {0} ] is invalid.".format(orphan))
self._orphan_dependents = orphan
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.kind is not None:
data["kind"] = self.kind
if self.api_version is not None:
data["apiVersion"] = self.api_version
if self.grace_period_seconds is not None:
data["gracePeriodSeconds"] = self.grace_period_seconds
if self.orphan_dependents is not None:
data["orphanDependents"] = self.orphan_dependents
return data
def as_json(self):
data = self.serialize()
j = json.dumps(data, indent=4)
return j
def as_yaml(self):
data = self.serialize()
y = yaml.dump(data, default_flow_style=False)
return y
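# --- Editor's usage sketch (not part of the original module) -----------------
# Building a DeleteOptions payload and serializing it; the values are
# illustrative only.
if __name__ == "__main__":
    opts = DeleteOptions()
    opts.grace_period_seconds = 30
    opts.orphan_dependents = True
    print(opts.as_json())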
|
nickpack/reportlab
|
tools/pythonpoint/styles/standard.py
|
Python
|
bsd-3-clause
| 4,262 | 0.005631 |
from reportlab.lib import styles
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.platypus import Preformatted, Paragraph, Frame, \
Image, Table, TableStyle, Spacer
def getParagraphStyles():
"""Returns a dictionary of styles to get you started.
We will provide a way to specify a module of these. Note that
this just includes TableStyles as well as ParagraphStyles for any
tables you wish to use.
"""
stylesheet = {}
ParagraphStyle = styles.ParagraphStyle
para = ParagraphStyle('Normal', None) #the ancestor of all
para.fontName = 'Times-Roman'
para.fontSize = 24
para.leading = 28
stylesheet['Normal'] = para
#This one is spaced out a bit...
para = ParagraphStyle('BodyText', stylesheet['Normal'])
para.spaceBefore = 12
stylesheet['BodyText'] = para
#Indented, for lists
para = ParagraphStyle('Indent', stylesheet['Normal'])
para.leftIndent = 36
para.firstLineIndent = 0
stylesheet['Indent'] = para
para = ParagraphStyle('Centered', stylesheet['Normal'])
para.alignment = TA_CENTER
stylesheet['Centered'] = para
para = ParagraphStyle('BigCentered', stylesheet['Normal'])
para.spaceBefore = 12
para.alignment = TA_CENTER
stylesheet['BigCentered'] = para
para = ParagraphStyle('Italic', stylesheet['BodyText'])
para.fontName = 'Times-Italic'
stylesheet['Italic'] = para
para = ParagraphStyle('Title', stylesheet['Normal'])
para.fontName = 'Times-Roman'
para.fontSize = 48
para.leading = 58
para.alignment = TA_CENTER
stylesheet['Title'] = para
para = ParagraphStyle('Heading1', stylesheet['Normal'])
para.fontName = 'Times-Bold'
para.fontSize = 36
para.leading = 44
para.alignment = TA_CENTER
stylesheet['Heading1'] = para
para = ParagraphStyle('Heading2', stylesheet['Normal'])
para.fontName = 'Times-Bold'
para.fontSize = 28
para.leading = 34
para.spaceBefore = 24
stylesheet['Heading2'] = para
para = ParagraphStyle('Heading3', stylesheet['Normal'])
para.fontName = 'Times-BoldItalic'
para.spaceBefore = 24
stylesheet['Heading3'] = para
para = ParagraphStyle('Heading4', stylesheet['Normal'])
para.fontName = 'Times-BoldItalic'
para.spaceBefore = 6
stylesheet['Heading4'] = para
para = ParagraphStyle('Bullet', stylesheet['Normal'])
para.firstLineIndent = 0
para.leftIndent = 56
para.spaceBefore = 6
para.bulletFontName = 'Symbol'
para.bulletFontSize = 24
para.bulletIndent = 20
stylesheet['Bullet'] = para
para = ParagraphStyle('Definition', stylesheet['Normal'])
#use this for definition lists
para.firstLineIndent = 0
para.leftIndent = 72
para.bulletIndent = 0
para.spac
|
eBefore = 12
para.bulletFontName = 'Helvetica-BoldOblique'
para.bulletFontSize = 24
stylesheet['Definition'] = para
para = ParagraphStyle('Code', stylesheet['Normal'])
para.fontName = 'Courier'
para.fontSize = 16
para.leading = 18
para.l
|
eftIndent = 36
stylesheet['Code'] = para
para = ParagraphStyle('PythonCode', stylesheet['Normal'])
para.fontName = 'Courier'
para.fontSize = 16
para.leading = 18
para.leftIndent = 36
stylesheet['PythonCode'] = para
para = ParagraphStyle('Small', stylesheet['Normal'])
para.fontSize = 12
para.leading = 14
stylesheet['Small'] = para
#now for a table
ts = TableStyle([
('FONT', (0,0), (-1,-1), 'Times-Roman', 24),
('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
('LINEBEFORE', (-1,0), (-1,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT'), #all numeric cells right aligned
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))
])
stylesheet['table1'] = ts
return stylesheet
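# --- Editor's usage sketch (not part of the original module) -----------------
# One way a PythonPoint-style caller might consume this stylesheet; the text
# below is illustrative only.
if __name__ == '__main__':
    sheet = getParagraphStyles()
    flowables = [
        Paragraph("PythonPoint styles demo", sheet['Title']),
        Spacer(1, 0.5 * cm),
        Paragraph("Body text in the spaced-out default style.", sheet['BodyText']),
    ]
    print([f.__class__.__name__ for f in flowables])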
|
olt/mapproxy
|
mapproxy/cache/geopackage.py
|
Python
|
apache-2.0
| 27,948 | 0.003936 |
# This file is part of the MapProxy project.
# Copyright (C) 2011-2013 Omniscale <http://omniscale.de>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR COND
|
ITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations und
|
er the License.
import hashlib
import logging
import os
import re
import sqlite3
import threading
from mapproxy.cache.base import TileCacheBase, tile_buffer, REMOVE_ON_UNLOCK
from mapproxy.compat import BytesIO, PY2, itertools
from mapproxy.image import ImageSource
from mapproxy.srs import get_epsg_num
from mapproxy.util.fs import ensure_directory
from mapproxy.util.lock import FileLock
log = logging.getLogger(__name__)
class GeopackageCache(TileCacheBase):
supports_timestamp = False
def __init__(self, geopackage_file, tile_grid, table_name, with_timestamps=False, timeout=30, wal=False):
self.tile_grid = tile_grid
self.table_name = self._check_table_name(table_name)
self.lock_cache_id = 'gpkg' + hashlib.md5(geopackage_file.encode('utf-8')).hexdigest()
self.geopackage_file = geopackage_file
# XXX timestamps not implemented
self.supports_timestamp = with_timestamps
self.timeout = timeout
self.wal = wal
self.ensure_gpkg()
self._db_conn_cache = threading.local()
@property
def db(self):
if not getattr(self._db_conn_cache, 'db', None):
self.ensure_gpkg()
self._db_conn_cache.db = sqlite3.connect(self.geopackage_file, timeout=self.timeout)
return self._db_conn_cache.db
def cleanup(self):
"""
Close all open connection and remove them from cache.
"""
if getattr(self._db_conn_cache, 'db', None):
self._db_conn_cache.db.close()
self._db_conn_cache.db = None
@staticmethod
def _check_table_name(table_name):
"""
>>> GeopackageCache._check_table_name("test")
'test'
>>> GeopackageCache._check_table_name("test_2")
'test_2'
>>> GeopackageCache._check_table_name("test-2")
'test-2'
>>> GeopackageCache._check_table_name("test3;")
Traceback (most recent call last):
...
ValueError: The table_name test3; contains unsupported characters.
>>> GeopackageCache._check_table_name("table name")
Traceback (most recent call last):
...
ValueError: The table_name table name contains unsupported characters.
@param table_name: A desired name for an geopackage table.
@return: The name of the table if it is good, otherwise an exception.
"""
# Regex string indicating table names which will be accepted.
regex_str = '^[a-zA-Z0-9_-]+$'
if re.match(regex_str, table_name):
return table_name
else:
msg = ("The table name may only contain alphanumeric characters, an underscore, "
"or a dash: {}".format(regex_str))
log.info(msg)
raise ValueError("The table_name {0} contains unsupported characters.".format(table_name))
def ensure_gpkg(self):
if not os.path.isfile(self.geopackage_file):
with FileLock(self.geopackage_file + '.init.lck',
remove_on_unlock=REMOVE_ON_UNLOCK):
ensure_directory(self.geopackage_file)
self._initialize_gpkg()
else:
if not self.check_gpkg():
ensure_directory(self.geopackage_file)
self._initialize_gpkg()
def check_gpkg(self):
if not self._verify_table():
return False
if not self._verify_gpkg_contents():
return False
if not self._verify_tile_size():
return False
return True
def _verify_table(self):
with sqlite3.connect(self.geopackage_file) as db:
cur = db.execute("""SELECT name FROM sqlite_master WHERE type='table' AND name=?""",
(self.table_name,))
content = cur.fetchone()
if not content:
# Table doesn't exist _initialize_gpkg will create a new one.
return False
return True
def _verify_gpkg_contents(self):
with sqlite3.connect(self.geopackage_file) as db:
cur = db.execute("""SELECT * FROM gpkg_contents WHERE table_name = ?"""
, (self.table_name,))
results = cur.fetchone()
if not results:
# Table doesn't exist in gpkg_contents _initialize_gpkg will add it.
return False
gpkg_data_type = results[1]
gpkg_srs_id = results[9]
cur = db.execute("""SELECT * FROM gpkg_spatial_ref_sys WHERE srs_id = ?"""
, (gpkg_srs_id,))
gpkg_coordsys_id = cur.fetchone()[3]
if gpkg_data_type.lower() != "tiles":
log.info("The geopackage table name already exists for a data type other than tiles.")
raise ValueError("table_name is improperly configured.")
if gpkg_coordsys_id != get_epsg_num(self.tile_grid.srs.srs_code):
log.info(
"The geopackage {0} table name {1} already exists and has an SRS of {2}, which does not match the configured" \
" Mapproxy SRS of {3}.".format(self.geopackage_file, self.table_name, gpkg_coordsys_id,
get_epsg_num(self.tile_grid.srs.srs_code)))
raise ValueError("srs is improperly configured.")
return True
def _verify_tile_size(self):
with sqlite3.connect(self.geopackage_file) as db:
cur = db.execute(
"""SELECT * FROM gpkg_tile_matrix WHERE table_name = ?""",
(self.table_name,))
results = cur.fetchall()
            tile_size = self.tile_grid.tile_size
            if not results:
                # There is no tile conflict. Return to allow the creation of new tiles.
                return True
            results = results[0]
gpkg_table_name, gpkg_zoom_level, gpkg_matrix_width, gpkg_matrix_height, gpkg_tile_width, gpkg_tile_height, \
gpkg_pixel_x_size, gpkg_pixel_y_size = results
resolution = self.tile_grid.resolution(gpkg_zoom_level)
if gpkg_tile_width != tile_size[0] or gpkg_tile_height != tile_size[1]:
log.info(
"The geopackage {0} table name {1} already exists and has tile sizes of ({2},{3})"
" which is different than the configure tile sizes of ({4},{5}).".format(self.geopackage_file,
self.table_name,
gpkg_tile_width,
gpkg_tile_height,
tile_size[0],
tile_size[1]))
log.info("The current mapproxy configuration is invalid for this geopackage.")
raise ValueError("tile_size is improperly configured.")
if not is_close(gpkg_pixel_x_size, resolution) or not is_close(gpkg_pixel_y_size, resolution):
log.info(
"The geopackage {0} table name {1} already exists and level {2} a resolution of ({3:.13f},{4:.13f})"
" which is different than the configured resolution of ({5:.13f},{6:.13f}).".format(self.geopackage_file,
self.table_name,
|
orbitfp7/nova
|
nova/api/ec2/__init__.py
|
Python
|
apache-2.0
| 25,652 | 0.000039 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import hashlib
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import timeutils
import requests
import six
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import context as common_context
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.IntOpt('lockout_attempts',
default=5,
help='Number of failed auths before lockout.'),
cfg.IntOpt('lockout_minutes',
default=15,
help='Number of minutes to lockout if triggered.'),
cfg.IntOpt('lockout_window',
default=15,
help='Number of minutes for lockout window.'),
cfg.StrOpt('keystone_ec2_url',
default='http://localhost:5000/v2.0/ec2tokens',
help='URL to get token from ec2 request.'),
cfg.BoolOpt('ec2_private_dns_show_ip',
default=False,
help='Return the IP address as private dns hostname in '
'describe instances'),
cfg.BoolOpt('ec2_strict_validation',
default=True,
help='Validate security group names'
' according to EC2 specification'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
cfg.BoolOpt('keystone_ec2_insecure', default=False, help='Disable SSL '
'certificate verification.'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
CONF.import_group('ssl', 'nova.openstack.common.sslutils')
# Fault Wrapper around all EC2 requests
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
LOG.exception(_LE("FaultWrapper: %s"), ex)
return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
def log_request_completion(self, response, request, start):
apireq = request.environ.get('ec2.request', None)
if apireq:
controller = apireq.controller
action = apireq.action
else:
controller = None
action = None
ctxt = request.environ.get('nova.context', None)
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
"%s%s" % (request.script_name, request.path_info),
controller,
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt) # noqa
class Lockout(wsgi.Middleware):
"""Lockout for x minutes on y failed auths in a z minute period.
x = lockout_minutes flag
y = lockout_attempts flag
z = lockout_window flag
Uses memcached if lockout_memcached_servers flag is set, otherwise it
uses a very simple in-process cache. Due to the simplicity of
the implementation, the timeout window is started with the first
failed request, so it will block if there are x failed logins within
that period.
There is a possible race condition where simultaneous requests could
sneak in before the lockout hits, but this is extremely rare and would
only result in a couple of extra failed attempts.
"""
def __init__(self, application):
"""middleware can use fake for testing."""
self.mc = memorycache.get_client()
super(Lockout, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= CONF.lockout_attempts:
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(explanation=detail)
res = req.get_response(self.application)
if res.status_int == 403:
failures = self.mc.incr(failures_key)
if failures is None:
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
elif failures >= CONF.lockout_attempts:
LOG.warning(_LW('Access key %(access_key)s has had '
'%(failures)d failed authentications and '
'will be locked out for %(lock_mins)d '
'minutes.'),
{'access_key': access_key,
'failures': failures,
'lock_mins': CONF.lockout_minutes})
self.mc.set(failures_key, str(failures),
time=CONF.lockout_minutes * 60)
return res
class EC2KeystoneAuth(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to context."""
def _get_signature(self, req):
"""Extract the signature from the request.
This can be a get/post variable or for version 4 also in a header
called 'Authorization'.
- params['Signature'] == version 0,1,2,3
- params['X-Amz-Signature'] == version 4
- header 'Authorization' == version 4
"""
sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
if sig is None and 'Authorization' in req.headers:
auth_str = req.headers['Authorization']
sig = auth_str.partition("Signature=")[2].split(',')[0]
return sig
def _get_access(self, req):
"""Extract the access key identifier.
For version 0/1/2/3 this is passed as the AccessKeyId parameter, for
version 4 it is either an X-Amz-Credential parameter or a Credential=
field in the 'Authorization' header string.
"""
access = req.params.get('AWSAccessKeyId')
if access is None:
cred_param = req.params.get('X-Amz-Credential')
if cred_param:
access = cred_param.split("/")[0]
if access is None and 'Authorization' in req.headers:
auth_str = req.headers['Authorization']
|
apark263/tensorflow
|
tensorflow/compiler/tests/image_ops_test.py
|
Python
|
apache-2.0
| 38,279 | 0.006923 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
def GenerateNumpyRandomRGB(shape):
# Only generate floating points that are fractions like n / 256, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
return np.random.randint(0, 256, shape) / 256.
class RGBToHSVTest(xla_test.XLATestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in self.float_types:
inp = GenerateNumpyRandomRGB(shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session() as sess:
batch0 = array_ops.placeholder(nptype, shape=shape)
with self.test_scope():
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
with self.test_scope():
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2],
{batch0: inp})
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllCloseAccordingToType(
batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in self.float_types:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv = image_ops.rgb_to_hsv(placeholder)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(rgb_tf, rgb_np, bfloat16_atol=0.03)
def testRGBToHSVNumpy(self):
"""Tests the RGB to HSV conversion matches a reference implementation."""
for nptype in self.float_types:
rgb_flat = GenerateNumpyRandomRGB((64, 3)).astype(nptype)
rgb_np = rgb_flat.reshape(4, 4, 4, 3)
hsv_np = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_np = hsv_np.reshape(4, 4, 4, 3)
with self.cached_session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv_op = image_ops.rgb_to_hsv(placeholder)
hsv_tf = hsv_op.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
class AdjustContrastTest(xla_test.XLATestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_np.shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = image_ops.adjust_contrast(flt_x, contrast_factor)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllClose(y_tf, y_np, 1e-6)
def testFloatContrast(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testBatchContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
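# The reference implementation below maps x -> mean + contrast_factor * (x - mean),
# with the mean taken per image over height and width. Worked example (illustrative
# numbers, not test data): pixels [1., 3.] have mean 2., so contrast_factor=2.0
# yields [0., 4.].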
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session():
x = array_ops.placeholder(np.float32)
with self.test_scope():
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval({x: x_np})
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustHueTest(xla_test.XLATestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_sh
|
hcwiley/the-front
|
the_front/the_front/artist/migrations/0005_auto__del_field_artistmedia_is_default_image__del_field_artistmedia_na.py
|
Python
|
gpl-2.0
| 8,560 | 0.006075 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ArtistMedia.is_default_image'
db.delete_column(u'artist_artistmedia', 'is_default_image')
# Deleting field 'ArtistMedia.name'
db.delete_column(u'artist_artistmedia', 'name')
# Deleting field 'ArtistMedia.video_link'
db.delete_column(u'artist_artistmedia', 'video_link')
# Deleting field 'ArtistMedia.full_res_image'
db.delete_column(u'artist_artistmedia', 'full_res_image')
# Deleting field 'ArtistMedia.image'
db.delete_column(u'artist_artistmedia', 'image')
# Deleting field 'ArtistMedia.id'
db.delete_column(u'artist_artistmedia', u'id')
# Deleting field 'ArtistMedia.thumbnail'
db.delete_column(u'artist_artistmedia', 'thumbnail')
# Adding field 'ArtistMedia.frontmedia_ptr'
db.add_column(u'artist_artistmedia', u'frontmedia_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(default=-1, to=orm['front_material.FrontMedia'], unique=True, primary_key=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'ArtistMedia.is_default_image'
db.add_column(u'artist_artistmedia', 'is_default_image',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'ArtistMedia.name'
db.add_column(u'artist_artistmedia', 'name',
self.gf('django.db.models.fields.CharField')(default='', max_length=100),
keep_default=False)
# Adding field 'ArtistMedia.video_link'
db.add_column(u'artist_artistmedia', 'video_link',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.full_res_image'
db.add_column(u'artist_artistmedia', 'full_res_image',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.image'
db.add_column(u'artist_artistmedia', 'image',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.id'
db.add_column(u'artist_artistmedia', u'id',
self.gf('django.db.models.fields.AutoField')(default=1, primary_key=True),
keep_default=False)
# Adding field 'ArtistMedia.thumbnail'
db.add_column(u'artist_artistmedia', 'thumbnail',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Deleting field 'ArtistMedia.frontmedia_ptr'
db.delete_column(u'artist_artistmedia', u'frontmedia_ptr_id')
models = {
u'artist.artist': {
'Meta': {'object_name': 'Artist'},
'artist_statement': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'artist.artistmedia': {
'Meta': {'object_name': 'ArtistMedia', '_ormbases': [u'front_material.FrontMedia']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['artist.Artist']"}),
u'frontmedia_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['front_material.FrontMedia']", 'unique': 'True', 'primary_key': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'front_material.frontmedia': {
'Meta': {'object_name': 'FrontMedia'},
'full_res_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_default_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', []
|
chatelak/RMG-Py
|
rmgpy/tools/canteraTest.py
|
Python
|
mit
| 3,597 | 0.020573 |
import unittest
import os
import numpy
from rmgpy.tools.canteraModel import findIgnitionDelay, CanteraCondition, Cantera
from rmgpy.quantity import Quantity
import rmgpy
class CanteraTest(unittest.TestCase):
def testIgnitionDelay(self):
"""
Test that findIgnitionDelay() works.
"""
t = numpy.arange(0,5,0.5)
P = numpy.array([0,0.33,0.5,0.9,2,4,15,16,16.1,16.2])
OH = numpy.array([0,0.33,0.5,0.9,2,4,15,16,7,2])
CO = OH*0.9
t_ign = findIgnitionDelay(t,P)
self.assertEqual(t_ign,2.75)
t_ign = findIgnitionDelay(t,OH,'maxHalfConcentration')
self.assertEqual(t_ign,3)
t_ign = findIgnitionDelay(t,[OH,CO], 'maxSpeciesConcentrations')
self.assertEqual(t_ign,3.5)
def testRepr(self):
"""
Test that the repr function for a CanteraCondition object can reconstitute
the same object
"""
reactorType='IdealGasReactor'
molFrac={'CC': 0.05, '[Ar]': 0.95}
P=(3,'atm')
T=(1500,'K')
terminationTime=(5e-5,'s')
condition = CanteraCondition(reactorType,
terminationTime,
molFrac,
T0=T,
P0=P)
reprCondition=eval(condition.__repr__())
self.assertEqual(reprCondition.T0.value_si,Quantity(T).value_si)
self.assertEqual(reprCondition.P0.value_si,Quantity(P).value_si)
self.assertEqual(reprCondition.V0,None)
self.assertEqual(reprCondition.molFrac,molFrac)
class RMGToCanteraTest(unittest.TestCase):
"""
Contains unit tests for the conversion of RMG species and reaction objects to Cantera objects.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
from rmgpy.chemkin import loadChemkinFile
folder = os.path.join(os.path.dirname(rmgpy.__file__),'tools/data/various_kinetics')
chemkinPath = os.path.join(folder, 'chem_annotated.inp')
dictionaryPath = os.path.join(folder, 'species_dictionary.txt')
transportPath = os.path.join(folder, 'tran.dat')
species, reactions = loadChemkinFile(chemkinPath, dictionaryPath,transportPath)
self.rmg_ctSpecies = [spec.toCantera() for spec in species]
self.rmg_ctReactions = []
for rxn in reactions:
convertedReactions = rxn.toCantera(species)
if isinstance(convertedReactions,list):
self.rmg_ctReactions.extend(convertedReactions)
else:
self.rmg_ctReactions.append(convertedReactions)
job = Cantera()
job.loadChemkinModel(chemkinPath, transportFile=transportPath,quiet=True)
self.ctSpecies = job.model.species()
self.ctReactions = job.model.reactions()
def testSpeciesConversion(self):
"""
Test that species objects convert properly
"""
from rmgpy.tools.canteraModel import checkEquivalentCanteraSpecies
for i in range(len(self.ctSpecies)):
self.assertTrue(checkEquivalentCanteraSpecies(self.ctSpecies[i],self.rmg_ctSpecies[i]))
def testReactionConversion(self):
"""
Test that reaction objects convert properly
"""
from rmgpy.tools.canteraModel import checkEquivalentCanteraReaction
for i in range(len(self.ctReactions)):
self.assertTrue(checkEquivalentCanteraReaction(self.ctReactions[i],self.rmg_ctReactions[i]))
|
InterestingLab/elasticmanager
|
cluster/models.py
|
Python
|
mit
| 1,207 | 0.000829 |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from elasticsearch import Elasticsearch
@python_2_unicode_compatible
class ElasticCluster(models.Model):
class Meta:
db_table = 'cluster_elastic_cluster'
# cluster name
name = models.CharField(max_length=128)
host = models.CharField(max_length=256)
port = models.IntegerField()
def __str__(self):
return '{name} {host}:{port}'.format(name=self.name, host=self.host, port=self.port)
def address(self):
return '{host}:{port}'.format(host=self.host, port=self.port)
def client(self, timeout=30):
return Elasticsearch(self.address(), timeout=timeout)
def info(self):
info = self.client().info()
ret = {
'cluster_name': info['cluster_name'],
'elasticsearch_version': info['version']['number'],
'lucene_version': info['version']['lucene_version'],
}
return ret
def health(self):
es = self.client()
return es.cluster.health()
def pending_tasks(self):
es = self.client()
tasks = es.cluster.pending_tasks()
return len(tasks), tasks
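# Minimal usage sketch (assumes a saved cluster row and a reachable node; the
# name, host and port below are illustrative, not part of this project):
#
#   cluster = ElasticCluster.objects.get(name='logging')
#   cluster.address() # -> '10.0.0.5:9200'
#   cluster.info() # -> {'cluster_name': ..., 'elasticsearch_version': ..., 'lucene_version': ...}
#   cluster.health()['status'] # -> 'green', 'yellow' or 'red'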
|
dripton/ampchat
|
chatserver.py
|
Python
|
mit
| 3,458 | 0.001446 |
#!/usr/bin/env python
import sys
from twisted.protocols import amp
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory
from twisted.python import usage
from twisted.cred.checkers import FilePasswordDB
from twisted.cred.portal import Portal
from twisted.cred import credentials
from Realm import Realm, IAvatar
import commands
default_port = 65432
class Options(usage.Options):
optParameters = [
["port", "p", default_port, "server port"],
]
class ChatProtocol(amp.AMP):
@commands.Login.responder
def login(self, username, password):
"""Attempt to login."""
if username in self.factory.username_to_protocol:
raise commands.LoginError("User '%s' already logged in" % username)
creds = credentials.UsernamePassword(username, password)
deferred = self.factory.portal.login(creds, None, IAvatar)
deferred.addCallback(self.login_succeeded)
deferred.addErrback(self.login_failed)
return deferred
def login_succeeded(self, (avatar_interface, avatar, logout)):
name = avatar.name
self.username = name
self.factory.username_to_protocol[name] = self
# Tell all users about this user
for protocol in self.factory.username_to_protocol.itervalues():
protocol.callRemote(commands.AddUser, user=name)
# Tell this user about all other users
for username in self.factory.username_to_protocol:
if username != name:
self.callRemote(commands.AddUser, user=username)
return {}
def login_failed(self, failure):
raise commands.LoginError("Incorrect username or password")
@commands.SendToUsers.responder
def send_to_users(self, message, usernames):
for username in usernames:
protocol = self.factory.username_to_protocol.get(username)
if protocol:
protocol.callRemote(commands.Send, message=message,
sender=self.username)
# Also show it to the sender
if self.username not in usernames:
self.callRemote(commands.Send, message=message,
sender=self.username)
return {}
@commands.SendToAll.responder
def send_to_all(self, message):
for protocol in self.factory.username_to_protocol.itervalues():
protocol.callRemote(commands.Send, message=message,
sender=self.username)
return {}
def connectionLost(self, unused):
try:
del self.factory.username_to_protocol[self.username]
except KeyError:
pass
for protocol in self.factory.username_to_protocol.itervalues():
protocol.callRemote(commands.DelUser, user=self.username)
class ChatFactory(ServerFactory):
protocol = ChatProtocol
def __init__(self, portal):
self.portal = portal
self.username_to_protocol = {}
def main():
options = Options()
try:
options.parseOptions()
except usage.UsageError, err:
print "%s: %s" % (sys.argv[0], err)
print "%s: Try --help for usage details" % sys.argv[0]
sys.exit(1)
port = int(options["port"])
realm = Realm()
checker = FilePasswordDB("passwd.txt")
portal = Portal(realm, [checker])
factory = ChatFactory(portal)
reactor.listenTCP(port, factory)
reactor.run()
if __name__ == "__main__":
main()
|
slgphantom/RocketMap
|
pogom/schedulers.py
|
Python
|
agpl-3.0
| 46,757 | 0.000021 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Schedulers determine how worker's queues get filled. They control which
locations get scanned, in what order, at what time. This allows further
optimizations to be easily added, without having to modify the existing
overseer and worker thread code.
Schedulers will receive:
queues - A list of queues for the workers they control. For now, this is a
list containing a single queue.
status - A list of status dicts for the workers. Schedulers can use this
information to make more intelligent scheduling decisions.
Useful values include:
- last_scan_date: unix timestamp of when the last scan was
completed
- location: [lat,lng,alt] of the last scan
args - The configuration arguments. This may not include all of the arguments,
just ones that are relevant to this scheduler instance (eg. if
multiple locations become supported, the args passed to the
scheduler will only contain the parameters for the location
it handles)
Schedulers must fill the queues with items to search.
Queue items are a list containing:
[step, (latitude, longitude, altitude),
appears_seconds, disappears_seconds]
Where:
- step is the step number. Used only for display purposes.
- (latitude, longitude, altitude) is the location to be scanned.
- appears_seconds is the unix timestamp of when the pokemon next appears
- disappears_seconds is the unix timestamp of when the
pokemon next disappears
appears_seconds and disappears_seconds are used to skip scans that are too
late, and wait for scans the worker is early for. If a scheduler doesn't
have a specific time a location needs to be scanned, it should set
both to 0.
If implementing a new scheduler, place it before SchedulerFactory, and
add it to __scheduler_classes
'''
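# Illustrative queue item matching the format described above (coordinates and
# timestamps are made up):
#
#   item = (12,                         # step number, display only
#           (40.7484, -73.9857, 10.0),  # latitude, longitude, altitude
#           1500000000,                 # appears_seconds, 0 if the scan is not time-bound
#           1500001800)                 # disappears_seconds, 0 if the scan is not time-bound
#   queues[0].put(item)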
import itertools
import logging
import math
import geopy
import json
import time
import sys
from timeit import default_timer
from threading import Lock
from copy import deepcopy
import traceback
from collections import Counter
from queue import Empty
from operator import itemgetter
from datetime import datetime, timedelta
from .transform import get_new_coords
from .models import (hex_bounds, Pokemon, SpawnPoint, ScannedLocation,
ScanSpawnPoint)
from .utils import now, cur_sec, cellid, date_secs, equi_rect_distance
from .altitude import get_altitude
log = logging.getLogger(__name__)
# Simple base class that all other schedulers inherit from.
# Most of these functions should be overridden in the actual scheduler classes.
# Not all scheduler methods will need to use all of the functions.
class BaseScheduler(object):
def __init__(self, queues, status, args):
self.queues = queues
self.status = status
self.args = args
self.scan_location = False
self.size = None
self.ready = False
# Schedule function fills the queues with data.
def schedule(self):
log.warning('BaseScheduler does not schedule any items')
# location_changed function is called whenever the location being
# scanned changes.
# scan_location = (lat, lng, alt)
def location_changed(self, scan_location, dbq):
self.scan_location = scan_location
self.empty_queues()
# scanning_paused function is called when scanning is paused from the UI.
# The default function will empty all the queues.
# Note: This function is called repeatedly while scanning is paused!
def scanning_paused(self):
self.empty_queues()
def getsize(self):
return self.size
def get_overseer_message(self):
nextitem = self.queues[0].queue[0]
message = 'Processing search queue, next item is {:6f},{:6f}'.format(
nextitem[1][0], nextitem[1][1])
# If times are specified, print the time of the next queue item, and
# how many seconds ahead/behind realtime
if nextitem[2]:
message += ' @ {}'.format(
time.strftime('%H:%M:%S', time.localtime(nextitem[2])))
if nextitem[2] > now():
message += ' ({}s ahead)'.format(nextitem[2] - now())
else:
message += ' ({}s behind)'.format(now() - nextitem[2])
return message
# check if time to refresh queue
def time_to_refresh_queue(self):
return self.queues[0].empty()
def task_done(self, *args):
return self.queues[0].task_done()
# Return the next item in the queue
def next_item(self, search_items_queue):
step, step_location, appears, leaves = self.queues[0].get()
remain = appears - now() + 10
messages = {
'wait': 'Waiting for item from queue.',
'early': 'Early for {:6f},{:6f}; waiting {}s...'.format(
step_location[0], step_location[1], remain),
'late': 'Too late for location {:6f},{:6f}; skipping.'.format(
step_location[0], step_location[1]),
'search': 'Searching at {:6f},{:6f},{:6f}.'.format(
step_location[0], step_location[1], step_location[2]),
'invalid': ('Invalid response at {:6f},{:6f}, ' +
'abandoning location.').format(step_location[0],
step_location[1])
}
return step, step_location, appears, leaves, messages
# How long to delay since last action
def delay(self, *args):
return self.args.scan_delay # always scan delay time
# Function to empty all queues in the queues list
def empty_queues(self):
self.ready = False
for queue in self.queues:
if not queue.empty():
try:
while True:
queue.get_nowait()
except Empty:
pass
# Hex Search is the classic search method, with the pokepath modification,
# searching in a hex grid around the center location.
class HexSearch(BaseScheduler):
# Call base initialization, set step_distance.
def __init__(self, queues, status, args):
BaseScheduler.__init__(self, queues, status, args)
# If we are only scanning for pokestops/gyms, the scan radius can be
# 450m. Otherwise 70m.
if self.args.no_pokemon:
self.step_distance = 0.450
else:
self.step_distance = 0.070
self.step_limit = args.step_limit
# This will hold the list of locations to scan so it can be reused,
# instead of recalculating on each loop.
self.locations = False
# On location change, empty the current queue and the locations list
def location_changed(self, scan_location, dbq):
self.scan_location = scan_location
self.empty_queues()
self.locations = False
# Generates the list of locations to scan.
def _generate_locations(self):
NORTH = 0
EAST = 90
SOUTH = 180
WEST = 270
# Dist between column centers.
xdist = math.sqrt(3) * self.step_distance
ydist = 3 * (self.step_distance / 2) # Dist between row centers.
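# Descriptive note: for circles of radius step_distance packed hexagonally,
# column centers sit sqrt(3)*r apart and row centers 1.5*r apart, which is
# exactly the xdist/ydist computed above.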
results = []
results.append((self.scan_location[0], self.scan_location[1], 0))
if self.step_limit > 1:
loc = self.scan_location
# Upper part.
ring = 1
while ring < self.step_limit:
loc = get_new_coords(
loc, xdist, WEST if ring % 2 == 1 else EAST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(
loc, xdist / 2, EAST if ring % 2 == 1 else WEST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(
loc, xdist, EAST if ring % 2 == 1 else WEST)
results.append((loc[0], loc[1], 0))
|
lczub/TestLink-API-Python-client
|
test/conftest.py
|
Python
|
apache-2.0
| 2,865 | 0.008028 |
#! /usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright 2018-2019 Luiko Czub, TestLink-API-Python-client developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
import os.path
import pytest
from testlink import TestlinkAPIClient, TestlinkAPIGeneric, TestLinkHelper
# example text file attachment = this python file
# why not using os.path.realpath(__file__)
# -> cause __file__ could be compiled python file *.pyc, if the test run is
# repeated without changing the test code
ATTACHMENT_EXAMPLE_TEXT= os.path.join(os.path.dirname(__file__),
os.path.basename(__file__))
#attachemantFile = open(ATTACHMENT_EXAMPLE_TEXT, 'r')
@pytest.fixture()
def attachmentFile():
''' open readonly attachment sample before test and close it afterwards '''
aFile = open(ATTACHMENT_EXAMPLE_TEXT, 'r')
yield aFile
aFile.close()
@pytest.fixture(scope='session')
def api_helper_class():
return TestLinkHelper
@pytest.fixture(scope='session')
def api_generic_client(api_helper_class):
''' Init TestlinkAPIGeneric Client with connection parameters defined in
environment variables
TESTLINK_API_PYTHON_SERVER_URL and TESTLINK_API_PYTHON_DEVKEY
'''
return api_helper_class().connect(TestlinkAPIGeneric)
@pytest.fixture(scope='session')
def api_general_client(api_helper_class):
''' Init TestlinkAPIClient Client with connection parameters defined in
environment variables
TESTLINK_API_PYTHON_SERVER_URL and TESTLINK_API_PYTHON_DEVKEY
'''
return api_helper_class().connect(TestlinkAPIClient)
@pytest.fixture(scope='session', params=[TestlinkAPIGeneric, TestlinkAPIClient])
def api_client_class(request):
''' all variations of Testlink API Client classes '''
return request.param
@pytest.fixture(scope='session')
def api_client(api_client_class, api_helper_class):
''' Init Testlink API Client class defined in fixtures api_client_class with
connection parameters defined in environment variables
TESTLINK_API_PYTHON_SERVER_URL and TESTLINK_API_PYTHON_DEVKEY
Tests will be call for each Testlink API Client class, defined in
fixtures parameter list
'''
return api_helper_class().connect(api_client_class)
|
mpg-age-bioinformatics/bit
|
bit/config.py
|
Python
|
mit
| 9,252 | 0.016105 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import getpass
from os.path import expanduser
import stat
import shutil
import bit.git as git
structure="\n\
/file_system_a\n\
|\n\
'- data\n\
|\n\
'- projects\n\
|\n\
|- Company_A\n\
| |\n\
| |- CA_project_y\n\
| |\n\
| |- CA_project_x\n\
| |\n\
| |- results\n\
| |- models\n\
| |- scripts\n\
| |- tmp\n\
| |- slurm_logs\n\
| '- wiki\n\
|\n\
'- Company_B\n\
|\n\
'- CB_project_n\n\n\
absolute path to projects = /file_system_a/data/projects/"
requirements=["owncloud_address","owncloud_upload_folder",\
"owncloud_download_folder","owncloud_user",\
"owncloud_pass","github_address",\
"github_organization","github_user",\
"github_pass","local_path", "user_group" ]
special_reqs=["owncloud_user","owncloud_pass",\
"github_user","github_pass"]
start_reqs=["github_address","github_organization",\
"github_user","github_pass","local_path"]
def get_owncloud_address():
owncloud_address=str(input("Please give in your ownCloud address (eg. http://domain.tld/owncloud): ")) or None
return owncloud_address
def get_owncloud_upload_folder():
owncloud_upload_folder=str(input("Please give in the folder in your ownCloud that will be used to deliver data to users.\nYou can share this folder with your colleagues so that everybody delivers data through the same folder. (default: DELIVERY_SERVICE):")) or "DELIVERY_SERVICE"
return owncloud_upload_folder
def get_owncloud_download_folder():
owncloud_download_folder=str(input("Please give in the folder in your ownCloud that will be used to retrieve data from users.\nYou can share this folder with your colleagues so that everybody retrieves data through the same folder. (default: DROPBOX):")) or "DROPBOX"
return owncloud_download_folder
def get_owncloud_user(config_file=None):
if config_file:
owncloud_user=str(input("Please give in your ownCloud user name or press Enter if you do not want to save this information on the config file: ")) or None
else:
owncloud_user=str(input("Please give in your ownCloud user name: ")) or None
return owncloud_user
def get_owncloud_pass(config_file=None):
if config_file:
owncloud_pass=str(getpass.getpass(prompt="Please give in your ownCloud password or press Enter if you do not want to save this information on the config file: ")) or None
else:
owncloud_pass=str(getpass.getpass(prompt="Please give in your ownCloud password: ")) or None
return owncloud_pass
def get_github_address():
github_address=str(input("Github server address (default: https://github.com): ") or "https://github.com")
return github_address
def get_github_organization():
github_organization=str(input("Your GitHub organization name (eg. mpg-age-bioinformatics for https://github.com/mpg-age-bioinformatics): ")) or None
return github_organization
def get_github_user(config_file=None,gitssh=None):
if not gitssh:
if config_file:
github_user=str(input("Please give in your user name for your github server or press Enter if you do not want to save this information on the config file: ")) or None
else:
github_user=str(input("Please give in your user name for your github server: ")) or None
else:
github_user=None
return github_user
def get_github_pass(config_file=None,gitssh=None):
if not gitssh:
if config_file:
github_pass=str(getpass.getpass(prompt="Please give in your password or access token (infos on: https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/) for your github server or press Enter if you do not want to save this information on the config file: ")) or None
else:
github_pass=str(getpass.getpass(prompt="Please give in your password or access token for your github server: ")) or None
else:
github_pass=None
return github_pass
def get_local_path(structure=structure):
local_path=str(input("The bermuda information triangle works on the basis that all your projects are located in the same path and have a parent subpath in your local machine ie. %s\n Please give in the absolute path to your projects folder: " %structure ) ) or None
return local_path
def get_user_group():
user_group=str(input("If you are using ACLs to give your group members access to this project please give in the users that will have read write access to every projects top folders. eg. userA,userB,userC -- DO NOT forger to give in your own user name: ")) or None
if user_group:
user_group=user_group.split(",")
return user_group
def get_github_api(github_address):
if "github.com" in github_address:
github_api="https://api.github.com/orgs/"
else:
github_api=github_address+"/api/v3/orgs/"
return github_api
def make_bitconfig(require_func=requirements,special_reqs=special_reqs):
configdic={}
configdic=check_reqs(require_func,configdic,config_file=True, gitssh=None)
uhome=expanduser("~")+"/"
configfile=open(uhome+".bit_config","w+")
with open(uhome+".bit_config", 'w') as configfile:
json.dump(configdic, configfile)
os.chmod(uhome+".bit_config", stat.S_IRWXU )
print("Your bit config file as been generated:")
for c in configdic:
if "pass" not in c:
print( c, configdic.get(c) )
sys.stdout.flush()
elif configdic.get(c) == None:
print(c, configdic.get(c) )
sys.stdout.flush()
else:
print(c, "*")
sys.stdout.flush()
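# Illustrative result (actual values depend on the answers given above): after
# this runs, ~/.bit_config holds a single JSON object such as
#   {"github_address": "https://github.com", "local_path": "/data/projects", ...}
# and is chmod'ed to owner-only access (stat.S_IRWXU == 0700).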
def read_bitconfig(showit=None,bit_config=".bit_config"):
uhome=expanduser("~")+"/"
with open(uhome+bit_config, 'r') as configfile:
configdic=json.load(configfile)
if showit:
for c in configdic:
if "pass" not in c:
print(c, configdic.get(c))
sys.stdout.flush()
elif configdic.get(c) == None:
print(c, configdic.get(c))
sys.stdout.flush()
else:
print(c, "*")
sys.stdout.flush()
return configdic
def check_reqs(requirements,configdic,config_file=None, gitssh=None):
if "owncloud_address" in requirements:
configdic["owncloud_address"]=get_owncloud_address()
if "owncloud_upload_folder" in requirements:
configdic["owncloud_upload_folder"]=get_owncloud_upload_folder()
if "owncloud_download_folder" in requirements:
configdic["owncloud_download_folder"]=get_owncloud_download_folder()
if "owncloud_user" in requirements:
configdic["owncloud_user"]=get_owncloud_user(config_file=config_file)
if "owncloud_pass" in requirements:
configdic["owncloud_pass"]=get_owncloud_pass(config_file=config_file)
if "github_address" in requirements:
configdic["github_address"]=get_github_address()
if "github_organization" in requirements:
configdic["github_organization"]=get_github_organization()
if "github_user" in requirements:
configdic["github_user"]=get_github_user(config_file=config_file,gitssh=gitssh )
if "github_pass" in requirements:
configdic["github_pass"]=get_github_pass(config_file=config_file,gitssh=gitssh )
if "local_path" in requirements:
configdic["local_path"]=get_local_path()
if "user_group" in requirements:
configdic["user_group"]=get_user_group()
return configdic
def init_user(path_to_project,github_address,github_organization,github_repo,github_user=None,github_pass=None,gitssh=None):
user_name=getpass.getuser()
if not os.path.exists(path_to_project):
os.makedirs(path_to_project)
response=git.git_clone(path_to_project+"/scripts."+user_name , github_address, github_organiz
|
christoffkok/auxi.0
|
src/tests.py
|
Python
|
lgpl-3.0
| 2,865 | 0.000349 |
#!/usr/bin/env python3
"""
This module runs all the tests of the auxi package at once.
"""
import unittest
from auxi.core.objects_test import ObjectUnitTester
from auxi.core.objects_test import NamedObjectUnitTester
from auxi.core.time_test import ClockUnitTester
from auxi.tools.chemistry.stoichiometry_test import StoichFunctionTester
from auxi.tools.chemistry.thermochemistry_test import ThermoFunctionTester
from auxi.tools.materialphysicalproperties.core_test import DataSetTester
from auxi.tools.materialphysicalproperties.idealgas_test \
import BetaTTester, RhoTTester, RhoTPTester, RhoTPxTester
from auxi.tools.materialphysicalproperties.polynomial_test \
import PolynomialModelTTester
from auxi.tools.transportphenomena.heattransfer.naturalconvection_test \
import IsothermalFlatSurface_RegionTester, IsothermalFlatSurfaceTester
from auxi.tools.transportphenomena.dimensionlessquantities_test \
import DimensionlessQiantitiesTester
from auxi.modelling.process.materials.chem_test \
import ChemMaterialUnitTester, ChemMaterialPackageUnitTester
from auxi.modelling.process.materials.thermo_test \
import ThermoMaterialUnitTester
# from auxi.modelling.process.materials.thermo_test \
# import ThermoMaterialPackageUnitTester
from auxi.modelling.process.materials.psd_test \
import PsdMaterialUnitTester, PsdMaterialPackageUnitTester
from auxi.modelling.process.materials.slurry_test \
import SlurryMaterialUnitTester, SlurryMaterialPackageUnitTester
# MODELLING.FINANCIAL
from auxi.modelling.financial.des_test import GeneralLedgerAccountUnitTester
from auxi.modelling.financial.des_test import TransactionUnitTester
from auxi.modelling.financial.des_test import TransactionTemplateUnitTester
from auxi.modelling.financial.des_test import GeneralLedgerStructureUnitTester
from auxi.modelling.financial.des_test import GeneralLedgerUnitTester
from auxi.modelling.financial.reporting_test import GeneralLedgerStructureUnitTester
from auxi.modelling.financial.reporting_test import TransactionListUnitTester
# MODELLING.BUSINESS
from auxi.modelling.business.structure_test import ActivityUnitTester
from auxi.modelling.business.structure_test import ComponentUnitTester
from auxi.modelling.business.structure_test import EntityUnitTester
from auxi.modelling.business.basic_test import BasicActivityUnitTester
from auxi.modelling.business.basic_test import BasicLoanActivityUnitTester
from auxi.modelling.business.models_test import TimeBasedModelUnitTester
__version__ = '0.3.2'
__license__ = 'LGPL v3'
__copyright__ = 'Copyright 2016, Ex Mente Technologies (Pty) Ltd'
__author__ = 'Christoff Kok, Johan Zietsman'
__credits__ = ['Christoff Kok', 'Johan Zietsman']
__maintainer__ = 'Christoff Kok'
__email__ = 'christoff.kok@ex-mente.co.za'
__status__ = 'Planning'
if __name__ == '__main__':
unittest.main()
|
vim-awesome/vim-awesome
|
web/api/api.py
|
Python
|
mit
| 7,261 | 0.001102 |
import itertools
import json
import re
import flask
from flask import request
from web.cache import cache
import rethinkdb as r
import web.api.api_util as api_util
import db
import util
api = flask.Blueprint("api", __name__, url_prefix="/api")
r_conn = db.util.r_conn
def _should_skip_get_plugins_cache():
""
|
"Whether the current request to /api/plugins should not be cached."""
|
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
# Only cache empty searches for now.
# TODO(david): Also cache simple category and tag searches. May also want
# to actually use a proper cache backend like Redis so we can
# arbitrarily cache (right now we use an in-memory cache).
should_cache = search == '' and (1 <= page <= 10)
return not should_cache
def _make_get_plugins_cache_key():
"""Get a cache key for the /api/plugins route.
By default this is just request.path which ignores query params.
"""
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
return '%s_%s_%s' % (request.path, page, search)
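# Illustrative examples of generated keys (query values made up):
#   page=1, no query     -> '/api/plugins_1_'
#   page=3, query='git'  -> '/api/plugins_3_git'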
# TODO(david): Consider saving categories just as special tags. Would make
# search implementation simpler but determining which category a plugin
# belongs to harder. See discussion on
# http://phabricator.benalpert.com/D171
def _apply_category_filters(results, tokens):
"""Consumes and applies category filters (e.g. "cat:other") to results.
Arguments:
results: List of search result plugins.
tokens: Remaining search text tokens that have not been consumed.
Returns:
(results, tokens): Results that match the given category, and tokens
that have not been consumed.
"""
category_filter = lambda t: t.startswith('cat:')
category_tokens = filter(category_filter, tokens)
tokens = list(itertools.ifilterfalse(category_filter, tokens))
if category_tokens:
category_ids = set(t[len('cat:'):] for t in category_tokens)
results = filter(lambda plugin:
plugin['category'] in category_ids, results)
return results, tokens
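# Illustrative example (tokens made up): ['cat:color', 'solarized'] consumes the
# 'cat:color' token, leaving tokens == ['solarized'] and keeping only plugins
# whose 'category' equals 'color'.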
def _apply_tag_filters(results, tokens):
"""Consumes and applies tag filters (e.g. "tag:python") to search results.
Arguments:
results: List of search result plugins.
tokens: Remaining search text tokens that have not been consumed.
Returns:
(results, tokens): Results that match the given tag, and tokens
that have not been consumed.
"""
tag_filter = lambda t: t.startswith('tag:')
tag_tokens = filter(tag_filter, tokens)
tokens = list(itertools.ifilterfalse(tag_filter, tokens))
if tag_tokens:
required_tags = set(t[len('tag:'):] for t in tag_tokens)
results = filter(lambda plugin:
required_tags <= set(plugin['tags']), results)
return results, tokens
def _apply_keyword_filters(results, tokens):
"""Filters results that match the given keywords (tokens).
Arguments:
results: List of search result plugins.
tokens: Keywords to filter results on.
Returns:
List of plugins that match the given keywords.
"""
if tokens:
# Create a regex that matches a string S iff for each keyword K in
# `search` there is a corresponding word in S that begins with K.
tokens_regex = (r'\b%s' % re.escape(t) for t in tokens)
search_regex = re.compile('.*'.join(tokens_regex))
# Surprisingly, regex matching like this is slightly faster than
# prefix-matching two sorted lists of tokens.
results = filter(lambda plugin:
search_regex.search(plugin['keywords']), results)
return results
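# Illustrative example (tokens made up): ['fug', 'synta'] builds the pattern
# r'\bfug.*\bsynta', so a plugin matches when its keywords contain a word
# starting with "fug" followed somewhere later by one starting with "synta".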
@api.route('/plugins', methods=['GET'])
@cache.cached(timeout=60 * 60 * 25, key_prefix=_make_get_plugins_cache_key,
unless=_should_skip_get_plugins_cache)
def get_plugins():
RESULTS_PER_PAGE = 20
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
results = get_search_index_cached()
if search:
tokens = [t.lower() for t in sorted(search.split())]
results, tokens = _apply_category_filters(results, tokens)
results, tokens = _apply_tag_filters(results, tokens)
results = _apply_keyword_filters(results, tokens)
count = len(results)
total_pages = (count + RESULTS_PER_PAGE - 1) / RESULTS_PER_PAGE # ceil
results = results[((page - 1) * RESULTS_PER_PAGE):
(page * RESULTS_PER_PAGE)]
return api_util.jsonify({
'plugins': results,
'total_pages': total_pages,
'total_results': count,
'results_per_page': RESULTS_PER_PAGE,
})
@api.route('/plugins/<slug>', methods=['GET'])
def get_plugin(slug):
plugin = r.table('plugins').get(slug).run(r_conn())
if plugin:
return api_util.jsonify(db.plugins.to_json(plugin))
else:
return api_util.api_not_found('No plugin with slug %s' % slug)
# TODO(david): Make it not so easy for an attacker to completely obliterate all
# of our tags, or at least be able to recover from it.
@api.route('/plugins/<slug>/tags', methods=['POST', 'PUT'])
def update_plugin_tags(slug):
data = json.loads(flask.request.data)
plugin = r.table('plugins').get(slug).run(r_conn())
if not plugin:
return api_util.api_not_found('No plugin with slug %s' % slug)
db.plugins.update_tags(plugin, data['tags'])
r.table('plugins').update(plugin).run(r_conn())
return api_util.jsonify({
'tags': plugin['tags']
})
@api.route('/tags', methods=['GET'])
@cache.cached(timeout=60 * 60)
def get_tags():
tags = r.table('tags').filter({}).run(r_conn())
return api_util.jsonify(list(tags))
@api.route('/categories', methods=['GET'])
@cache.cached(timeout=60 * 60)
def get_categories():
return api_util.jsonify(get_all_categories_cached())
@api.route('/plugins/<slug>/category/<category>', methods=['PUT'])
def update_plugin_category(slug, category):
plugin = r.table('plugins').get(slug).run(r_conn())
if not plugin:
return api_util.api_not_found('No plugin with slug %s' % slug)
if category not in (c['id'] for c in get_all_categories_cached()):
return api_util.api_bad_request('No such category %s' % category)
# TODO(david): Also update search index (stale cache)
plugin['category'] = category
r.table('plugins').update(plugin).run(r_conn())
return api_util.jsonify({
'category': plugin['category']
})
@api.route('/submit', methods=['POST'])
def submit_plugin():
plugin_data = flask.request.form.to_dict()
plugin_data['tags'] = json.loads(plugin_data['tags'])
db.submitted_plugins.insert(plugin_data)
plugin_markdown = "```\n%s\n```" % json.dumps(plugin_data, indent=4)
util.log_to_gitter("Someone just submitted a plugin!\n%s" % plugin_markdown)
return flask.redirect('/thanks-for-submitting')
@cache.cached(timeout=60 * 60 * 26, key_prefix='search_index')
def get_search_index_cached():
return db.plugins.get_search_index()
@cache.cached(timeout=60 * 60 * 27, key_prefix='all_categories')
def get_all_categories_cached():
return db.categories.get_all()
|
endlessm/chromium-browser
|
native_client/tools/checkdeps/checkdeps.py
|
Python
|
bsd-3-clause
| 17,715 | 0.008863 |
#!/usr/bin/env python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that files include headers from allowed directories.
Checks DEPS files in the source tree for rules, and applies those rules to
"#include" commands in source files. Any source file including something not
permitted by the DEPS files will fail.
The format of the deps file:
First you have the normal module-level deps. These are the ones used by
gclient. An example would be:
deps = {
"base":"http://foo.bar/trunk/base"
}
DEPS files not in the top-level of a module won't need this. Then you have
any additional include rules. You can add (using "+") or subtract (using "-")
from the previously specified rules (including module-level deps).
include_rules = {
# Code should be able to use base (it's specified in the module-level
# deps above), but nothing in "base/evil" because it's evil.
"-base/evil",
# But this one subdirectory of evil is OK.
"+base/evil/not",
# And it can include files from this other directory even though there is
# no deps rule for it.
"+tools/crime_fighter"
}
DEPS files may be placed anywhere in the tree. Each one applies to all
subdirectories, where there may be more DEPS files that provide additions or
subtractions for their own sub-trees.
There is an implicit rule for the current directory (where the DEPS file lives)
and all of its subdirectories. This prevents you from having to explicitly
allow the current directory everywhere. This implicit rule is applied first,
so you can modify or remove it using the normal include rules.
The rules are processed in order. This means you can explicitly allow a higher
directory and then take away permissions from sub-parts, or the reverse.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and use
only lowercase.
"""
from __future__ import print_function
import os
import optparse
import pipes
import re
import sys
import copy
# Variable name used in the DEPS file to add or subtract include files from
# the module-level deps.
INCLUDE_RULES_VAR_NAME = "include_rules"
# Optionally present in the DEPS file to list subdirectories which should not
# be checked. This allows us to skip third party code, for example.
SKIP_SUBDIRS_VAR_NAME = "skip_child_includes"
# The maximum number of non-include lines we can see before giving up.
MAX_UNINTERESTING_LINES = 50
# The maximum line length, this is to be efficient in the case of very long
# lines (which can't be #includes).
MAX_LINE_LENGTH = 128
# Set to true for more output. This is set by the command line options.
VERBOSE = False
# This regular expression will be used to extract filenames from include
# statements.
EXTRACT_INCLUDE_PATH = re.compile('[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"')
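# Editorial example (illustrative, not part of the original tool): the pattern
# above matches quoted includes only, e.g.
#   '#include "base/foo.h"'  -> captures 'base/foo.h'
#   '  #import "evil/bar.h"' -> captures 'evil/bar.h'
# Angle-bracket system includes such as '#include <stdio.h>' are ignored.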
# In lowercase, using forward slashes as directory separators, ending in a
# forward slash. Set by the command line options.
BASE_DIRECTORY = ""
# The directories which contain the sources managed by git.
GIT_SOURCE_DIRECTORY = set()
# Specifies a single rule for an include, which can be either allow or disallow.
class Rule(object):
def __init__(self, allow, dir, source):
self._allow = allow
self._dir = dir
self._source = source
def __str__(self):
if (self._allow):
return '"+%s" from %s.' % (self._dir, self._source)
return '"-%s" from %s.' % (self._dir, self._source)
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + "/")
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + "/")
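# Editorial example (illustrative, not part of the original tool): for a rule
# built from "-base/evil", ParentOrMatch asks whether the rule lives under a
# given directory, while ChildOrMatch asks whether the rule covers a given path:
#   rule = Rule(False, "base/evil", "example DEPS")
#   rule.ParentOrMatch("base")        # True  - "base/evil" is under "base"
#   rule.ChildOrMatch("base")         # False - "base" is not covered by the rule
#   rule.ChildOrMatch("base/evil/x")  # True  - covered subdirectory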
def ParseRuleString(rule_string, source):
"""Returns a tuple of a boolean indicating whether the directory is an allow
rule, and a string holding the directory name.
"""
if len(rule_string) < 1:
raise Exception('The rule string "%s" is too short\nin %s' %
(rule_string, source))
if rule_string[0] == "+":
return (True, rule_string[1:])
if rule_string[0] == "-":
return (False, rule_string[1:])
raise Exception('The rule string "%s" does not begin with a "+" or a "-"' %
rule_string)
class Rules:
def __init__(self):
"""Initializes the current rules with an empty rule list."""
self._rules = []
def __str__(self):
ret = "Rules = [\n"
ret += "\n".join([" %s" % x for x in self._rules])
ret += "]\n"
return ret
def AddRule(self, rule_string, source):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
"""
(add_rule, rule_dir) = ParseRuleString(rule_string, source)
# Remove any existing rules or sub-rules that apply. For example, if we're
    # passed "foo", we should remove "foo", "foo/bar", but not "foobar".
    self._rules = [x for x in self._rules if not x.ParentOrMatch(rule_dir)]
self._rules.insert(0, Rule(add_rule, rule_dir, source))
def DirAllowed(self, allowed_dir):
"""Returns a tuple (success, message), where success indicates if the given
directory is allowed given the current set of rules, and the message tells
why if the comparison failed."""
for rule in self._rules:
if rule.ChildOrMatch(allowed_dir):
# This rule applies.
if rule._allow:
return (True, "")
return (False, rule.__str__())
# No rules apply, fail.
return (False, "no rule applying")
def ApplyRules(existing_rules, includes, cur_dir):
"""Applies the given include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
include: The list of rules from the "include_rules" section of DEPS.
cur_dir: The current directory. We will create an implicit rule that
allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments.
"""
rules = copy.copy(existing_rules)
# First apply the implicit "allow" rule for the current directory.
if cur_dir.lower().startswith(BASE_DIRECTORY):
relative_dir = cur_dir[len(BASE_DIRECTORY) + 1:]
# Normalize path separators to slashes.
relative_dir = relative_dir.replace("\\", "/")
source = relative_dir
if len(source) == 0:
source = "top level" # Make the help string a little more meaningful.
rules.AddRule("+" + relative_dir, "Default rule for " + source)
else:
raise Exception("Internal error: base directory is not at the beginning" +
" for\n %s and base dir\n %s" %
(cur_dir, BASE_DIRECTORY))
# Last, apply the additional explicit rules.
for (index, rule_str) in enumerate(includes):
if not len(relative_dir):
rule_description = "the top level include_rules"
else:
rule_description = relative_dir + "'s include_rules"
rules.AddRule(rule_str, rule_description)
return rules
def ApplyDirectoryRules(existing_rules, dir_name):
"""Combines rules from the existing rules and the new directory.
Any directory can contain a DEPS file. Toplevel DEPS files can contain
module dependencies which are used by gclient. We use these, along with
additional include rules and implicit rules for the given directory, to
come up with a combined set of rules to apply for the directory.
Args:
existing_rules: The rules for the parent directory. We'll add-on to these.
dir_name: The directory name that the deps file may live in (if it exists).
This will also be used to genera
|
hail-is/hail
|
gear/gear/clients.py
|
Python
|
mit
| 1,942 | 0.00206 |
from typing import Optional
from gear.cloud_config import get_azure_config, get_gcp_config, get_global_config
from hailtop.aiocloud import aioazure, aiogoogle
from hailtop.aiotools.fs import AsyncFS, AsyncFSFactory
def get_identity_client(credentials_file: Optional[str] = None):
if credentials_file is None:
credentials_file = '/gsa-key/key.json'
cloud = get_global_config()['cloud']
if cloud == 'azure':
scopes = ['https://graph.microsoft.com/.default']
return aioazure.AzureGraphClient(
credentials_file=credentials_file,
scopes=scopes,
)
    assert cloud == 'gcp', cloud
project = get_gcp_config().project
return aiogoogle.GoogleIAmClient(project, credentials_file=credentials_file)
def get_compute_client(credentials_file: Optional[str] = None):
if credentials_file is None:
credentials_file = '/gsa-key/key.json'
cloud = get_global_config()['cloud']
if cloud == 'azure':
azure_config = get_azure_config()
        return aioazure.AzureComputeClient(azure_config.subscription_id, azure_config.resource_group)
assert cloud == 'gcp', cloud
project = get_gcp_config().project
return aiogoogle.GoogleComputeClient(project, credentials_file=credentials_file)
def get_cloud_async_fs(credentials_file: Optional[str] = None) -> AsyncFS:
if credentials_file is None:
credentials_file = '/gsa-key/key.json'
cloud = get_global_config()['cloud']
if cloud == 'azure':
return aioazure.AzureAsyncFS(credential_file=credentials_file)
assert cloud == 'gcp', cloud
return aiogoogle.GoogleStorageAsyncFS(credentials_file=credentials_file)
def get_cloud_async_fs_factory() -> AsyncFSFactory:
cloud = get_global_config()['cloud']
if cloud == 'azure':
return aioazure.AzureAsyncFSFactory()
assert cloud == 'gcp', cloud
return aiogoogle.GoogleStorageAsyncFSFactory()
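# Editorial sketch (not part of the original module): every factory above follows
# the same dispatch pattern - read the cloud name from the global config, then
# return the matching Azure or GCP client, e.g.
#   fs = get_cloud_async_fs()        # AzureAsyncFS or GoogleStorageAsyncFS
#   compute = get_compute_client()   # AzureComputeClient or GoogleComputeClient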
|
nijinashok/sos
|
sos/plugins/dbus.py
|
Python
|
gpl-2.0
| 745 | 0 |
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Dbus(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""D-Bu
|
s message bus"""
plugin_name = "dbus"
profiles = ('system',)
packages = ('dbus',)
def setup(self):
self.add_copy_spec([
"/etc/dbus-1",
"/var/lib/dbus/machine-id"
])
# vim: set et ts=4 sw=4 :
|
ntt-sic/nova
|
nova/api/openstack/compute/contrib/aggregates.py
|
Python
|
apache-2.0
| 8,962 | 0.000112 |
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
import datetime
from webob import exc
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, body, *args, **kwargs):
if len(body) == 1 and "host" in body:
host = body['host']
else:
raise exc.HTTPBadRequest()
return fn(self, req, id, host, *args, **kwargs)
return wrapped
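# Editorial note (illustrative): the decorated actions expect a body containing
# exactly one key, e.g. {"host": "compute01"}; anything else raises a 400.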
class AggregateController(object):
"""The Host Aggregates API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.AggregateAPI()
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
authorize(context)
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': [self._marshall_aggregate(a)['aggregate']
for a in aggregates]}
def create(self, req, body):
"""Creates an aggregate, given its name and availability_zone."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
host_aggregate = body["aggregate"]
name = host_aggregate["name"]
avail_zone = host_aggregate["availability_zone"]
except KeyError:
raise exc.HTTPBadRequest()
try:
utils.check_string_length(name, "Aggregate name", 1, 255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if len(host_aggregate) != 2:
raise exc.HTTPBadRequest()
try:
aggregate = self.api.create_aggregate(context, name, avail_zone)
except exception.AggregateNameExists as e:
LOG.info(e)
raise exc.HTTPConflict()
except exception.InvalidAggregateAction as e:
LOG.info(e)
raise
return self._marshall_aggregate(aggregate)
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.get_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_("Cannot show aggregate: %s"), id)
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
updates = body["aggregate"]
except KeyError:
raise exc.HTTPBadRequest()
if len(updates) < 1:
raise exc.HTTPBadRequest()
for key in updates.keys():
if key not in ["name", "availability_zone"]:
raise exc.HTTPBadRequest()
if 'name' in updates:
try:
utils.check_string_length(updates['name'], "Aggregate name", 1,
255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
try:
aggregate = self.api.update_aggregate(context, id, updates)
except exception.AggregateNotFound:
LOG.info(_('Cannot update aggregate: %s'), id)
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
authorize(context)
try:
self.api.delete_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_('Cannot delete aggregate: %s'), id)
raise exc.HTTPNotFound()
def action(self, req, id, body):
_actions = {
'add_host': self._add_host,
'remove_host': self._remove_host,
'set_metadata': self._set_metadata,
}
for action, data in body.iteritems():
if action not in _actions.keys():
msg = _('Aggregates does not have %s action') % action
raise exc.HTTPBadRequest(explanation=msg)
return _actions[action](req, id, data)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
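    # Editorial example (illustrative): the action body is a single-key dict such
    # as {"add_host": {"host": "compute01"}}, which dispatches to _add_host with
    # data={"host": "compute01"}.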
@get_host_from_body
def _add_host(self, req, id, host):
"""Adds a host to the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.add_host_to_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.ComputeHostNotFound):
LOG.info(_('Cannot add host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
            raise exc.HTTPNotFound()
except (exception.AggregateHostExists,
exception.InvalidAggregateAction) as e:
LOG.info(_('Cannot add host %(host)s in aggregate %(id)s'),
                     {'host': host, 'id': id})
raise exc.HTTPConflict(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
@get_host_from_body
def _remove_host(self, req, id, host):
"""Removes a host from the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.remove_host_from_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.AggregateHostNotFound,
exception.ComputeHostNotFound):
LOG.info(_('Cannot remove host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPNotFound()
except exception.InvalidAggregateAction:
LOG.info(_('Cannot remove host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPConflict()
return self._marshall_aggregate(aggregate)
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
metadata = body["metadata"]
except KeyError:
raise exc.HTTPBadRequest()
try:
aggregate = self.api.update_aggregate_metadata(context,
id, metadata)
except exception.AggregateNotFound:
LOG.info(_('Cannot set metadata %(metadata)s in aggregate %(id)s'),
{'metadata': metadata, 'id': id})
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def _marshall_aggregate(self, aggregate):
_aggregate = {}
for key, value in aggregate.items():
# NOTE(danms): The original API specified non-TZ-aware times
|
amcat/amcat-dashboard
|
dashboard/migrations/0023_auto_20180702_1140.py
|
Python
|
agpl-3.0
| 696 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-02 11:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0022_query_amcat_options'),
]
operations = [
migrations.AlterField(
model_name='query',
name='amcat_query_id',
field=models.IntegerField(),
),
migrations.AlterUniqueTogether(
name='query',
            unique_together=set([('system', 'amcat_query_id')]),
),
migrations.AlterModelTable(
name='querycache',
table=None,
),
]
|
levilucio/SyVOLT
|
GM2AUTOSAR_MM/MT_post__indirectLink_S.py
|
Python
|
mit
| 4,553 | 0.028992 |
"""
__MT_post__indirectLink_S.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: levi
Modified: Sun Aug 9 23:46:05 2015
_________________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_MT_post__indirectLink_S import *
class MT_post__indirectLink_S(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = []
self.graphClass_ = graph_MT_post__indirectLink_S
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.MT_label__=ATOM3String('', 20)
self.MT_pivotOut__=ATOM3String('', 20)
self.generatedAttributes = {'MT_label__': ('ATOM3String', ),
'MT_pivotOut__': ('ATOM3String', ) }
self.realOrder = ['MT_label__','MT_pivotOut__']
self.directEditing = [1,1]
def clone(self):
cloneObject = MT_post__indirectLink_S( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if actionID == self.CREATE:
self.autoIncrLabel(params)
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <--- Remove this if you want to use QOCA
# Get the high level constraint helper and solver
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
# Constraint only makes sense if there exists 2 objects connected to this link
if(not (self.in_connections_ and self.out_connections_)): return
# Get the graphical objects (subclass of graphEntity/graphLink)
graphicalObjectLink = self.graphObject_
graphicalObjectSource = self.in_connections_[0].graphObject_
graphicalObjectTarget = self.out_connections_[0].graphObject_
objTuple = (graphicalObjectSource, graphicalObjectTarget, graphicalObjectLink)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.LeftExactDistance(objTuple, 20)
oc.resolve() # Resolve immediately after creating entity & constraint
def autoIncrLabel(self, params):
#===============================================================================
# Auto increment the label
#===============================================================================
# If there is already one, ignore
if not self.MT_label__.isNone(): return
# Get the maximum label of all MT_pre__ elements
label = 0
for nt in self.parent.ASGroot.listNodes:
if nt.startswith('MT_post__'):
for node in self.parent.ASGroot.listNodes[nt]:
currLabel = 0
try:
currLabel = int(node.MT_label__.getValue())
except:
pass
if currLabel > label:
label = currLabel
# The label of this instance will be the max label + 1
self.MT_label__.setValue(str(label + 1))
|
aerler/WRF-Tools
|
Python/wrfrun/generateStepfile.py
|
Python
|
gpl-3.0
| 4,142 | 0.021004 |
#! /usr/bin/env python
#
# python script to generate a valid stepfile for WRF cycling
#
import sys
import pandas
import calendar
filename = 'stepfile' # default filename
lleap = True # allow leap days (not used in some GCM calendars)
lecho = False
lperiod = False
dateargs = [] # list of date arguments passed to date_range
for arg in sys.argv[1:]:
if arg[:11] == '--interval=':
freq = arg[11:].lower() # remains a string and is interpreted by date_range
elif arg[:8] == '--steps=':
lperiod = True; periods = int(arg[8:]) + 1 # each step is bounded by two timestamps
elif arg == '-l' or arg == '--noleap':
    lleap = False # omit leap days to accommodate some GCM calendars
elif arg == '-e' or arg == '--echo':
lecho = True
elif arg == '-h' or arg == '--help':
print('')
print("Usage: "+sys.argv[0]+" [-e] [-h] [--interval=interval] [--steps=steps] begin-date [end-date]")
print(" Interval, begin-date and end-date or steps must be specified.")
print("")
print(" --interval= step spacing / interval (D=days, W=weeks, M=month)")
print(" --steps= number of steps in stepfile")
print("
|
-l | --noleap omit leap days (to accomodate some GCM calendars)")
print(" -e | --echo print steps to stdout instead of writing to stepfile")
print(" -h | --help print this message")
print('')
sys.exit(1)
else:
dateargs.append(arg)
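# Editorial example (illustrative invocation, not from the original script):
#   python generateStepfile.py --interval=1W --steps=52 2000-01-01
# writes 52 weekly steps starting 2000-01-01 to the file 'stepfile'; add -e to
# echo the steps to stdout instead of writing the file.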
# output patterns
lmonthly = False
dateform = '%Y-%m-%d_%H:%M:%S'
# N.B.: because pandas date_range always anchors intervals at the end of the month, we have to subtract one
# day and add it again later, in order to re-anchor at the first of the month
stepform = '%Y-%m-%d'
offset = pandas.DateOffset() # no offset
if 'w' in freq:
oo = 1 if '-sun' in freq else 0
offset = pandas.DateOffset(days=pandas.to_datetime(dateargs[0]).dayofweek + oo)
elif 'm' in freq:
lmonthly = True
stepform = '%Y-%m'
offset = pandas.DateOffset(days=pandas.to_datetime(dateargs[0]).day)
#print dateargs
begindate = pandas.to_datetime(dateargs[0]) - offset
# check input and generate datelist
if lperiod:
if len(dateargs) != 1: raise ValueError('Can only specify begin-date, if the number of periods is given.')
datelist = pandas.date_range(begindate, periods=periods, freq=freq) # generate datelist
else:
if len(dateargs) != 2: raise ValueError('Specify begin-date and end-date, if no number of periods is given.')
enddate = pandas.to_datetime(dateargs[1]) - offset
datelist = pandas.date_range(begindate, enddate, freq=freq) # generate datelist
# open file, if not writing to stdout
if not lecho: stepfile = open(filename, mode='w')
# iterate over dates (skip first)
lastdate = datelist[0] + offset # first element
llastleap = False
for date in datelist[1:]:
lcurrleap = False
currentdate = date + offset
# N.B.: offset is not the interval/frequency; it is an offset at the beginning of the month or week
if lmonthly:
mon = date.month +1
if mon == 2: maxdays = 29 if calendar.isleap(date.year) else 28
elif mon in [4, 6, 9, 11]: maxdays = 30
else: maxdays = 31
if currentdate > date + pandas.DateOffset(days=maxdays):
currentdate = date + pandas.DateOffset(days=maxdays)
# handle calendars without leap days (turn Feb. 29th into Mar. 1st)
if not lleap and calendar.isleap(currentdate.year) and ( currentdate.month==2 and currentdate.day==29 ):
lcurrleap = True
currentdate += pandas.DateOffset(days=1) # move one day ahead
# generate line for last step
# print currentdate.month,currentdate.day
if lleap or not (freq.lower()=='1d' and llastleap):
# skip if this is daily output, a leap day, and a non-leap-year calendar...
stepline = "{0:s} '{1:s}' '{2:s}'\n".format(lastdate.strftime(stepform),lastdate.strftime(dateform),
currentdate.strftime(dateform))
# write to appropriate output
if lecho: sys.stdout.write(stepline)
else: stepfile.write(stepline)
# remember last step
lastdate = currentdate
llastleap = lcurrleap
# close file
if not lecho: stepfile.close()
|
RyanSkraba/beam
|
sdks/python/apache_beam/runners/dataflow/test_dataflow_runner.py
|
Python
|
apache-2.0
| 4,039 | 0.003714 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Wrapper of Beam runners that's built for running and verifying e2e tests."""
from __future__ import absolute_import
from __future__ import print_function
import logging
import time
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
from apache_beam.runners.runner import PipelineState
__all__ = ['TestDataflowRunner']
# Dataflow take up to 10mins for the long tail of starting/stopping worker
# pool.
WAIT_IN_STATE_TIMEOUT = 10 * 60
_LOGGER = logging.getLogger(__name__)
class TestDataflowRunner(DataflowRunner):
def run_pipeline(self, pipeline, options):
"""Execute test pipeline and verify test matcher"""
test_options = options.view_as(TestOptions)
on_success_matcher = test_options.on_success_matcher
wait_duration = test_options.wait_until_finish_duration
is_streaming = options.view_as(StandardOptions).streaming
# [BEAM-1889] Do not send this to remote workers also, there is no need to
# send this option to remote executors.
test_options.on_success_matcher = None
    self.result = super(TestDataflowRunner, self).run_pipeline(
pipeline, options)
if self.result.has_job:
      # TODO(markflyhigh)(BEAM-1890): Use print since Nose doesn't show logs
# in some cases.
print('Worker logs: %s' % self.build_console_url(options))
try:
self.wait_until_in_state(PipelineState.RUNNING)
if is_streaming and not wait_duration:
_LOGGER.warning('Waiting indefinitely for streaming job.')
self.result.wait_until_finish(duration=wait_duration)
if on_success_matcher:
from hamcrest import assert_that as hc_assert_that
hc_assert_that(self.result, pickler.loads(on_success_matcher))
finally:
if not self.result.is_in_terminal_state():
self.result.cancel()
self.wait_until_in_state(PipelineState.CANCELLED)
return self.result
def build_console_url(self, options):
"""Build a console url of Dataflow job."""
project = options.view_as(GoogleCloudOptions).project
region_id = options.view_as(GoogleCloudOptions).region
job_id = self.result.job_id()
return (
'https://console.cloud.google.com/dataflow/jobsDetail/locations'
'/%s/jobs/%s?project=%s' % (region_id, job_id, project))
def wait_until_in_state(self, expected_state, timeout=WAIT_IN_STATE_TIMEOUT):
"""Wait until Dataflow pipeline enters a certain state."""
if not self.result.has_job:
raise IOError('Failed to get the Dataflow job id.')
start_time = time.time()
while time.time() - start_time <= timeout:
job_state = self.result.state
if self.result.is_in_terminal_state() or job_state == expected_state:
return job_state
time.sleep(5)
raise RuntimeError('Timeout after %d seconds while waiting for job %s '
'enters expected state %s. Current state is %s.' %
(timeout, self.result.job_id(),
expected_state, self.result.state))
|
pydanny/django-admin2
|
example/files/tests/test_models.py
|
Python
|
bsd-3-clause
| 1,470 | 0 |
from os import path
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from files.models import CaptionedFile
fixture_dir = path.join(path.abspath(path.dirname(__file__)), 'fixtures')
class CaptionedFileTestCase(TestCase):
def setUp(self):
self.captioned_file = CaptionedFile.objects.create(
caption="this is a file",
publication=path.join('pubtest.txt')
)
self.captioned_file.save()
def test_creation(self):
cf = CaptionedFile.objects.create(
caption="lo lo
|
",
|
publication=path.join('pubtest.txt')
)
cf.save()
self.assertEqual(CaptionedFile.objects.count(), 2)
        # Because setUp already created one
def test_update(self):
self.captioned_file.caption = "I like text files"
self.captioned_file.save()
cf = CaptionedFile.objects.get()
self.assertEqual(cf.caption, "I like text files")
def test_delete(self):
cf = CaptionedFile.objects.get()
cf.delete()
self.assertEqual(CaptionedFile.objects.count(), 0)
class MultiEncodedAdminFormTest(TestCase):
def setUp(self):
self.user = User(
username='admin',
is_staff=True,
is_superuser=True)
self.user.set_password('admin')
self.user.save()
self.create_url = reverse('admin2:example3_captioned_file_create')
|
pbabik/mainnav-reader
|
setup.py
|
Python
|
bsd-2-clause
| 1,980 | 0.002525 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mainnav-reader - Version: 0.5.1
#
# Copyright (c) 2009-2013, Dennis Keitzel
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
from distutils.core import setup
from mainnav_reader.helper import check_requirements
if sys.argv[1] == 'install':
check_requirements()
setup(
name='mainnav-reader',
version='0.5.1',
author='Dennis Keitzel',
author_email='dennis.keitzel@arcor.de',
url='http://code.google.com/p/mainnav-reader/',
description='This little tool has the ability to read out and delete tracklog data from mainnav gps devices',
license='BSD',
packages=['mainnav_reader'],
scripts=['mainnav-reader'],
)
|
levilucio/SyVOLT
|
RSS2ATOM/contracts/unit/HUnitConnectCAG_ConnectedLHS.py
|
Python
|
mit
| 1,301 | 0.032283 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitConnectCAG_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitConnectCAG_ConnectedLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitConnectCAG_ConnectedLHS, self).__init__(name='HUnitConnectCAG_ConnectedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitConnectCAG_ConnectedLHS')
self["equations"] = []
# Set the node attributes
        # match class Channel(Channel) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__Channel"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Channel')
# Add the edges
self.add_edges([
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each match association.
def constraint(self, PreNode, graph):
return True
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/vpn/workflows.py
|
Python
|
apache-2.0
| 19,939 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon import workflows
from openstack_dashboard import api
class AddVPNServiceAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
router_id = forms.ChoiceField(label=_("Router"))
subnet_id = forms.ChoiceField(label=_("Subnet"))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddVPNServiceAction, self).__init__(request, *args, **kwargs)
def populate_subnet_id_choices(self, request, context):
        subnet_id_choices = [('', _("Select a Subnet"))]
try:
tenant_id = request.user.tenant_id
networks = api.neutron.network_list_for_tenant(request, tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve networks list.'))
            networks = []
for n in networks:
for s in n['subnets']:
subnet_id_choices.append((s.id, s.cidr))
self.fields['subnet_id'].choices = subnet_id_choices
return subnet_id_choices
def populate_router_id_choices(self, request, context):
router_id_choices = [('', _("Select a Router"))]
try:
routers = api.neutron.router_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve routers list.'))
routers = []
for r in routers:
router_id_choices.append((r.id, r.name))
self.fields['router_id'].choices = router_id_choices
return router_id_choices
class Meta:
name = _("Add New VPN Service")
permissions = ('openstack.services.network',)
help_text = _("Create VPN Service for current project.\n\n"
"Assign a name and description for the VPN Service. "
"Select a router and a subnet. "
"Admin State is Up (checked) by default."
)
class AddVPNServiceStep(workflows.Step):
action_class = AddVPNServiceAction
contributes = ("name", "description", "subnet_id",
"router_id", "admin_state_up")
def contribute(self, data, context):
context = super(AddVPNServiceStep, self).contribute(data, context)
if data:
return context
class AddVPNService(workflows.Workflow):
slug = "addvpnservice"
name = _("Add VPN Service")
finalize_button_name = _("Add")
success_message = _('Added VPN Service "%s".')
failure_message = _('Unable to add VPN Service "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddVPNServiceStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.vpnservice_create(request, **context)
return True
except Exception:
return False
class AddIKEPolicyAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
auth_algorithm = forms.ChoiceField(label=_("Authorization algorithm"))
encryption_algorithm = forms.ChoiceField(label=_("Encryption algorithm"))
ike_version = forms.ChoiceField(label=_("IKE version"))
lifetime_units = forms.ChoiceField(label=_("Lifetime units for IKE keys"))
lifetime_value = forms.IntegerField(
min_value=60, label=_("Lifetime value for IKE keys"),
initial=3600,
help_text=_("Equal to or more than 60"))
pfs = forms.ChoiceField(label=_("Perfect Forward Secrecy"))
phase1_negotiation_mode = forms.ChoiceField(
label=_("IKE Phase1 negotiation mode"))
def __init__(self, request, *args, **kwargs):
super(AddIKEPolicyAction, self).__init__(request, *args, **kwargs)
auth_algorithm_choices = [("sha1", "sha1")]
self.fields['auth_algorithm'].choices = auth_algorithm_choices
encryption_algorithm_choices = [("3des", "3des"),
("aes-128", "aes-128"),
("aes-192", "aes-192"),
("aes-256", "aes-256")]
self.fields[
'encryption_algorithm'].choices = encryption_algorithm_choices
self.fields['encryption_algorithm'].initial = "aes-128"
# Currently this field has only one choice, so mark it as readonly.
self.fields['encryption_algorithm'].widget.attrs['readonly'] = True
ike_version_choices = [("v1", "v1"),
("v2", "v2")]
self.fields['ike_version'].choices = ike_version_choices
lifetime_units_choices = [("seconds", "seconds")]
self.fields['lifetime_units'].choices = lifetime_units_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['lifetime_units'].widget.attrs['readonly'] = True
pfs_choices = [("group2", "group2"),
("group5", "group5"),
("group14", "group14")]
self.fields['pfs'].choices = pfs_choices
self.fields['pfs'].initial = "group5"
phase1_neg_mode_choices = [("main", "main")]
self.fields[
'phase1_negotiation_mode'].choices = phase1_neg_mode_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['phase1_negotiation_mode'].widget.attrs['readonly'] = True
class Meta:
name = _("Add New IKE Policy")
permissions = ('openstack.services.network',)
help_text = _("Create IKE Policy for current project.\n\n"
"Assign a name and description for the IKE Policy. "
)
class AddIKEPolicyStep(workflows.Step):
action_class = AddIKEPolicyAction
contributes = ("name", "description", "auth_algorithm",
"encryption_algorithm", "ike_version",
"lifetime_units", "lifetime_value",
"pfs", "phase1_negotiation_mode")
def contribute(self, data, context):
context = super(AddIKEPolicyStep, self).contribute(data, context)
context.update({'lifetime': {'units': data['lifetime_units'],
'value': data['lifetime_value']}})
context.pop('lifetime_units')
context.pop('lifetime_value')
if data:
return context
class AddIKEPolicy(workflows.Workflow):
slug = "addikepolicy"
name = _("Add IKE Policy")
finalize_button_name = _("Add")
success_message = _('Added IKE Policy "%s".')
failure_message = _('Unable to add IKE Policy "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddIKEPolicyStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.ikepolicy_create(request, **context)
return True
except Exceptio
|
nzlosh/st2
|
st2common/tests/unit/test_actionchain_schema.py
|
Python
|
apache-2.0
| 2,701 | 0.00037 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest2
from jsonschema.exceptions import ValidationError
from st2common.models.system import actionchain
from st2tests.fixturesloader import FixturesLoader
FIXTURES_PACK = "generic"
TEST_FIXTURES = {
"actionchains": [
"chain1.yaml",
"malformedchain.yaml",
"no_default_chain.yaml",
"chain_with_vars.yaml",
"chain_with_publish.yaml",
]
}
FIXTURES = FixturesLoader().load_fixtures(
fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_FIXTURES
)
CHAIN_1 = FIXTURES["actionchains"]["chain1.yaml"]
MALFORMED_CHAIN = FIXTURES["actionchains"]["malformedchain.yaml"]
NO_DEFAULT_CHAIN = FIXTURES["actionchains"]["no_default_chain.yaml"]
CHAIN_WITH_VARS = FIXTURES["actionchains"]["chain_with_vars.yaml"]
CHAIN_WITH_PUBLISH = FIXTURES["actionchains"]["chain_with_publish.yaml"]
class ActionChainSchemaTest(unittest2.TestCase):
def test_actionchain_schema_valid(self):
chain = actionchain.ActionChain(**CHAIN_1)
self.assertEqual(len(chain.chain), len(CHAIN_1["chain"]))
self.assertEqual(chain.default, CHAIN_1["default"])
def test_actionchain_no_default(self):
chain = actionchain.ActionChain(**NO_DEFAULT_CHAIN)
self.assertEqual(len(chain.chain), len(NO_DEFAULT_CHAIN["chain"]))
self.assertEqual(chain.default, None)
def test_actionchain_with_vars(self):
chain = actionchain.ActionChain(**CHAIN_WITH_VARS)
self.assertEqual(len(chain.chain), len(CHAIN_WITH_VARS["chain"]))
        self.assertEqual(len(chain.vars), len(CHAIN_WITH_VARS["vars"]))
def test_actionchain_with_publish(self):
chain = actionchain.ActionChain(**CHAIN_WITH_PUBLISH)
self.assertEqual(len(chain.chain), len(CHAIN_WITH_PUBLISH["chain"]))
self.assertEqual(
len(chain.chain[0].publish), len(CHAIN_WITH_PUBLISH["chain"][0]["publish"])
)
def test_actionchain_schema_invalid(self):
with self.assertRaises(ValidationError):
actionchain.ActionChain(**MALFORMED_CHAIN)
|
jdavisp3/TigerShark
|
tigershark/facade/utils.py
|
Python
|
bsd-3-clause
| 88 | 0.011364 |
def first(l):
try:
return l.pop()
    except Exception:
return None
|
endlessm/chromium-browser
|
tools/grit/grit/tool/interface.py
|
Python
|
bsd-3-clause
| 1,507 | 0.011944 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Base class and interface for tools.
'''
from __future__ import print_function
class Tool(object):
'''Base class for all tools. Tools should use their docstring (i.e. the
class-level docstring) for the help they want to have printed when they
are invoked.'''
#
# Interface (abstract methods)
#
  def ShortDescription(self):
'''Returns a short description of the functionality of the tool.'''
raise NotImplementedError()
def Run(self, global_options, my_arguments):
'''Runs the tool.
Args:
global_options: object grit_runner.Options
my_arguments: [arg1 arg2 ...]
Return:
0 for success, non-0 for error
'''
    raise NotImplementedError()
#
# Base class implementation
#
def __init__(self):
self.o = None
def ShowUsage(self):
'''Show usage text for this tool.'''
print(self.__doc__)
def SetOptions(self, opts):
self.o = opts
def Out(self, text):
'''Always writes out 'text'.'''
self.o.output_stream.write(text)
def VerboseOut(self, text):
'''Writes out 'text' if the verbose option is on.'''
if self.o.verbose:
self.o.output_stream.write(text)
def ExtraVerboseOut(self, text):
'''Writes out 'text' if the extra-verbose option is on.
'''
if self.o.extra_verbose:
self.o.output_stream.write(text)
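# Editorial sketch (not part of grit): a minimal subclass of the interface above
# might look like
#   class HelloTool(Tool):
#     '''Prints a greeting.  The class docstring doubles as the help text.'''
#     def ShortDescription(self):
#       return 'Prints a greeting.'
#     def Run(self, global_options, my_arguments):
#       self.Out('hello\n')
#       return 0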
|
marekpetrik/RAAM
|
raam/examples/inventory/configuration.py
|
Python
|
mit
| 5,606 | 0.019265 |
"""
Global configuration for the problem settings
"""
import numpy as np
from scipy import stats
horizon = 300
runs = 40
DefaultConfiguration = {
"price_buy" : [1.2,2.1,3.3],
"price_sell" : [1,2,3],
"price_probabilities" : np.array([[0.8, 0.1, 0.1],[0.1, 0.8, 0.1],[0.1, 0.1, 0.8]]),
"initial_capacity" : 1,
"initial_inventory" : 0.5,
"degradation" : {"fun":"polynomial","charge":[0.0,0,0.01],
"discharge":[0.01,-0.02,0.01] },
"capacity_cost" : 1,
"change_capacity" : False # assume that the capacity does not change
}
def construct_martingale(prices, variance):
"""
    Constructs a configuration with a martingale definition of transition probabilities.
The change in price is modeled as a normal distribution with zero mean and
the specified variance.
The capacity of the battery does in fact change
Parameters
----------
prices : array
**Sell** prices that correspond to states in the Martingale price state
process. **Buy** prices are 10% higher.
variance : float
        Variance of the normal distribution
Returns
-------
out : dict
Configuration that corresponds to the martingale
"""
states = len(prices)
# defines over how many states the probability is spread over
spread = min(5,states-1)
if type(prices) is not np.ndarray:
prices = np.array(prices)
# relative transition probabilities
p = stats.norm(0,variance).pdf(np.arange(-spread,spread+1))
p = p / p.sum()
# add extra 0s to both ends of p
p = np.concatenate((np.zeros(states-spread-1), p, np.zeros(states-spread-1)))
P = [p[states-i-1:2*states-i-1] for i in range(states)]
P = np.array(P)
P = np.diag(1/P.sum(1)).dot(P)
configuration = {
"price_buy" : 1.1 * prices,
"price_sell" : prices,
"price_probabilities" : P,
"initial_capacity" : 1,
"initial_inventory" : 0.5,
"degradation" : {"fun":"polynomial","charge":[0.0,0,0.01],
"discharge":[0.01,0.02,0.01] },
"capacity_cost" : 1,
"change_capacity" : True # assume that the capacity does not change
}
return configuration
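# Editorial example (illustrative, not part of the original module):
#   cfg = construct_martingale(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), variance=1.0)
#   cfg["price_probabilities"].shape   # (5, 5); each row sums to 1
#   cfg["price_buy"]                   # 10% above the sell prices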
def construct_massdata(degrade):
"""
    Returns a problem definition based on what is described in the experimental
    section of the paper.
    This uses a simple uniform quantization of energy prices.
    Parameters
----------
degrade : bool
Whether the battery degrades
"""
prices = np.array([25.0, 50.0, 75.0, 100.0, 125.0, 150.0, 175.0, 200.0, 250.0, 300.0])
P = np.array([[ 8.15584416e-01, 1.76623377e-01, 5.19480519e-03,
2.59740260e-03, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 4.70114171e-02, 8.72397582e-01, 7.25319006e-02,
7.38750839e-03, 0.00000000e+00, 6.71591672e-04,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 1.19904077e-03, 1.31894484e-01, 7.79376499e-01,
6.95443645e-02, 1.43884892e-02, 3.59712230e-03,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, 4.24528302e-02, 2.83018868e-01,
5.14150943e-01, 1.22641509e-01, 2.35849057e-02,
9.43396226e-03, 0.00000000e+00, 0.00000000e+00,
4.71698113e-03],
[ 0.00000000e+00, 2.15053763e-02, 9.67741935e-02,
2.68817204e-01, 4.30107527e-01, 1.29032258e-01,
4.30107527e-02, 1.07526882e-02, 0.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 3.22580645e-02,
2.58064516e-01, 3.54838710e-01, 1.93548387e-01,
9.67741935e-02, 6.45161290e-02, 0.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, 7.14285714e-02, 1.42857143e-01,
0.00000000e+00, 7.14285714e-02, 2.14285714e-01,
2.85714286e-01, 1.42857143e-01, 7.14285714e-02,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 1.42857143e-01,
0.00000000e+00, 2.85714286e-01, 0.00000000e+00,
0.00000000e+00, 2.85714286e-01, 2.85714286e-01,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 0.00000000e+00, 2.50000000e-01,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]])
if degrade:
degradation = {"fun":"polynomial","charge" : [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00142857142857143],
"discharge" : [0.0, 0.00500000000000000, -0.00750000000000000, 0.00500000000000000, -0.00125000000000000] }
else:
degradation = {"fun":"polynomial","charge" : [0.0],
"discharge" : [0.0] }
configuration = {
"price_buy" : 1.05 * prices,
"price_sell" : 0.95 * prices,
"price_probabilities" : P,
"initial_capacity" : 1,
"initial_inventory" : 0.5,
"degradation" : degradation,
"capacity_cost" : 20000,
"change_capacity" : True
}
return configuration
|
tseaver/google-cloud-python
|
bigtable/tests/unit/test_instance.py
|
Python
|
apache-2.0
| 37,126 | 0.001104 |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
from google.cloud.bigtable.cluster import Cluster
class TestInstance(unittest.TestCase):
PROJECT = "project"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID
LOCATION_ID = "locid"
LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID
APP_PROFILE_PATH = (
"projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/"
)
DISPLAY_NAME = "display_name"
LABELS = {"foo": "bar"}
OP_ID = 8915
OP_NAME = "operations/projects/{}/instances/{}operations/{}".format(
PROJECT, INSTANCE_ID, OP_ID
)
TABLE_ID = "table_id"
TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
@staticmethod
def _get_target_class():
from google.cloud.bigtable.instance import Instance
return Instance
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def test_constructor_defaults(self):
client = object()
instance = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.INSTANCE_ID)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
self.assertIs(instance._client, client)
self.assertIsNone(instance.state)
def test_constructor_non_default(self):
from google.cloud.bigtable import enums
instance_type = enums.Instance.Type.DEVELOPMENT
state = enums.Instance.State.READY
labels = {"test": "test"}
client = object()
instance = self._make_one(
self.INSTANCE_ID,
client,
display_name=self.DISPLAY_NAME,
instance_type=instance_type,
labels=labels,
_state=state,
)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, labels)
self.assertIs(instance._client, client)
self.assertEqual(instance.state, state)
def test__update_from_pb_success(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable import enums
instance_type = enums.Instance.Type.PRODUCTION
state = enums.Instance.State.READY
instance_pb = data_v2_pb2.Instance(
display_name=self.DISPLAY_NAME,
type=instance_type,
labels=self.LABELS,
state=state,
)
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, self.LABELS)
self.assertEqual(instance._state, state)
def test__update_from_pb_success_defaults(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable import enums
instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME)
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED)
self.assertFalse(instance.labels)
def test__update_from_pb_no_display_name(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
instance_pb = data_v2_pb2.Instance()
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
with self.assertRaises(ValueError):
instance._update_from_pb(instance_pb)
def test_from_pb_success(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable import enums
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance_type = enums.Instance.Type.PRODUCTION
state = enums.Instance.State.READY
instance_pb = data_v2_pb2.Instance(
name=self.INSTANCE_NAME,
            display_name=self.INSTANCE_ID,
type=instance_type,
labels=self.LABELS,
state=state,
)
klass = self._get_target_class()
instance = klass.from_pb(instance_pb, client)
self.assertIsInstance(instance, klass)
self.assertEqual(instance._client, client)
        self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.INSTANCE_ID)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, self.LABELS)
self.assertEqual(instance._state, state)
def test_from_pb_bad_instance_name(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
instance_name = "INCORRECT_FORMAT"
instance_pb = data_v2_pb2.Instance(name=instance_name)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, None)
def test_from_pb_project_mistmatch(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
ALT_PROJECT = "ALT_PROJECT"
credentials = _make_credentials()
client = self._make_client(
project=ALT_PROJECT, credentials=credentials, admin=True
)
self.assertNotEqual(self.PROJECT, ALT_PROJECT)
instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, client)
def test_name_property(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
        # Patch the API method.
client._instance_admin_client = api
instance = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance.name, self.INSTANCE_NAME)
def test___eq__(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance1, instance2)
def test___eq__type_differ(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = object()
self.assertNotEqual(instance1, instance2)
def test___ne__same_value(self):
client
|
prarthitm/edxplatform
|
openedx/core/djangoapps/user_api/urls.py
|
Python
|
agpl-3.0
| 1,706 | 0.001758 |
"""
Defines the URL routes for this app.
"""
from django.conf import settings
from django.conf.urls import patterns, url
from ..profile_images.views import ProfileImageView
from .accounts.views import AccountDeactivationView, AccountViewSet
from .preferences.views import PreferencesView, PreferencesDetailView
from .verification_api.views import PhotoVerificationStatusView
ME = AccountViewSet.as_view({
'get': 'get',
})
ACCOUNT_LIST = AccountViewSet.as_view({
'get': 'list',
})
ACCOUNT_DETAIL = AccountViewSet.as_view({
'get': 'retrieve',
'patch': 'partial_update',
})
urlpatterns = patterns(
'',
url(r'^v1/me$', ME, name='own_username_api'),
url(r'^v1/accounts/{}$'.format(settings.USERNAME_PATTERN), ACCOUNT_DETAIL, name='accounts_api'),
url(r'^v1/accounts$', ACCOUNT_LIST, name='accounts_detail_api'),
url(
r'^v1/accounts/{}/image$'.format(settings.USERNAME_PATTERN),
ProfileImageView.as_view(),
name='accounts_profile_image_api'
),
url(
r'^v1/accounts/{}/deactivate/$'.format(settings.USERNAME_PATTERN),
AccountDeactivationView.as_view(),
name='accounts_deactivation'
),
url(
r'^v1/accounts/{}/verification_status/$'.format(settings.USERNAME_PATTERN),
PhotoVerificationStatusView.as_view(),
name='verification_status'
),
url(
r'^v1/preferences/{}$'.format(settings.USERNAME_PATTERN),
PreferencesView.as_view(),
name='preferences_api'
),
url(
r'^v1/preferences/{}/(?P<preference_key>[a-zA-Z0-9_]+)$'.format(settings.USERNAME_PATTERN),
PreferencesDetailView.as_view(),
name='preferences_detail_api'
),
)
|
daisymax/nvda
|
source/NVDAObjects/IAccessible/sysTreeView32.py
|
Python
|
gpl-2.0
| 10,467 | 0.040222 |
#NVDAObjects/IAccessible/sysTreeView32.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2010 Michael Curran <mick@kulgan.net>, James Teh <jamie@jantrid.net>
from ctypes import *
from ctypes.wintypes import *
import api
import winKernel
import controlTypes
import speech
import UIAHandler
from . import IAccessible
if UIAHandler.isUIAAvailable: from ..UIA import UIA
from .. import NVDAObject
from logHandler import log
import watchdog
TV_FIRST=0x1100
TVIS_STATEIMAGEMASK=0xf000
#Window messages
TVM_GETITEMSTATE=TV_FIRST+39
TVM_GETITEM=TV_FIRST+62
TVM_MAPACCIDTOHTREEITEM=TV_FIRST+42
TVM_MAPHTREEITEMTOACCID=TV_FIRST+43
TVM_GETNEXTITEM=TV_FIRST+10
#item mask flags
TVIF_CHILDREN=0x40
#Relation codes
TVGN_ROOT=0
TVGN_NEXT=1
TVGN_PREVIOUS=2
TVGN_PARENT=3
TVGN_CHILD=4
class TVItemStruct(Structure):
_fields_=[
('mask',c_uint),
('hItem',c_void_p),
('state',c_uint),
('stateMask',c_uint),
('pszText',LPWSTR),
('cchTextMax',c_int),
('iImage',c_int),
('iSelectedImage',c_int),
('cChildren',c_int),
('lParam',LPARAM),
]
class TreeView(IAccessible):
def _get_firstChild(self):
try:
return super(TreeView, self).firstChild
except:
# Broken commctrl 5 tree view.
return BrokenCommctrl5Item.getFirstItem(self)
class TreeViewItem(IAccessible):
def _get_role(self):
return controlTypes.ROLE_TREEVIEWITEM
def _get_treeview_hItem(self):
if not hasattr(self,'_treeview_hItem'):
self._treeview_hItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPACCIDTOHTREEITEM,self.IAccessibleChildID,0)
if not self._treeview_hItem:
# Tree views from comctl < 6.0 use the hItem as the child ID.
self._treeview_hItem=self.IAccessibleChildID
return self._treeview_hItem
def _get_treeview_level(self):
return int(self.IAccessibleObject.accValue(self.IAccessibleChildID))
def _get_states(self):
states=super(TreeViewItem,self)._get_states()
hItem=self.treeview_hItem
itemStates=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETITEMSTATE,hItem,TVIS_STATEIMAGEMASK)
ch=(itemStates>>12)&3
if ch>0:
states.add(controlTypes.STATE_CHECKABLE)
if ch==2:
states.add(controlTypes.STATE_CHECKED)
elif ch==3:
states.add(controlTypes.STATE_HALFCHECKED)
return states
def _get_value(self):
return None
def _get_parent(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_parent()
hItem=self.treeview_hItem
if not hItem:
return super(TreeViewItem,self)._get_parent()
parentItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PARENT,hItem)
if parentItem<=0:
return super(TreeViewItem,self)._get_parent()
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,parentItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=parentItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_firstChild(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_firstChild()
hItem=self.treeview_hItem
if not hItem:
return super(TreeViewItem,self)._get_firstChild()
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_CHILD,hItem)
if childItem<=0:
return super(TreeViewItem,self)._get_firstChild()
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,childItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=childItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_next(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_next()
hItem=self.treeview_hItem
if not hItem:
return None
nextItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,hItem)
if nextItem<=0:
return None
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,nextItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=nextItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_previous(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_previous()
hItem=self.treeview_hItem
if not hItem:
return None
prevItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PREVIOUS,hItem)
if prevItem<=0:
return None
newID=watchdog.cancellableSendMessage(self.w
|
indowHandle,TVM_MAPHTREEITEMTOACCID,prevItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=prevItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_children(self):
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def _get_childCount(self):
hItem=self.treeview_hItem
if not hItem:
|
return 0
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_CHILD,hItem)
if childItem<=0:
return 0
numItems=0
while childItem>0:
numItems+=1
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,childItem)
return numItems
def _get_positionInfo(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_positionInfo()
info={}
info['level']=self.treeview_level
hItem=self.treeview_hItem
if not hItem:
return info
newItem=hItem
index=0
while newItem>0:
index+=1
newItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PREVIOUS,newItem)
newItem=hItem
numItems=index-1
while newItem>0:
numItems+=1
newItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,newItem)
info['indexInGroup']=index
info['similarItemsInGroup']=numItems
return info
def event_stateChange(self):
announceContains = self is api.getFocusObject() and controlTypes.STATE_EXPANDED in self.states and controlTypes.STATE_EXPANDED not in getattr(self,'_speakObjectPropertiesCache',{}).get('states',frozenset())
super(TreeViewItem,self).event_stateChange()
if announceContains:
speech.speakMessage(_("%s items")%self.childCount)
class BrokenCommctrl5Item(IAccessible):
"""Handle broken CommCtrl v5 SysTreeView32 items in 64 bit applications.
In these controls, IAccessible fails to retrieve any info, so we must retrieve it using UIA.
We do this by obtaining a UIA NVDAObject and redirecting properties to it.
We can't simply use UIA objects alone for these controls because UIA events are also broken.
"""
def __init__(self, _uiaObj=None, **kwargs):
# This class is being directly instantiated.
if not _uiaObj:
raise ValueError("Cannot instantiate directly without supplying _uiaObj")
self._uiaObj = _uiaObj
super(BrokenCommctrl5Item, self).__init__(**kwargs)
def initOverlayClass(self):
self._uiaObj = None
if UIAHandler.handler:
parent=super(BrokenCommctrl5Item, self).parent
if parent and parent.hasFocus:
try:
kwargs = {}
UIA.kwargsFromSuper(kwargs, relation="focus")
self._uiaObj = UIA(**kwargs)
except:
log.debugWarning("Retrieving UIA focus failed", exc_info=True)
def _get_role(self):
return self._uiaObj.role if self._uiaObj else controlTypes.ROLE_UNKNOWN
def _get_name(self):
return self._uiaObj.name if self._uiaObj else None
def _get_description(self):
return self._uiaObj.description if self._uiaObj else None
def _get_value(self):
return self._uiaObj.value if self._uiaObj else None
def _get_states(self):
return self._uiaObj.states if self._uiaObj else set()
def _get_po
|
franklinsales/udacity-data-analyst-nanodegree
|
project3/class-works/data-wrangling/data-in-more-complex-formats/quiz-extracting-data-corrected.py
|
Python
|
mit
| 1,669 | 0.001797 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 23:06:23 2017
@author: franklin
"""
# Your task here is to extract data from xml on authors of an article
# and add it to a list, one item for an author.
# See the provided data structure for the expected format.
# The tags for first name, surname and email should map directly
# to the dictionary keys
import xml.etree.ElementTree as ET
article_file = "data/exampleresearcharticle.xml"
def get_root(fname):
tree = ET.parse(fname)
return tree.getroot()
def get_author(root):
authors = []
for author in root.findall('./fm/bibl/aug/au'):
data = {
"fnm": None,
"snm": None,
"email": None
}
data["fnm"] = author.find('./fnm').text
data["snm"] = author.find('./snm').text
data["email"] = author.find('./email').text
authors.append(data)
return author
|
s
def test():
solution = [{'fnm': 'Omer', 'snm': 'Mei-Dan', 'email': 'omer@extremegate.com'}, {'fnm': 'Mike', 'snm': 'Carmont', 'email': 'mcarmont@hotmail.com'}, {'fnm': 'Lior', 'snm': 'Laver', 'email': 'laver17@gmail.com'}, {'fnm': 'Meir', 'snm': 'Nyska', 'email': 'nyska@internet-zahav.net'}, {'fnm': 'Hagay', 'sn
|
m': 'Kammar', 'email': 'kammarh@gmail.com'}, {'fnm': 'Gideon', 'snm': 'Mann', 'email': 'gideon.mann.md@gmail.com'}, {'fnm': 'Barnaby', 'snm': 'Clarck', 'email': 'barns.nz@gmail.com'}, {'fnm': 'Eugene', 'snm': 'Kots', 'email': 'eukots@gmail.com'}]
root = get_root(article_file)
data = get_author(root)
assert data[0] == solution[0]
assert data[1]["fnm"] == solution[1]["fnm"]
test()
|
openilabs/falconlab
|
env/lib/python2.7/site-packages/falcon/cmd/bench.py
|
Python
|
mit
| 944 | 0 |
"""Benchmark runner.
Copyright 2013 by Rackspace Hosting, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You m
|
ay obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless requir
|
ed by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from falcon import bench
def fail(returncode, e):
sys.stderr.write('\nERROR: %s\n\n' % e)
sys.exit(returncode)
def main():
try:
bench.main()
except KeyboardInterrupt:
fail(1, 'Interrupted, terminating benchmark')
except RuntimeError as e:
fail(1, e)
if __name__ == '__main__':
main()
|
EMS-TU-Ilmenau/fastmat
|
fastmat/version.py
|
Python
|
apache-2.0
| 245 | 0 |
#
|
-*- coding: utf-8 -*-
# This file carries the module's version information which will be updated
# during execution of the installation script, setup.py. Distribution tarballs
# contain a pre-generated copy of this file.
__version__ = '0
|
.2'
|
dadavidson/Python_Lab
|
Python-w3resource/Python_Basic/ex01.py
|
Python
|
mit
| 715 | 0.00979 |
# https://www.w3resource.com/python-exe
|
rcises/
# 1. Write a Python program to print the following string in a specific format (see the output).
# Sample String : "Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond
# in the sky. Twinkle, twinkle, little star, How I wonder what you are" Output :
# Twinkle, twinkle, little star,
# How I wonder what you are!
# Up above the world so high,
# Like a diamond in the sky.
# Twinkle, twinkle, little star,
# How I wonder what
|
you are
string = """
Twinkle, twinkle, little star,
\tHow I wonder what you are!
\t\tUp above the world so high,
\t\tLike a diamond in the sky.
Twinkle, twinkle, little star,
\tHow I wonder what you are
"""
print string
|
inconvergent/differential_ani
|
differential.py
|
Python
|
mit
| 7,824 | 0.034126 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cairo, Image
import gtk, gobject
from numpy import cos, sin, pi, sqrt, sort, square,array, zeros, diff,\
column_stack,ones, reshape, linspace, arctan2
from numpy import sum as npsum
from numpy.random import random, seed
from itertools import count
from speedup.speedup import pyx_collision_reject
#from speedup.speedup import pyx_growth_branch
from speedup.speedup import pyx_growth
from speedup.speedup import pyx_segment_attract
seed(4)
FNAME = './img/xx'
BACK = [1]*3
FRONT = [0,0,0,0.7]
CONTRASTA = [0.84,0.37,0] # orange
CONTRASTB = [0.53,0.53,1] # lightblue
CONTRASTC = [0.84,1,0]
PI = pi
TWOPI = 2.*pi
NMAX = 2*1e8
SIZE = 1500
ONE = 1./SIZE
STP = ONE*0.9
FARL = 30.*ONE
NEARL = 4.*ONE
GROW_NEAR_LIMIT = NEARL
MID = 0.5
LINEWIDTH = 3.*ONE
####
RENDER_ITT = 2000 # redraw this often
ZONEWIDTH = FARL/ONE
ZONES = int(SIZE/ZONEWIDTH)
class Render(object):
def __init__(self,n):
self.n = n
self.__init_cairo()
window = gtk.Window()
window.resize(self.n, self.n)
window.connect("destroy", self.__write_image_and_exit)
darea = gtk.DrawingArea()
darea.connect("expose-event", self.expose)
window.add(darea)
window.show_all()
self.darea = darea
self.num_img = 0
def clear_canvas(self):
self.ctx.set_source_rgb(*BACK)
self.ctx.rectangle(0,0,1,1)
self.ctx.fill()
def __write_image_and_exit(self,*args):
self.sur.write_to_png('on_exit.png')
gtk.main_quit(*args)
def __init_cairo(self):
sur = cairo.ImageSurface(cairo.FORMAT_ARGB32,self.n,self.n)
ctx = cairo.Context(sur)
ctx.scale(self.n,self.n)
ctx.set_source_rgb(*BACK)
ctx.rectangle(0,0,1,1)
ctx.fill()
self.sur = sur
self.ctx = ctx
def init_step(self,e):
self.step = e
#gobject.timeout_add(5,self.step_wrap)
gobject.idle_add(self.step_wrap)
self.steps = 0
def line(self,x1,y1,x2,y2):
self.ctx.set_source_rgba(*FRONT)
self.ctx.move_to(x1,y1)
self.ctx.line_to(x2,y2)
self.ctx.stroke()
def circle(self,x,y,r,fill=False):
self.ctx.arc(x,y,r,0,pi*2.)
if fill:
self.ctx.fill()
else:
self.ctx.stroke()
def circles(self,xx,yy,rr,fill=False):
if fill:
action = self.ctx.fill
else:
action = self.ctx.stroke
for x,y,r in zip(xx,yy,rr):
self.ctx.arc(x,y,r,0,TWOPI)
action()
def circle_stroke(self,x1,y1,x2,y2,r):
dx = x1-x2
dy = y1-y2
dd = sqrt(dx*dx+dy*dy)
n = int(dd/ONE)
n = n if n>2 else 2
a = arctan2(dy,dx)
scale = linspace(0,dd,n)
xp = x1-scale*cos(a)
yp = y1-scale*sin(a)
for x,y in zip(xp,yp):
self.ctx.arc(x,y,r,0,pi*2.)
self.ctx.fill()
def expose(self,*args):
cr = self.darea.window.cairo_create()
cr.set_source_surface(self.sur,0,0)
cr.paint()
def step_wrap(self,*args):
res = self.step()
self.steps += 1
if not self.steps%RENDER_ITT:
self.expose()
return res
class Line(object):
def __init__(self):
self.X = zeros((NMAX,2),'float')
self.SV = zeros((NMAX,2),'int')
self.SVMASK = zeros(NMAX,'int')
self.VS = {}
self.vnum = 0
self.snum = 0
self.sind = 0
self.ZV = [[] for i in xrange((ZONES+2)**2)]
self.VZ = zeros(NMAX,'int')
def _add_vertex(self,x):
"""
add vertex located at x.
zone maps are updated.
"""
vnum = self.vnum
self.X[vnum,:] = x
z = get_z(x,ZONES)
self.ZV[z].append(vnum)
self.VZ[vnum] = z
self.vnum += 1
return self.vnum-1
def update_zone_maps(self):
"""
check if vertices have changed zone, and update those that have.
"""
vnum = self.vnum
zz = get_zz(self.X[:vnum,:],ZONES)
mask = (zz != self.VZ[:vnum]).nonzero()[0]
for bad_v in mask:
new_z = zz[bad_v]
old_z = self.VZ[bad_v]
new = [v for v in self.ZV[old_z] if v != bad_v]
self.ZV[old_z] = new
self.ZV[new_z].append(bad_v)
self.VZ[mask] = zz[mask]
def _add_segment(self,a,b):
"""
add new segment between vertices a,b.
"""
self.SV[self.sind,:] = [a,b]
self.SVMASK[self.sind] = 1
add = make_dict_list_add(self.VS)
add(a,self.sind)
add(b,self.sind)
self.sind += 1
self.snum += 1
return self.sind-1
def _add_vertex_segment(self,x,a):
"""
add new vertex x connected to vertex a with a new segment.
"""
v = self._add_vertex(x)
self._add_segment(v,a)
def _delete_segment(self,a):
"""
delete segment a and related connections.
"""
vv = self.SV[a,:]
self.SVMASK[a] = 0
self.snum -= 1
for v in vv:
if self.VS.has_key(v):
vs = [s for s in self.VS[v] if s!=a]
if len(vs)>0:
self.VS[v] = vs
else:
del(self.VS[v])
return vv
def split_segment(self,a):
"""
add new vertex, v, at the middle of segment a with vertices: [v0,v1]
creates new segments b,c such that: v0 -b- v -c- v1
"""
vv = self.SV[a,:]
midx = (self.X[vv[1],0] + self.X[vv[0],0])*0.5
midy = (self.X[vv[1],1] + self.X[vv[0],1])*0.5
#TODO: improve
newv = self._add_vertex([midx,midy])
self._delete_segment(a)
b = self._add_segment(vv[0],newv)
c = self._add_segment(vv[1],newv)
return newv, [b,c]
def make_dict_list_add(d):
def add(k,v):
if d.has_key(k):
d[k].append(v)
else:
d[k] = [v]
return add
def get_z(x,nz):
"""
find zone z of x. we have nz zones in each direction.
"""
i = 1+int(x[0]*nz)
j = 1+int(x[1]*nz)
z = i*nz+j
return z
def get_zz(xx,nz):
"""
same as get_z for a vector of points.
"""
ij = (xx*nz).astype('int')
zz = ij[:,0]*(nz+2) + ij[:,1]+1
return zz
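# Worked example (added): mapping points in the unit square to zone indices.
# The nz value below (10) is purely illustrative; the script derives its real
# value from SIZE and ZONEWIDTH above. Note that get_z and get_zz use slightly
# different index formulas (nz versus nz+2), so their results are not
# interchangeable.
#
#     get_z([0.25, 0.75], 10)                # (1+2)*10 + (1+7) = 38
#     get_zz(array([[0.25, 0.75]]), 10)      # 2*(10+2) + 7 + 1 = array([32])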
def init_circle(l,ix,iy,r,n):
th = sort(random(n)*TWOPI)
rad = (0.9 + 0.1*(0.5-random(n)))*r
xx = column_stack( (ix+cos(th)*rad, iy+sin(th)*rad) )
vv = []
for x in xx:
vv.append(l._add_vertex(x))
for i in xrange(len(vv)-1):
seg = l._add_segment(vv[i],vv[i+1])
l._add_segment(vv[0],vv[-1])
def init_horizontal_line(l,x1,x2,y1,y2,n):
x = sort(x1+(x2-x1)*random(n))
y = y1 + (y2-y1)*random(n)
xx = column_stack((x,y))
vv = []
for x in xx:
vv.append(l._add_vertex(x))
for i in xrange(len(vv)-1):
seg = l._add_segment(vv[i],vv[i+1])
if i == 0:
first = seg
def main():
L = Line()
render = Render(SIZE)
init_circle(L,MID,MID,0.001,50)
#init_horizontal_line(L,MID-0.2,MID+0.2,MID-0.001,MID+0.001,100)
SX = zeros((NMAX,2),'float')
def show(render,l):
render.clear_canvas()
render.ctx.set_source_rgba(*FRONT)
render.ctx.set_line_width(LINEWIDTH)
for vv in l.SV[:l.sind,:][l.SVMASK[:l.sind]>0,:]:
render.circle_stroke(l.X[vv[0],0],l.X[vv[0],1],
l.X[vv[1],0],l.X[vv[1],1], ONE*2)
def step():
rnd1 = random(L.sind)
#rnd2 = random(L.sind)
pyx_growth(L,r
|
nd1,GRO
|
W_NEAR_LIMIT)
#pyx_growth_branch(L,rnd1,rnd2,GROW_NEAR_LIMIT)
L.update_zone_maps()
if not render.steps%RENDER_ITT:
show(render,L)
print 'steps:',render.steps,'vnum:',L.vnum,'snum:',L.snum
fn = '{:s}_nearl{:0.0f}_itt{:07d}.png'
fn = fn.format(FNAME,FARL/ONE,render.steps)
render.sur.write_to_png(fn)
vnum = L.vnum
SX[:vnum,:] = 0.
pyx_segment_attract(L,SX[:vnum,:],NEARL)
pyx_collision_reject(L,SX[:vnum,:],FARL,ZONES)
SX[:vnum,:] *= STP
L.X[:vnum,:] += SX[:vnum,:]
return True
render.init_step(step)
gtk.main()
if __name__ == '__main__' :
if True:
import pstats, cProfile
fn = './profile/profile'
cProfile.run('main()',fn)
p = pstats.Stats(fn)
p.strip_dirs().sort_stats('cumulative').print_stats()
else:
main()
|
OriHoch/pysiogame
|
game_boards/game054.py
|
Python
|
gpl-3.0
| 7,891 | 0.034216 |
# -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.board
import random
import math
import pygame
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self,mainloop,999,1)
gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,11,9)
def create_game_objects(self, level = 1):
self.board.draw_grid = False
color = ex.hsv_to_rgb(225,15,235)
color2 = (255,255,255)
self.col_r = (255,0,0)
self.col_g = (0,255,0)
self.col_b = (0,0,255)
self.col_k = (0,0,0)
self.col_e = (255,255,255)
colorkey = (2,2,2)
self.col_bg = (255,255,255) #self.col_k #(255,246,219)
data = [32,23]
#stretch width to fit the screen size
x_count = self.get_x_count(data[1],even=True)
if x_count > 32:
data[0] = x_count
self.data = data
self.points = 20
self.vis_buttons = [1,0,0,0,1,1,1,0,0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0],data[1])
scale = self.layout.scale
self.board.level_start(data[0],data[1],scale)
self.board.board_bg.initcolor = self.col_bg
self.board.board_bg.color = self.col_bg
self.board.board_bg.update_me = True
self.board.moved = self.moved
self.choice_list = []
step = 255 / 20.0
for i in range(21):
self.choice_list.append(int(255 - i*step))
self.picked = []
for i in range(3):
self.picked.append(self.choice_list[random.randrange(0,len(self.choice_list))])
y = data[1]-3
self.rgb_g = [y,y,y]
self.rgbx3 = [self.col_k,self.col_k,self.col_k]
self.board.add_unit(1,y,2,3,classes.board.ImgAlphaShip,"",self.col_r,"light_r.png")
self.board.add_unit(4,y,2,3,classes.board.ImgAlphaShip,"",self.col_g,"light_g.png")
self.board.add_unit(7,y,2,3,classes.board.ImgAlphaShip,"",self.col_b,"light_b.png")
for each in self.board.ships:
each.outline = False
each.audible = False
each.image.set_colorkey(each.initcolor)
#add colour circles - canvas
self.board.add_unit(10,0,data[0]-10,data[1],classes.board.Label,"",self.col_e,"",0)
self.canvas = self.board.units[0]
self.canvas_center = [(self.canvas.grid_w*self.board.scale)//2,(self.canvas.grid_h*self.board.scale)//2]
#adding borders between the colour tubes
self.board.add_unit(0,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
self.board.add_unit(3,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
self.board.add_unit(6,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
self.board.add_unit(9,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
#adding colour guides
self.board.add_door(1,0,2,data[1],classes.board.Door,"",color,"",0)
self.board.units[-1].set_outline(self.col_r, 1)
self.board.add_door(4,0,2,data[1],classes.board.Door,"",color,"",0)
self.board.units[-1].set_outline(self.col_g, 1)
self.board.add_door(7,0,2,data[1],classes.board.Door,"",color,"",0)
self.board.units[-1].set_outline(self.col_b, 1)
#adding colour strips
self.board.add_door(1,data[1]-1,2,1,classes.board.Door,"",self.col_r,"",0)
self.board.add_door(4,data[1]-1,2,1,classes.board.Door,"",self.col_g,"",0)
self.board.add_door(7,data[1]-1,2,1,classes.board.Door,"",self.col_b,"",0)
#black background
self.board.add_door(1,0,2,data[1],classes.board.Door,"",self.col_k,"",0)
self.board.units[-1].image.set_colorkey(None)
self.board.add_door(4,0,2,data[1],classes
|
.board.Door,"",self.col_k,"",0)
self.board.units[-1].image.set_colorkey(None)
self.board.add_door(7,0,2,data[1],classes.board.Door,"",self.col_k,"",0)
self.board.units[-1].image.set_colorkey(None)
for i in [5,6,7,8,9,10,11,12,13]:
if i>7:
self.board.units[i].image.set_colorkey(colork
|
ey)
self.board.all_sprites_list.move_to_back(self.board.units[i])
else:
self.board.all_sprites_list.move_to_front(self.board.units[i])
self.canvas.set_outline((255,75,0),1)
self.canv = []
for i in range(4):
self.canv.append(pygame.Surface([self.canvas.grid_w*self.board.scale, self.canvas.grid_h*self.board.scale-1]))
self.board.all_sprites_list.move_to_back(self.board.board_bg)
self.mix()
def mix(self):
for i in range(3):
self.rgb_g[i] = self.board.ships[i].grid_y
self.update_sliders()
self.canv[3].fill(self.col_k)
ct = self.canvas_center
radius = 9*self.board.scale
radius2 = 5*self.board.scale
x = 1*self.board.scale
rect = [[ct[0],ct[1]-x],[ct[0]-x,ct[1]+x],[ct[0]+x,ct[1]+x]]
for i in range(3):
pygame.draw.circle(self.canv[i], self.rgbx3[i], rect[i], radius, 0)
self.canv[3].blit(self.canv[i],[0,0],special_flags = pygame.BLEND_ADD)
pygame.draw.circle(self.canv[3], self.picked, ct, radius2, 0)
self.canvas.painting = self.canv[3].copy()
self.canvas.update_me = True
def update_sliders(self):
for i in range(3):
strip = self.board.units[i+8]
strip.grid_y = self.rgb_g[i]+3-3
strip.grid_h = self.data[1]-strip.grid_y+3
col = []
for each in strip.initcolor:
if each > 0:
if strip.grid_y == 20:
col.append(0)
elif strip.grid_y == 0:
col.append(255)
else:
step = 255 / 20.0
col.append(int(255 - (strip.grid_y) * step))
else:
col.append(0)
self.rgbx3[i] = col
strip.color = col
strip.pos_update()
strip.update_me = True
def moved(self):
self.mix()
def handle(self,event):
gd.BoardGame.handle(self, event) #send event handling up
def update(self,game):
game.fill((0,0,0))
gd.BoardGame.update(self, game) #rest of painting done by parent
def check_result(self):
r = self.rgbx3[0][0]
g = self.rgbx3[1][1]
b = self.rgbx3[2][2]
if self.picked != [r,g,b]:
help = ""
if self.picked[0] > r:
help += self.dp['more red'] + ", "
elif self.picked[0] < r:
help += self.dp['less red'] + ", "
else:
help += self.dp['red is ok'] + ", "
if self.picked[1] > g:
help += self.dp['more green'] + ", "
elif self.picked[1] < g:
help += self.dp['less green'] + ", "
else:
help += self.dp['green is ok'] + ", "
if self.picked[2] > b:
help += self.dp['more blue'] + ". "
elif self.picked[2] < b:
help += self.dp['less blue'] + ". "
else:
help += self.dp['blue is ok'] + ". "
self.say(help)
if self.points > 0:
self.points -= 1
self.level.try_again(silent = self.mainloop.speaker.talkative)
else:
self.update_score(self.points)
self.level.next_board()
|
box/ClusterRunner
|
test/unit/project_type/test_git.py
|
Python
|
apache-2.0
| 13,290 | 0.004138 |
from os.path import join, expanduser
from subprocess import Popen
from unittest import skipIf
from unittest.mock import ANY, call, MagicMock, Mock
from genty import genty, genty_dataset
import re
from app.project_type.git import Git
from app.util.conf.configuration import Configuration
from app.util.process_utils import is_windows, get_environment_variable_setter_command
from test.framework.base_unit_test_case import BaseUnitTestCase
from test.framework.comparators import AnyStringMatching
@genty
class TestGit(BaseUnitTestCase):
def setUp(self):
super().setUp()
self.patch('app.project_type.git.fs.create_dir')
self.patch('os.unlink')
self.patch('os.symlink')
self.os_path_exists_mock = self.patch('app.project_type.git.os.path.exists')
self.os_path_exists_mock.return_value = False
self.os_path_isfile_mock = self.patch('app.project_type.git.os.path.isfile')
self.os_path_isfile_mock.return_value = False
def test_timing_f
|
ile_path_happy_path(self):
git_env = Git("ssh://scm.dev.box.net/box/www/current", 'origin', 'refs/changes/78/151978/27')
actual_timing_file_sys_path = git_env.timing_file_path('QUnit')
expected_timing_file_sys_path = join(
Configuration[
|
'base_directory'],
'timings',
'master',
'scm.dev.box.net',
'box',
'www',
'current',
'QUnit.timing.json',
)
self.assertEquals(expected_timing_file_sys_path, actual_timing_file_sys_path)
def test_execute_command_in_project_specifies_cwd_if_exists(self):
self.os_path_exists_mock.return_value = True
project_type_popen_patch = self._patch_popen()
fake_project_directory = 'proj_dir'
fake_command = 'some_command'
git_env = Git("ssh://scm.dev.box.net/box/www/current", 'origin', 'refs/changes/78/151978/27')
git_env.project_directory = fake_project_directory
git_env.execute_command_in_project(fake_command)
env_setter = get_environment_variable_setter_command('PROJECT_DIR', fake_project_directory)
project_type_popen_patch.assert_called_once_with(
'{} {}'.format(env_setter, fake_command),
cwd=fake_project_directory,
shell=ANY,
stdout=ANY,
stderr=ANY,
start_new_session=ANY,
)
def test_execute_command_in_project_type_specifies_cwd_if_doesnt_exist(self):
project_type_popen_patch = self._patch_popen()
fake_project_directory = 'proj_dir'
fake_command = 'some_command'
git_env = Git("ssh://scm.dev.box.net/box/www/current", 'origin', 'refs/changes/78/151978/27')
git_env.project_directory = fake_project_directory
git_env.execute_command_in_project(fake_command)
env_setter = get_environment_variable_setter_command('PROJECT_DIR', fake_project_directory)
project_type_popen_patch.assert_called_once_with(
'{} {}'.format(env_setter, fake_command),
cwd=None,
shell=ANY,
stdout=ANY,
stderr=ANY,
start_new_session=ANY,
)
@genty_dataset(
regular_path=(
'http://scm.example.com/path/to/project',
join('scm.example.com', 'path', 'to', 'project')
),
with_netloc=(
'ssh://scm.dev.box.net:12345/awesome-project',
join('scm.dev.box.net12345', 'awesomeproject')
),
no_netloc=(
'git.dev.box.net:Productivity/ClusterRunnerHealthCheck',
join('git.dev.box.net', 'Productivity', 'ClusterRunnerHealthCheck')
),
)
def test_get_full_repo_directory(self, url, expected_repo_path_without_base):
Configuration['repo_directory'] = join(expanduser('~'), '.clusterrunner', 'repos')
expected_repo_path = join(
Configuration['repo_directory'],
expected_repo_path_without_base,
)
actual_repo_path = Git.get_full_repo_directory(url)
self.assertEqual(expected_repo_path, actual_repo_path)
def test_get_timing_file_directory(self):
Configuration['timings_directory'] = join(expanduser('~'), '.clusterrunner', 'timing')
url = 'http://scm.example.com/path/to/project'
actual_timings_sys_path = Git.get_timing_file_directory(url)
expected_timings_sys_path = join(
Configuration['timings_directory'],
'scm.example.com',
'path',
'to',
'project',
)
self.assertEqual(expected_timings_sys_path, actual_timings_sys_path)
def test_get_repo_directory_removes_colon_from_directory_if_exists(self):
Configuration['repo_directory'] = join(expanduser('~'), 'tmp', 'repos')
git = Git("some_remote_value", 'origin', 'ref/to/some/branch')
actual_repo_directory = git.get_full_repo_directory('ssh://source_control.cr.com:1234/master')
expected_repo_directory = join(
Configuration['repo_directory'],
'source_control.cr.com1234',
'master'
)
self.assertEqual(expected_repo_directory, actual_repo_directory)
def test_get_timing_file_directory_removes_colon_from_directory_if_exists(self):
Configuration['timings_directory'] = join(expanduser('~'), 'tmp', 'timings')
git = Git("some_remote_value", 'origin', 'ref/to/some/branch')
actual_timing_directory = git.get_timing_file_directory('ssh://source_control.cr.com:1234/master')
expected_timing_directory = join(
Configuration['timings_directory'],
'source_control.cr.com1234',
'master',
)
self.assertEqual(expected_timing_directory, actual_timing_directory)
@genty_dataset(
shallow_clone_false=(False, True),
shallow_clone_true=(True, False),
)
def test_fetch_project_with_pre_shallow_cloned_repo(self, shallow_clone, should_delete_clone):
Configuration['shallow_clones'] = shallow_clone
self.os_path_isfile_mock.return_value = True
self.os_path_exists_mock.return_value = True
mock_fs = self.patch('app.project_type.git.fs')
mock_rmtree = self.patch('shutil.rmtree')
git = Git('url')
git._repo_directory = 'fake/repo_path'
git._execute_and_raise_on_failure = MagicMock()
git.execute_command_in_project = Mock(return_value=('', 0))
mock_fs.create_dir.call_count = 0 # only measure calls made in _fetch_project
mock_rmtree.call_count = 0
git._fetch_project()
if should_delete_clone:
mock_rmtree.assert_called_once_with('fake/repo_path')
else:
self.assertFalse(mock_rmtree.called)
@genty_dataset(
failed_rev_parse=(1, True),
successful_rev_parse=(0, False),
)
def test_repo_is_cloned_if_and_only_if_rev_parse_fails(self, rev_parse_return_code, expect_git_clone_call):
mock_popen = self._patch_popen({
'git rev-parse$': _FakePopenResult(return_code=rev_parse_return_code)
})
Configuration['repo_directory'] = '/repo-directory'
git = Git(url='http://original-user-specified-url.test/repo-path/repo-name')
git.fetch_project()
git_clone_call = call(AnyStringMatching('git clone'), start_new_session=ANY,
stdout=ANY, stderr=ANY, cwd=ANY, shell=ANY)
if expect_git_clone_call:
self.assertIn(git_clone_call, mock_popen.call_args_list, 'If "git rev-parse" returns a failing exit code, '
'"git clone" should be called.')
else:
self.assertNotIn(git_clone_call, mock_popen.call_args_list, 'If "git rev-parse" returns a successful exit '
'code, "git clone" should not be called.')
@genty_dataset(
shallow_clone=(True,),
no_shallow_clone=(False,),
)
def test_fetch_project_passes_depth_parameter_for_shallow_clone
|
JeroenDeDauw/phpstat
|
src/phpstat/dirinfo.py
|
Python
|
gpl-3.0
| 4,098 | 0.006833 |
'''
Created on Mar 22, 2011
@author: jeroen
'''
import os
from fileinfo import FileInfo
from bytesize import ByteSize
class DirInfo(object):
'''
Simple class to represent a directory and obtain data about it when needed.
'''
def __init__(self, path, recursive=False):
'''
Constructor
'''
self._path = path
self._initiated = False
self._recursive = recursive
self._files = []
self._dirs = []
self._filecount = 0
self._dircount = 0
self._totalsize = -1
self._codelines = 0
self._commentlines = 0
self._whitespacelines = 0
'''
Check if the dir data is cached, and if not, obtain it.
'''
def _init_if_needed(self):
if not self._initiated:
self._initiated = True
self._get_dir_info(self._path)
def _get_dir_info(self, rootdir):
for item in os.listdir(rootdir):
fullname = os.path.join(rootdir, item)
if not item.startswith('.') and not os.path.islink(fullname):
if os.path.isdir(fullname):
dir = DirInfo(fullname, self._recursive)
self._dirs.append(dir)
self._dircount += 1
if self._recursive:
self._filecount += dir.get_filecount()
self._dircount += dir.get_dircount()
self._totalsize += dir.get_totalsize()
self._codelines += dir.get_code_lines()
self._commentlines += dir.get_comment_lines()
self._whitespacelines += dir.get_whitespace_lines()
else:
file = FileInfo(rootdir, item)
self._files.append(file)
self._filecount += 1
self._totalsize += file.get_filesize()
self._codelines += file.get_code_lines()
self._commentlines += file.get_comment_lines()
self._whitespacelines += file.get_whitespace_lines()
def __repr__(self, recursive=None):
self.set_recursive(recursive)
self._init_if_needed()
return "%s (%s dirs, %s files, %s lines: %s code, %s comment, %s empty) %s" % (
self._path,
self._dircount,
self._filecount,
self.get_line_count(),
self.get_code_lines(),
self.get_comment_lines(),
self.get_whitespace_lines(),
ByteSize(self._totalsize).__repr__()
)
'''
Sets whether the directory should report data obtained recursively,
or only look at what's directly in it. Note that changing the
recursive setting invalidates the cached info.
'''
def set_recursive(self, recursive):
if recursive is not None and recursive != self._recursive:
self._recursive = recursive
self._initiated = False
def get_files(self):
self._init_if_needed()
return self._files
def get_dirs(self):
self._init_if_needed()
return self._dirs
def get_path(self):
return self._path
def get_totalsize(self):
self._init_if_needed()
return self._totalsize
def get_code_lines(self):
self._init_if_needed()
return self._codelines
def get_comment_lines(self):
self._init_if_needed()
return self._commentlines
def get_whitespace_lines(self):
self._init_if_needed()
return self._whitespacelines
def get_line_count(self):
self._in
|
it_if_needed()
return self._codelines + self._commentlines + self._whitespacelines
def get_filecount(self):
self._init_if_needed()
return self._filecount
def get_dirc
|
ount(self):
self._init_if_needed()
return self._dircount
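# Hedged usage sketch (added; not part of the original module). The path below
# is a placeholder; FileInfo and ByteSize are the helpers imported above.
#
#     info = DirInfo('/path/to/php/project', recursive=True)
#     print info                                  # lazily scans the tree via __repr__
#     for sub in info.get_dirs():
#         print sub.get_path(), sub.get_line_count()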
|
luoshao23/ML_algorithm
|
Decission_Tree/tree.py
|
Python
|
mit
| 7,395 | 0.002299 |
from math import log
from PIL import Image, ImageDraw
my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
['google', 'France', 'yes', 23, 'Premium'],
['digg', 'USA', 'yes', 24, 'Basic'],
['kiwitobes', 'France', 'yes', 23, 'Basic'],
['google', 'UK', 'no', 21, 'Premium'],
['(direct)', 'New Zealand', 'no', 12, 'None'],
['(direct)',
|
'UK', 'no', 21, 'Basic'],
['google', 'USA', 'no', 24, 'Premium'],
['slashdot', 'France', 'yes', 19, 'None'],
['digg', 'USA', 'no', 18, 'None'],
|
['google', 'UK', 'no', 18, 'None'],
['kiwitobes', 'UK', 'no', 19, 'None'],
['digg', 'New Zealand', 'yes', 12, 'Basic'],
['slashdot', 'UK', 'no', 21, 'None'],
['google', 'UK', 'yes', 18, 'Basic'],
['kiwitobes', 'France', 'yes', 19, 'Basic']]
class decisionnode(object):
"""docstring for decisionnode"""
def __init__(self, col=-1, value=None, results=None, tb=None, fb=None):
self.col = col
self.value = value
self.results = results
self.tb = tb
self.fb = fb
def divideset(rows, column, value):
split_function = None
if isinstance(value, int) or isinstance(value, float):
split_function = lambda row: row[column] >= value
else:
split_function = lambda row: row[column] == value
set1 = [row for row in rows if split_function(row)]
set2 = [row for row in rows if not split_function(row)]
return (set1, set2)
def uniquecounts(rows):
results = {}
for row in rows:
r = row[-1]
results.setdefault(r, 0)
results[r] += 1
return results
def giniimpurity(rows):
total = len(rows)
counts = uniquecounts(rows)
imp = 0
for k1 in counts:
p1 = float(counts[k1]) / total
imp += p1 * (1 - p1)
return imp
def entropy(rows):
total = len(rows)
log2 = lambda x: log(x) / log(2)
results = uniquecounts(rows)
ent = 0.0
for r in results.keys():
p = float(results[r]) / total
ent -= p * log2(p)
return ent
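# Worked check (added): for two equally likely classes the entropy is 1 bit and
# the Gini impurity is 0.5; a single-class set scores 0 under both measures.
# Only the last column (the class label) matters to either function.
#
#     entropy([[1, 'a'], [1, 'a'], [2, 'b'], [2, 'b']])        # 1.0
#     giniimpurity([[1, 'a'], [1, 'a'], [2, 'b'], [2, 'b']])   # 0.5
#     entropy([[1, 'a'], [2, 'a']])                            # 0.0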
def buildtree(rows, scoref=entropy):
if len(rows) == 0:
return decisionnode()
current_score = scoref(rows)
best_gain = 0.0
best_criteria = None
best_sets = None
column_count = len(rows[0]) - 1
for col in xrange(column_count):
column_values = {}
for row in rows:
column_values.setdefault(row[col], 1)
for value in column_values.keys():
(set1, set2) = divideset(rows, col, value)
p = float(len(set1)) / len(rows)
gain = current_score - p * scoref(set1) - (1 - p) * scoref(set2)
if gain > best_gain and len(set1) > 0 and len(set2) > 0:
best_gain = gain
best_criteria = (col, value)
best_sets = (set1, set2)
if best_gain > 0:
trueBranch = buildtree(best_sets[0])
falseBranch = buildtree(best_sets[1])
return decisionnode(col=best_criteria[0], value=best_criteria[1], tb=trueBranch, fb=falseBranch)
else:
return decisionnode(results=uniquecounts(rows))
def printtree(tree, indent=''):
if tree.results != None:
print str(tree.results)
else:
print '%s:%s?' % (str(tree.col), str(tree.value))
print indent + 'T->'
printtree(tree.tb, indent + '--')
print indent + 'F->'
printtree(tree.fb, indent + '--')
def getwidth(trees):
if trees.tb is None and trees.fb is None:
return 1
else:
return getwidth(trees.tb) + getwidth(trees.fb)
def getdepth(trees):
if trees.tb is None and trees.fb is None:
return 0
else:
return max(getdepth(trees.tb), getdepth(trees.fb)) + 1
def drawtrees(trees, jpeg='trees.jpg', widdelta=100, depdelta=100):
w = getwidth(trees) * widdelta
h = getdepth(trees) * depdelta + 120
img = Image.new('RGB', (w, h), (255, 255, 255))
draw = ImageDraw.Draw(img)
drawnode(draw, trees, w / 2, 20, widdelta, depdelta)
img.save(jpeg, 'JPEG')
def drawnode(draw, trees, x, y, widdelta=100, depdelta=100):
if trees.results is None:
wf = getwidth(trees.fb) * widdelta
wt = getwidth(trees.tb) * widdelta
left = x - (wf + wt) / 2
right = x + (wf + wt) / 2
if isinstance(trees.value, int) or isinstance(trees.value, float):
draw.text((x - 20, y - 10), '%s:>=%s?\n' %
(str(trees.col), str(trees.value)), (0, 0, 0))
else:
draw.text((x - 20, y - 10), '%s:==%s?\n' %
(str(trees.col), str(trees.value)), (0, 0, 0))
draw.line((x, y, left + wf / 2, y + depdelta), fill=(255, 0, 0))
draw.line((x, y, right - wt / 2, y + depdelta), fill=(255, 0, 0))
drawnode(draw, trees.fb, left + wf / 2,
y + depdelta, widdelta, depdelta)
drawnode(draw, trees.tb, right - wt / 2,
y + depdelta, widdelta, depdelta)
else:
txt = ' \n'.join(['%s:%d' % v for v in trees.results.items()])
draw.text((x - 20, y), txt, (0, 0, 0))
def classify(obs, tree):
if tree.results is not None:
return tree.results
else:
v = obs[tree.col]
branch = None
if isinstance(v, int) or isinstance(v, float):
if v >= tree.value:
branch = tree.tb
else:
branch = tree.fb
else:
if v == tree.value:
branch = tree.tb
else:
branch = tree.fb
return classify(obs, branch)
def prune(tree, mingain):
if tree.tb.results is None:
prune(tree.tb, mingain)
if tree.fb.results is None:
prune(tree.fb, mingain)
if tree.tb.results is not None and tree.fb.results is not None:
tb, fb = [], []
for v, c in tree.tb.results.items():
tb += [[v]] * c
for v, c in tree.fb.results.items():
fb += [[v]] * c
delta = entropy(tb + fb) - (entropy(tb) + entropy(fb)) / 2
if delta < mingain:
tree.tb, tree.fb = None, None
tree.results = uniquecounts(tb + fb)
def mdclassify(obs, tree):
if tree.results is not None:
return tree.results
else:
v = obs[tree.col]
if v is None:
tr, fr = mdclassify(obs, tree.tb), mdclassify(obs, tree.fb)
tcount = sum(tr.values())
fcount = sum(fr.values())
tw = float(tcount)/(tcount+fcount)
fw = float(fcount)/(tcount+fcount)
result = {}
for k,v in tr.items():
result.setdefault(k, v*tw)
for k,v in fr.items():
result.setdefault(k, 0)
result[k] += v*fw
return result
else:
if isinstance(v, int) or isinstance(v, float):
if v>=tree.value: branch = tree.tb
else: branch = tree.fb
else:
if v == tree.value: branch = tree.tb
else: branch = tree.fb
return mdclassify(obs, branch)
def variance(rows):
if len(rows)==0:
return 0
data = [float(row[-1]) for row in rows]
mean = sum(data)/len(data)
variance = sum([(d-mean)**2 for d in data])/len(data)
return variance
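# Minimal end-to-end sketch (added): build, inspect and query a tree from the
# bundled my_data sample. The observations passed to classify()/mdclassify()
# are made up, and drawtrees() needs PIL to write its JPEG.
#
#     tree = buildtree(my_data)
#     printtree(tree)
#     print classify(['(direct)', 'USA', 'yes', 5], tree)
#     print mdclassify(['google', None, 'yes', None], tree)
#     prune(tree, 0.1)
#     drawtrees(tree, jpeg='treeview.jpg')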
|
marineam/coil
|
coil/test/__init__.py
|
Python
|
mit
| 22 | 0 |
"""Test
|
s f
|
or coil."""
|
unicef/rhizome
|
rhizome/tests/test_agg.py
|
Python
|
agpl-3.0
| 38,994 | 0.001795 |
from django.contrib.auth.models import User
from pandas import read_csv, notnull, DataFrame
from numpy import isnan
from django.test import TestCase
from rhizome.models.campaign_models import Campaign, CampaignType, \
DataPointComputed, AggDataPoint
from rhizome.models.location_models import Location, LocationType, \
LocationTree
from rhizome.models.indicator_models import Indicator, IndicatorTag, \
IndicatorToTag, CalculatedIndicatorComponent
from rhizome.models.document_models import Document, SourceSubmission
from rhizome.models.datapoint_models import DataPoint
from rhizome.cache_meta import LocationTreeCache
from rhizome.tests.setup_helpers import TestSetupHelpers
class AggRefreshTestCase(TestCase):
'''
'''
def __init__(self, *args, **kwargs):
super(AggRefreshTestCase, self).__init__(*args, **kwargs)
def setUp(self):
self.ts = TestSetupHelpers()
data_df = read_csv('rhizome/tests/_data/calc_data.csv')
self.create_metadata()
self.user = User.objects.get(username="test")
self.test_df = data_df[data_df['is_raw'] == 1]
self.target_df = data_df[data_df['is_raw'] == 0]
self.campaign_id = Campaign.objects.all()[0].id
self.top_lvl_location = Location.objects.filter(name='Nigeria')[0]
ltr = LocationTreeCache()
ltr.main()
def create_metadata(self):
'''
Creating the Indicator, location, Campaign, meta data needed for the
system to aggregate / caclulate.
'''
read_csv('rhizome/tests/_data/campaigns.csv')
location_df = read_csv('rhizome/tests/_data/locations.csv')
indicator_df = read_csv('rhizome/tests/_data/indicators.csv')
user_id = User.objects.create_user('test', 'john@john.com', 'test').id
self.location_type1 = LocationType.objects.create(admin_level=0,
name="country", id=1)
self.location_type2 = LocationType.objects.create(admin_level=1,
name="province", id=2)
campaign_type1 = CampaignType.objects.create(name='test')
self.locations = self.model_df_to_data(location_df, Location)
self.indicators = self.model_df_to_data(indicator_df, Indicator)
ind_tag = IndicatorTag.objects.create(tag_name='Polio')
sub_tag = IndicatorTag.objects.create(tag_name='Polio Management',
parent_tag_id=ind_tag.id)
ind_to_tag_batch = [IndicatorToTag(
**{'indicator_tag_id': sub_tag.id, 'indicator_id': ind.id}) for ind in self.indicators]
IndicatorToTag.objects.bulk_create(ind_to_tag_batch)
|
self.campaign_id = Campaign.objects.create(
start_date='2016-01-01',
end_date='2016-01-0
|
2',
campaign_type_id=campaign_type1.id
).id
document = Document.objects.create(
doc_title='test',
created_by_id=user_id,
guid='test')
self.ss = SourceSubmission.objects.create(
document_id=document.id,
submission_json='',
row_number=0,
data_date='2016-01-01'
).id
def model_df_to_data(self, model_df, model):
meta_ids = []
non_null_df = model_df.where((notnull(model_df)), None)
list_of_dicts = non_null_df.transpose().to_dict()
for row_ix, row_dict in list_of_dicts.iteritems():
row_id = model.objects.create(**row_dict)
meta_ids.append(row_id)
return meta_ids
def create_raw_datapoints(self):
for row_ix, row_data in self.test_df.iterrows():
dp_id = self.create_datapoint(row_data.location_id, row_data
.data_date, row_data.indicator_id, row_data.value)
# def create_datapoint(self, **kwargs):
def create_datapoint(self, location_id, data_date, indicator_id, value):
'''
Right now this is being performed as a database insert. I would like to
test this against the data entry resource, but this will do for now
in order to test caching.
'''
document_id = Document.objects.get(doc_title='test').id
ss_id = SourceSubmission.objects.get(document_id=document_id).id
dp = DataPoint.objects.create(
location_id=location_id,
data_date=data_date,
indicator_id=indicator_id,
campaign_id=self.campaign_id,
value=value,
source_submission_id=ss_id,
unique_index=str(location_id) + str(data_date) +
str(self.campaign_id) + str(indicator_id)
)
return dp
def test_location_aggregation(self):
'''
Using the calc_data.csv, create a test_df and target_df. Ensure that
the aggregation and calculation are working properly, by ingesting the
stored data, running the cache, and checking that the calculated data
for the aggregate location (parent location, in this case Nigeria) is as
expected.
In addition to the datapoints in the test file, I insert a null value
to ensure that any null won't corrupt the calculation.
python manage.py test rhizome.tests.test_agg.AggRefreshTestCase.
test_location_aggregation --settings=rhizome.settings.test
'''
self.create_raw_datapoints()
indicator_id, data_date, raw_location_id,\
agg_location_id, null_location_id, NaN_location_id = \
22, '2016-01-01', 12910, 12907, 12928, 12913
location_ids = Location.objects.filter(
parent_location_id=agg_location_id).values_list('id', flat=True)
DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id=null_location_id
).update(value=None)
DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id=NaN_location_id
).update(value='NaN')
dps = DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id__in=location_ids,
value__isnull=False
).values_list('id', 'value')
sum_dp_value = sum([y for x, y in dps if not isnan(y)])
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
#################################################
## ensure that raw data gets into AggDataPoint ##
#################################################
raw_value = DataPoint.objects.get(
# data_date = data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
ind_obj = Indicator.objects.get(id=indicator_id)
raw_value_in_agg = AggDataPoint.objects.get(
# data_date = data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
self.assertEqual(raw_value, raw_value_in_agg)
#############################################
## ensure that the aggregated data gets in ##
#############################################
loc_tree_df = DataFrame(list(LocationTree.objects.all().values()))
agg_df = DataFrame(list(AggDataPoint.objects.filter(\
indicator_id=indicator_id,\
campaign_id=self.campaign_id
).values()))
agg_value = AggDataPoint.objects.get(
indicator_id=indicator_id,
campaign_id=self.campaign_id,
location_id=agg_location_id
).value
self.assertEqual(agg_value, sum_dp_value)
######################################################
## ensure that any raw data will override aggregate ##
######################################################
override_value = 909090
agg_override_dp = self.create_datapoint(agg_location_id, data_date,
|
thaim/ansible
|
test/lib/ansible_test/_internal/cloud/aws.py
|
Python
|
mit
| 3,947 | 0.00228 |
"""AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
is_shippable,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloud
|
Provider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or us
|
e the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
|
googleapis/python-service-usage
|
samples/generated_samples/serviceusage_v1_generated_service_usage_list_services_async.py
|
Python
|
apache-2.0
| 1,497 | 0.000668 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not
|
use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, eithe
|
r express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListServices
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-service-usage
# [START serviceusage_v1_generated_ServiceUsage_ListServices_async]
from google.cloud import service_usage_v1
async def sample_list_services():
# Create a client
client = service_usage_v1.ServiceUsageAsyncClient()
# Initialize request argument(s)
request = service_usage_v1.ListServicesRequest(
)
# Make the request
page_result = client.list_services(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END serviceusage_v1_generated_ServiceUsage_ListServices_async]
|
b0nk/botxxy
|
src/oauth2.py
|
Python
|
gpl-2.0
| 23,431 | 0.001536 |
"""
The MIT License
Copyright (c) 2007 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occured.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {
'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret
}
return urllib.urlencode(data)
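# Brief usage sketch (added); the key and secret below are placeholders, not
# real credentials.
#
#     consumer = Consumer(key='my-app-key', secret='my-app-secret')
#     print consumer      # urlencoded oauth_consumer_key / oauth_consumer_secret pair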
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s,
|
keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
        try:
            key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
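# Illustrative round trip (not part of the original module), using placeholder
# credentials: serialize a token with to_string() and rebuild it with
# from_string(); callback_confirmed is simply absent for a plain token.
#
#   token = Token(key='request-token', secret='request-secret')
#   restored = Token.from_string(token.to_string())
#   assert (restored.key, restored.secret) == (token.key, token.secret)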
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
if method is not None:
self.method = method
if url is not None:
self.url = url
if parameters is not None:
self.update(parameters)
@setter
def url(self, value):
parts = urlparse.urlparse(value)
scheme, netl
|
torchhound/projects
|
python/ffi.py
|
Python
|
gpl-3.0
| 264 | 0 |
import ctypes
libc = ctypes.CDLL("/usr/lib/libc.dylib")
print(libc.rand())
print(libc.time())
cPrintF = libc.printf
value = b"I'm a C function!"
print(value)
printValue = ctypes.c_char_p(value)
print(printValue.value)
print(printValue)
cPrintF("%s", printValue)
|
snim2mirror/openihm
|
src/openihm/model/database.py
|
Python
|
lgpl-3.0
| 949 | 0.002107 |
#!/usr/bin/env python
"""
A convenience wrapper around mysql connector.
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
import includes.mysql.connector as connector
import data.database
# refactored to remove duplicate code while
# providing same interface as before.
class Database(data.database.Database):
pass
|
kk1987/pycparser
|
tests/all_tests.py
|
Python
|
bsd-3-clause
| 294 | 0.006803 |
#!/usr/bin/env python
import sys
sys.path.extend(['.', '..'])
import unittest
suite = unittest.TestLoader().loadTestsFromNames(
[
'test_c_lexer',
'test_c_ast',
'test_general',
        'test_c_parser',
    ]
)
unittest.TextTestRunner(verbosity=1).run(suite)
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/__init__.py
|
Python
|
apache-2.0
| 11,660 | 0.001286 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class hostname(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/hostname. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines TLV 137.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "hostname"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"hostname",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/state (container)
YANG Description: State parameters of ISIS TLV 137.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of ISIS TLV 137.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class hostname(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/hostname. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines TLV 137.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "hostname"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(arg
|
ghaskins/obc-peer
|
openchain/peer/bddtests/environment.py
|
Python
|
apache-2.0
| 1,357 | 0.034635 |
from steps.bdd_test_util import cli_call
def after_scenario(context, scenario):
if 'doNotDecompose' in scenario.tags:
print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
else:
if 'compose_yaml' in context:
print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "kill"], expect_success=True)
            context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "rm","-f"], expect_success=True)
# now remove any other containers (chaincodes)
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
if context.compose_returncode == 0:
                # Remove each container
for containerId in context.compose_output.splitlines():
#print("docker rm {0}".format(containerId))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "rm", containerId], expect_success=True)
|
sid88in/incubator-airflow
|
airflow/utils/dates.py
|
Python
|
apache-2.0
| 9,508 | 0.000947 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from airflow.utils import timezone
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta # flake8: noqa: F401 for doctest
import six
from croniter import croniter
cron_presets = {
'@hourly': '0 * * * *',
'@daily': '0 0 * * *',
'@weekly': '0 0 * * 0',
'@monthly': '0 0 1 * *',
'@yearly': '0 0 1 1 *',
}
def date_range(start_date, end_date=None, num=None, delta=None):
"""
Get a set of dates as a list based on a start, end and delta, delta
can be something that can be added to ``datetime.datetime``
or a cron expression as a ``str``
:param start_date: anchor date to start the series from
:type start_date: datetime.datetime
:param end_date: right boundary for the date range
:type end_date: datetime.datetime
    :param num: alternatively to end_date, you can specify the number of
        entries you want in the range. This number can be negative,
output will always be sorted regardless
:type num: int
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),
datetime.datetime(2016, 3, 1, 0, 0)]
"""
if not delta:
return []
if end_date and start_date > end_date:
raise Exception("Wait. start_date needs to be before end_date")
if end_date and num:
raise Exception("Wait. Either specify end_date OR num")
if not end_date and not num:
end_date = timezone.utcnow()
delta_iscron = False
tz = start_date.tzinfo
if isinstance(delta, six.string_types):
delta_iscron = True
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
elif isinstance(delta, timedelta):
delta = abs(delta)
dates = []
if end_date:
if timezone.is_naive(start_date):
end_date = timezone.make_naive(end_date, tz)
while start_date <= end_date:
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
start_date = cron.get_next(datetime)
else:
start_date += delta
else:
for _ in range(abs(num)):
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
if num > 0:
start_date = cron.get_next(datetime)
else:
start_date = cron.get_prev(datetime)
else:
if num > 0:
start_date += delta
else:
start_date -= delta
return sorted(dates)
def round_time(dt, delta, start_date=timezone.make_aware(datetime.min)):
"""
Returns the datetime of the form start_date + i * delta
which is closest to dt for any non-negative integer i.
Note that delta may be a datetime.timedelta or a dateutil.relativedelta
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
    >>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
    datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
"""
if isinstance(delta, six.string_types):
# It's cron based, so it's easy
tz = start_date.tzinfo
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
prev = cron.get_prev(datetime)
if prev == start_date:
return timezone.make_aware(start_date, tz)
else:
return timezone.make_aware(prev, tz)
# Ignore the microseconds of dt
dt -= timedelta(microseconds=dt.microsecond)
# We are looking for a datetime in the form start_date + i * delta
# which is as close as possible to dt. Since delta could be a relative
# delta we don't know its exact length in seconds so we cannot rely on
# division to find i. Instead we employ a binary search algorithm, first
    # finding an upper and lower limit and then bisecting the interval until
# we have found the closest match.
# We first search an upper limit for i for which start_date + upper * delta
# exceeds dt.
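    # Illustrative trace (added comment, not in the original code): with
    # start_date = Jan 1, delta = one day and dt = Jan 6, upper grows
    # 1 -> 2 -> 4 -> 8 (Jan 9 is the first point at or past dt), so the
    # binary search below starts with lower = 4 and upper = 8.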
upper = 1
while start_date + upper * delta < dt:
# To speed up finding an upper limit we grow this exponentially by a
# factor of 2
upper *= 2
# Since upper is the first value for which start_date + upper * delta
    # exceeds dt, upper // 2 is below dt and therefore forms a lower limit
# for the i we are looking for
lower = upper // 2
# We now continue to intersect the interval between
# start_date + lower * delta and start_date + upper * delta
# until we find the closest value
while True:
# Invariant: start + lower * delta < dt <= start + upper * delta
# If start_date + (lower + 1)*delta exceeds dt, then either lower or
# lower+1 has to be the solution we are searching for
if start_date + (lower + 1) * delta >= dt:
# Check if start_date + (lower + 1)*delta or
# start_date + lower*delta is closer to dt and return the solution
if (
(start_date + (lower + 1) * delta) - dt <=
dt - (start_date + lower * delta)):
return start_date + (lower + 1) * delta
else:
return start_date + lower * delta
# We intersect the interval and either replace the lower or upper
# limit with the candidate
candidate = lower + (upper - lower) // 2
if start_date + candidate * delta >= dt:
upper = candidate
else:
lower = candidate
# in the special case when start_date > dt the search for upper will
# immediately stop for upper == 1 which results in lower = upper // 2 = 0
# and this function returns start_date.
def infer_time_unit(time_seconds_arr):
"""
Determine the most appropriate time unit for an array of time durations
specified in seconds.
e.g. 5400 seconds => 'minutes', 36000 seconds =
|
LockScreen/Backend
|
venv/lib/python2.7/site-packages/awscli/customizations/cloudsearchdomain.py
|
Python
|
mit
| 1,074 | 0.000931 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Customizations for the cloudsearchdomain command.
This module customizes the cloudsearchdomain command:
* Add validation that --endpoint-url is required.
"""
def register_cloudsearchdomain(cli):
cli.register_last('calling-command.cloudsearchdomain',
validate_endpoint_url)
def validate_endpoint_url(parsed_globals, **kwargs):
if parsed_globals.endpoint_url is None:
return ValueError(
"--endpoint-url is required for cloudsearchdomain commands")
|
dspaccapeli/bus-arrival
|
visualization/plot_delay_evo.py
|
Python
|
gpl-3.0
| 1,979 | 0.016675 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Description:
    Plot the delay evolution during a run,
    for multiple runs, with the run time
    (in seconds) shown on the X axis.
@author: dspaccapeli
"""
#imports to manage the sql db
import sqlite3 as lite
import pandas as pd
#to make the plot show-up from command line
import matplotlib.pyplot as plt
#to get multiple evolution of delay
from random import shuffle
#connect to the database
db_connection = lite.connect('DATABASE_PATH')
#open the cursor to start querying the database - read ops
read_curs = db_connection.cursor()
#this is line 550
route_id = 2550
#refine query
hh_start = 15
hh_end = 2
wk_start = 1
wk_end = 5
direction = 1
#select all infos for stop equals _n_sqlite
df = pd.read_sql_query("SELECT delay, sch_time-start_time as time, start_time as begin from hsl where route_id=%s and direction=%s or hour>=%s and hour<=%s order by time" % \
(route_id, direction, hh_start, hh_end), db_connection)
#select a list of all the different start_time -> they identify daily departures for a bus (~run_code)
unq_dep = df.begin.unique()
#init 9 plots
for count in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
#take a random run_code
    shuffle(unq_dep)
i=0
for x in unq_dep:
i+=1
#for each run_code
temp = df[df['begin'] == x]
#plot evolution of the delay
plt.plot(temp['time'], temp['delay'], alpha=0.6)
#plt.scatter(temp['time'], temp['delay'], alpha=0.7)
        #up to a max of 10 lines
if i==10:
break
plt.suptitle('Delay progression between %s and %s during the week' % (hh_start, hh_end))
plt.xlabel('run time')
plt.ylabel('delay')
plt.savefig(str(count), ext="png")
plt.clf()
#uncomment if you want to do it cumulatively
#plt.suptitle('Delay progression between %s and %s during the week' % (hh_start, hh_end))
#plt.xlabel('run time')
#plt.ylabel('delay')
#plt.savefig(str(count), ext="png")
#plt.clf()
|
UnrememberMe/pants
|
contrib/node/src/python/pants/contrib/node/subsystems/package_managers.py
|
Python
|
apache-2.0
| 8,798 | 0.007729 |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.contrib.node.subsystems.command import command_gen
LOG = logging.getLogger(__name__)
PACKAGE_MANAGER_NPM = 'npm'
PACKAGE_MANAGER_YARNPKG = 'yarnpkg'
PACKAGE_MANAGER_YARNPKG_ALIAS = 'yarn'
VALID_PACKAGE_MANAGERS = [PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, PACKAGE_MANAGER_YARNPKG_ALIAS]
# TODO: Change to enum type when migrated to Python 3.4+
class PackageInstallationTypeOption(object):
PROD = 'prod'
DEV = 'dev'
PEER = 'peer'
BUNDLE = 'bundle'
OPTIONAL = 'optional'
NO_SAVE = 'not saved'
class PackageInstallationVersionOption(object):
EXACT = 'exact'
TILDE = 'tilde'
class PackageManager(object):
"""Defines node package manager functionalities."""
def __init__(self, name, tool_installations):
self.name = name
self.tool_installations = tool_installations
def _get_installation_args(self, install_optional, production_only, force):
"""Returns command line args for installing package.
:param install_optional: True to request install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:rtype: list of strings
"""
raise NotImplementedError
def _get_run_script_args(self):
"""Returns command line args to run a package.json script.
:rtype: list of strings
"""
raise NotImplementedError
  def _get_add_package_args(self, package, type_option, version_option):
    """Returns command line args to add a node package.
:rtype: list of strings
"""
raise NotImplementedError()
  def run_command(self, args=None, node_paths=None):
    """Returns a command that when executed will run an arbitrary command via package manager."""
return command_gen(
self.tool_installations,
self.name,
args=args,
node_paths=node_paths
)
def install_module(
self,
install_optional=False,
production_only=False,
force=False,
node_paths=None):
"""Returns a command that when executed will install node package.
:param install_optional: True to install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
    :param node_paths: A list of paths that should be included in $PATH when
running installation.
"""
args=self._get_installation_args(
install_optional=install_optional,
production_only=production_only,
force=force)
return self.run_command(args=args, node_paths=node_paths)
def run_script(self, script_name, script_args=None, node_paths=None):
"""Returns a command to execute a package.json script.
    :param script_name: Name of the script to run. Note that script name 'test'
can be used to run node tests.
:param script_args: Args to be passed to package.json script.
    :param node_paths: A list of paths that should be included in $PATH when
running the script.
"""
# TODO: consider add a pants.util function to manipulate command line.
package_manager_args = self._get_run_script_args()
package_manager_args.append(script_name)
if script_args:
package_manager_args.append('--')
package_manager_args.extend(script_args)
return self.run_command(args=package_manager_args, node_paths=node_paths)
def add_package(
self,
package,
node_paths=None,
type_option=PackageInstallationTypeOption.PROD,
version_option=None):
"""Returns a command that when executed will add a node package to current node module.
:param package: string. A valid npm/yarn package description. The accepted forms are
package-name, package-name@version, package-name@tag, file:/folder, file:/path/to.tgz
https://url/to.tgz
    :param node_paths: A list of paths that should be included in $PATH when
running the script.
:param type_option: A value from PackageInstallationTypeOption that indicates the type
of package to be installed. Default to 'prod', which is a production dependency.
:param version_option: A value from PackageInstallationVersionOption that indicates how
to match version. Default to None, which uses package manager default.
"""
args=self._get_add_package_args(
package,
type_option=type_option,
version_option=version_option)
return self.run_command(args=args, node_paths=node_paths)
def run_cli(self, cli, args=None, node_paths=None):
"""Returns a command that when executed will run an installed cli via package manager."""
cli_args = [cli]
if args:
cli_args.append('--')
cli_args.extend(args)
return self.run_command(args=cli_args, node_paths=node_paths)
class PackageManagerYarnpkg(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerYarnpkg, self).__init__(PACKAGE_MANAGER_YARNPKG, tool_installation)
def _get_run_script_args(self):
return ['run']
def _get_installation_args(self, install_optional, production_only, force):
return_args = ['--non-interactive']
if not install_optional:
return_args.append('--ignore-optional')
if production_only:
return_args.append('--production=true')
if force:
return_args.append('--force')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['add', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '', # Yarn save production is the default.
PackageInstallationTypeOption.DEV: '--dev',
PackageInstallationTypeOption.PEER: '--peer',
PackageInstallationTypeOption.OPTIONAL: '--optional',
PackageInstallationTypeOption.BUNDLE: None,
PackageInstallationTypeOption.NO_SAVE: None,
}.get(type_option)
if package_type_option is None:
logging.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
      return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--exact',
      PackageInstallationVersionOption.TILDE: '--tilde',
}.get(version_option)
if package_version_option is None:
LOG.warning(
'{} does not support install with {} version, ignored'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
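# Illustrative sketch (not part of the original module): assuming
# `hypothetical_tool_installations` stands in for whatever installation list the
# surrounding code normally supplies, add_package composes yarn args roughly
# like this:
#
#   yarn = PackageManagerYarnpkg(hypothetical_tool_installations)
#   cmd = yarn.add_package(
#       'left-pad@1.3.0',
#       type_option=PackageInstallationTypeOption.DEV,
#       version_option=PackageInstallationVersionOption.EXACT)
#   # -> underlying args: ['add', 'left-pad@1.3.0', '--dev', '--exact']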
class PackageManagerNpm(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerNpm, self).__init__(PACKAGE_MANAGER_NPM, tool_installation)
def _get_run_script_args(self):
return ['run-script']
def _get_installation_args(self, install_optional, production_only, force):
return_args = ['install']
if not install_optional:
return_args.append('--no-optional')
if production_only:
return_args.append('--production')
if force:
return_args.append('--force')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['install', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '--save-prod',
PackageInstallationTypeOption.DEV: '--save-dev',
PackageInstallationTypeOption.PEER: None,
PackageInstallationTypeOption.OPTIONAL: '--save-optional',
PackageInstallationTypeOption.BUNDLE: '--save-bundle',
PackageInstallationTypeOption.NO_SAVE: '--no-save',
}.get(type_option)
if package_type_option is None:
logging.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(packag
|
darrenbilby/grr
|
lib/plist.py
|
Python
|
apache-2.0
| 5,402 | 0.004813 |
#!/usr/bin/env python
"""Shared classes between the client and the server for plist parsing."""
import calendar
import datetime
from binplist import binplist
from grr.lib import lexer
from grr.lib import objectfilter
class PlistFilterParser(objectfilter.Parser):
"""Plist specific filter parser.
Because we will be filtering dictionaries and the path components will be
matched against dictionary keys, we must be more permissive with attribute
names.
This parser allows path components to be enclosed in double quotes to allow
for spaces, dots or even raw hex-escaped data in them, such as:
"My\x20first\x20path component".2nd."TH.IRD" contains "Google"
We store the attribute name as a list of paths into the object instead of as
a simple string that will be chunked in objectfilter.
"""
tokens = [
# Operators and related tokens
lexer.Token("INITIAL", r"\@[\w._0-9]+",
"ContextOperator,PushState", "CONTEXTOPEN"),
lexer.Token("INITIAL", r"[^\s\(\)]", "PushState,PushBack", "ATTRIBUTE"),
lexer.Token("INITIAL", r"\(", "PushState,BracketOpen", None),
lexer.Token("INITIAL", r"\)", "BracketClose", "BINARY"),
# Context
lexer.Token("CONTEXTOPEN", r"\(", "BracketOpen", "INITIAL"),
# Double quoted string
lexer.Token("STRING", "\"", "PopState,StringFinish", None),
lexer.Token("STRING", r"\\x(..)", "HexEscape", None),
lexer.Token("STRING", r"\\(.)", "StringEscape", None),
lexer.Token("STRING", r"[^\\\"]+", "StringInsert", None),
# Single quoted string
lexer.Token("SQ_STRING", "'", "PopState,StringFinish", None),
lexer.Token("SQ_STRING", r"\\x(..)", "HexEscape", None),
lexer.Token("SQ_STRING", r"\\(.)", "StringEscape", None),
lexer.Token("SQ_STRING", r"[^\\']+", "StringInsert", None),
# Basic expression
lexer.Token("ATTRIBUTE", r"\.", "AddAttributePath", "ATTRIBUTE"),
lexer.Token("ATTRIBUTE", r"\s+", "AddAttributePath", "OPERATOR"),
lexer.Token("ATTRIBUTE", "\"", "PushState,StringStart", "STRING"),
lexer.Token("ATTRIBUTE",
r"[\w_0-9\-]+",
"StringStart,StringInsert",
"ATTRIBUTE"),
lexer.Token("OPERATOR", r"(\w+|[<>!=]=?)", "StoreOperator", "ARG"),
lexer.Token("ARG", r"(\d+\.\d+)", "InsertFloatArg", "ARG"),
lexer.Token("ARG", r"(0x\d+)", "InsertInt16Arg", "ARG"),
lexer.Token("ARG", r"(\d+)", "InsertIntArg", "ARG"),
lexer.Token("ARG", "\"", "PushState,StringStart", "STRING"),
lexer.Token("ARG", "'", "PushState,StringStart", "SQ_STRING"),
# When the last parameter from arg_list has been pushed
# State where binary operators are supported (AND, OR)
lexer.Token("BINARY", r"(?i)(and|or|\&\&|\|\|)",
"BinaryOperator", "INITIAL"),
# - We can also skip spaces
lexer.Token("BINARY", r"\s+", None, None),
# - But if it's not "and" or just spaces we have to go back
lexer.Token("BINARY", ".", "PushBack,PopState", None),
# Skip whitespace.
lexer.Token(".", r"\s+", None, None),
]
def StringFinish(self, **_):
"""StringFinish doesn't act on ATTRIBUTEs here."""
if self.state == "ARG":
return self.InsertArg(string=self.string)
def AddAttributePath(self, **_):
"""Adds a path component to the current attribute."""
attribute_path = self.current_expression.attribute
if not attribute_path:
attribute_path = []
attribute_path.append(self.string)
self.current_expression.SetAttribute(attribute_path)
class PlistExpander(objectfilter.ValueExpander):
"""A custom expander specific to plists."""
def _GetValue(self, obj, attr_name):
try:
      return obj.get(attr_name, None)
except AttributeError:
# This is no dictionary... are we a list of dictionaries?
return [item.get(attr_name, None) for item in obj]
def _AtNonLeaf(self, attr_value, path):
"""Makes dictionaries expandable when dealing with plists."""
if isinstance(attr_value, dict):
for value in self.Expand(attr_value, path[1:]):
yield value
else:
for v in objectfilter.ValueExpander._AtNonLeaf(self, attr_value, path):
        yield v
class PlistFilterImplementation(objectfilter.BaseFilterImplementation):
FILTERS = {}
FILTERS.update(objectfilter.BaseFilterImplementation.FILTERS)
FILTERS.update({"ValueExpander": PlistExpander})
def PlistValueToPlainValue(plist):
"""Takes the plist contents generated by binplist and returns a plain dict.
binplist uses rich types to express some of the plist types. We need to
convert them to types that RDFValueArray will be able to transport.
Args:
plist: A plist to convert.
Returns:
A simple python type.
"""
if isinstance(plist, dict):
ret_value = dict()
for key, value in plist.items():
ret_value[key] = PlistValueToPlainValue(value)
return ret_value
elif isinstance(plist, list):
return [PlistValueToPlainValue(value) for value in plist]
elif isinstance(plist, binplist.RawValue):
return plist.value
elif (isinstance(plist, binplist.CorruptReference)
or isinstance(plist, binplist.UnknownObject)):
return None
elif isinstance(plist, datetime.datetime):
return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond
return plist
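# Illustrative example (not part of the original module): a parsed plist such
# as {'Installed': datetime.datetime(1970, 1, 2)} converts to
# {'Installed': 86400000000}, i.e. microseconds since the epoch, which the
# plain-type consumers mentioned in the docstring can transport.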
|
leovoel/glc.py
|
examples/custom_rendering.py
|
Python
|
mit
| 473 | 0.002114 |
from math import cos, sin, pi
from example_util import get_filename
from glc import Gif
def draw(l, surf, ctx, t):
xpos = cos(t * 2 * pi) * 100 + surf.get_width() * 0.5
    ypos = sin(t * 2 * pi) * 100 + surf.get_height() * 0.5
w, h = 100, 100
ctx.set_source_rgb(0, 0, 0)
ctx.translate(xpos, ypos)
ctx.translate(-w * 0.5, -h * 0.5)
ctx.rectangle(0, 0, w, w)
ctx.fill()
with Gif(get_filename(__file__), after_render=draw) as a:
a.save()
|
sbrichards/rockstor-core
|
src/rockstor/storageadmin/views/share_iscsi.py
|
Python
|
gpl-3.0
| 4,001 | 0.00025 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import (BasicAuthentication,
SessionAuthentication)
from django.db import transaction
from storageadmin.auth import DigestAuthentication
from storageadmin.models import (Share, SambaShare, NFSExport, Disk,
IscsiTarget)
from storageadmin.util import handle_exception
from storageadmin.serializers import IscsiSerializer
from system.iscsi import export_iscsi
from fs.btrfs import mount_share
import logging
logger = logging.getLogger(__name__)
class ShareIscsiView(APIView):
def get(self, request, sname):
try:
share = Share.objects.get(name=sname)
if (IscsiTarget.objects.filter(share=share).exists()):
iscsi_o = IscsiTarget.objects.get(share=share)
iscsi_serializer = IscsiSerializer(iscsi_o)
return Response(iscsi_serializer.data)
return Response()
except Exception, e:
handle_exception(e, request)
@transaction.commit_on_success
def post(self, request, sname):
try:
share = Share.objects.get(name=sname)
if (SambaShare.objects.filter(share=share).exists()):
raise Exception('Already exported via Samba')
if (NFSExport.objects.filter(share=share).exists()):
raise Exception('Already exported via nfs')
if (IscsiTarget.objects.filter(share=share).exists()):
raise Exception('Already exported via iscsi')
options = {
'tname': 'fooscsi',
'tid': -1,
'dev_name': 'iscsi.img',
'dev_size': 10,
}
if ('tname' in request.data):
options['tname'] = request.data['tname']
if ('tid' in request.data):
try:
options['tid'] = int(request.data['tid'])
except:
raise Exception('tid must be an integer')
pool_device = Disk.objects.filter(pool=share.pool)[0].name
mnt_pt = '/mnt2/' + share.name
mount_share(share.name, pool_device, mnt_pt)
dev_name = mnt_pt + '/' + options['dev_name']
export_iscsi(options['tid'], options['tname'], options['tid'],
dev_name, options['dev_size'])
iscsi_target = IscsiTarget(share=share, tid=options['tid'],
tname=options['tname'],
                                       dev_name=dev_name,
dev_size=options['dev_size'])
iscsi_target.save()
iscsi_serializer = IscsiSerializer(iscsi_target)
            return Response(iscsi_serializer.data)
except Exception, e:
handle_exception(e, request)
@transaction.commit_on_success
def delete(self, request, sname):
try:
share = Share.objects.get(name=sname)
iscsi_target = IscsiTarget.objects.get(share=share)
iscsi_target.delete()
return Response()
except Exception, e:
handle_exception(e, request)
|
arnovich/core
|
test/yahoo/py/extract_to_srv.py
|
Python
|
bsd-3-clause
| 2,381 | 0.025619 |
# small script for
from optparse import OptionParser
import sqlite3
import time
import string
import arnovich.core as core
def parse_command_args():
parser = OptionParser()
parser.add_option("-t", "--ticker", action="append", type="string", dest="tickers")
parser.add_option("--from", action="store", type="string", dest="fromdate", default="")
parser.add_option("--to", action="store", type="string", dest="todate", default="")
parser.add_option("--db", action="store", dest="dbfile")
parser.add_option("--wait", action="store", type="int", dest="wait", default=1)
(options, args) = parser.parse_args()
return (options.dbfile, options.tickers, options.fromdate, options.todate, options.wait)
def find_dates(dbfile, tickers, fromdate, todate, wait):
if fromdate != "":
fromtime = time.mktime(time.strptime(fromdate, "%Y-%m-%d %H:%M:%S"))
if todate != "":
totime = time.mktime(time.strptime(todate, "%Y-%m-%d %H:%M:%S"))
connection = core.connection()
sql_tickers = string.join(tickers, "\",\"")
conn = sqlite3.connect(dbfile)
c = conn.cursor()
d = conn.cursor()
c.execute("select ticker_id, ticker from stocks_static_data where ticker in (\""+sql_tickers+"\")")
prevtime = 0
for ticker_id in c:
        #should check if it already exists using get_ticker
srv_id = connection.add_ticker(str(ticker_id[1]))
srv_id_opt = connection.add_ticker(str(ticker_id[1])+"_options")
if (fromdate == "") or (todate == ""):
d.execute("select date, data from stocks_data where ticker_id="+str(ticker_id[0])+" ORDER BY date")
else:
d.execute("select date, data from stocks_data where ticker_id="+str(ticker_id[0])+" and (date > "+str(fromtime)+" AND date < "+str(totime)+")")
for r in d:
rowdate = str(r[0])
            rowdata = str(r[1])
rowtime = float(r[0])
if prevtime == 0:
prevtime = rowtime
connection.push_ticker(srv_id, rowdata)
vcursor = conn.cursor()
vcursor.execute("select data from options_data where ticker_id="+str(ticker_id[0])+" and date="+rowdate)
for row in vcursor:
connection.push_ticker(srv_id_opt, str(row[0]))
#TODO make this better: take exec time into consideration
time.sleep((rowtime-prevtime)/wait)
prevtime = rowtime
c.close()
def main():
(dbfile, tickers, fromdate, todate, wait) = parse_command_args()
find_dates(dbfile, tickers, fromdate, todate, wait)
if __name__ == "__main__":
main()
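# Illustrative invocation (not part of the original script); the database path,
# tickers and dates below are placeholders:
#
#   python extract_to_srv.py --db quotes.db -t AAPL -t GOOG \
#       --from "2014-01-06 09:30:00" --to "2014-01-06 16:00:00" --wait 10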
|
saai/codingbitch
|
twoPointers/minSubArrayLen.py
|
Python
|
mit
| 623 | 0.008026 |
import sys
class Solution:
# @param {integer} s
# @param {integer[]} nums
# @return {integer}
def minSubArrayLen(self, s, nums):
        i = 0
j = -1
n = len(nums)
t = 0
min_len = sys.maxint
while(i<n and j <n):
if t < s:
j += 1
if j >=n :
break
t += nums[j]
else:
if min_len > (j-i+1):
min_len = j-i+1
                t -= nums[i]
                i += 1
if min_len == sys.maxint:
return 0
else:
return min_len
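# Illustrative usage (not part of the original file): for s=7 and
# nums=[2, 3, 1, 2, 4, 3] the shortest subarray with sum >= 7 is [4, 3],
# so the expected result is 2:
#
#   print(Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]))  # -> 2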
|
trueneu/swiss-knife
|
swk_plugins/swk_casp/swk_casp/version.py
|
Python
|
gpl-3.0
| 24 | 0 |
__version__ = "0.0.2a3"
| |
mattjj/pyhsmm-autoregressive
|
autoregressive/util.py
|
Python
|
gpl-2.0
| 1,782 | 0.016835 |
from __future__ import division
import numpy as np
from numpy.lib.stride_tricks import as_strided as ast
### striding data for efficient AR computations
def AR_striding(data,nlags):
    # I had some trouble with views and as_strided, so copy if not contiguous
data = np.asarray(data)
if not data.flags.c_contiguous:
data = data.copy(order='C')
if data.ndim == 1:
data = np.reshape(data,(-1,1))
sz = data.dtype.itemsize
return ast(
data,
shape=(data.shape[0]-nlags,data.shape[1]*(nlags+1)),
strides=(data.shape[1]*sz,sz))
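# Illustrative note (not part of the original module): for data of shape (T, D)
# the strided view has shape (T - nlags, D * (nlags + 1)); e.g.
# AR_striding(np.zeros((10, 2)), nlags=3).shape == (7, 8), each row stacking a
# lagged window next to the current observation.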
def undo_AR_striding(strided_data,nlags):
sz = strided_data.dtype.itemsize
return ast(
strided_data,
shape=(strided_data.shape[0]+nlags,strided_data.shape[1]//(nlags+1)),
strides=(strided_data.shape[1]//(nlags+1)*sz,sz))
### analyzing AR coefficient matrices
def canonical_matrix(A):
# NOTE: throws away affine part
D, nlags, _ = dimensions(A)
mat = np.zeros((D*nlags,D*nlags))
mat[:-D,D:] = np.eye(D*(nlags-1))
mat[-D:,:] = A[:,:D*nlags]
return mat
def eval_siso_transfer_function(A,from_idx,to_idx,freqs):
D, _, _ = dimensions(A)
assert 0 <= from_idx < D and 0 <= to_idx < D
bigA = canonical_matrix(A)
I = np.eye(bigA.shape[0])
zs = np.exp(1j*np.array(freqs))
return np.array(
[np.linalg.inv(z*I-bigA)[-D:,-2*D:-D][to_idx,from_idx]
for z in zs])
def is_affine(A):
return bool(A.shape[1] % A.shape[0])
def is_stable(A):
bigA = canonical_matrix(A)
return np.all(np.abs(np.linalg.eigvals(bigA)) < 1.)
def dimensions(A):
if is_affine(A):
A = A[:,:-1]
D, nlags = A.shape[0], A.shape[1] // A.shape[0]
return D, nlags, is_affine(A)
|
samueljackson92/metaopt
|
python_tests/simulated_annealing_test.py
|
Python
|
mit
| 1,038 | 0.000963 |
import unittest
import numpy as np
import pyoptima as opt
class SimulatedAnnealingTest(unittest.TestCase):
def test_with_parabola(self):
""" Test with a simple parabolic function with 2 variables """
        def neighbour_func(params):
            new_params = params
params['x0'] += np.random.uniform(-1., 1.)
params['x1'] += np.random.uniform(-1., 1.)
return new_params
hyper_params = {
'temperature_func': lambda t, i: t/np.log(i+2),
'neighbour_func': neighbour_func,
'initial_temp': 1000000.0
}
params = {}
params["x0"] = np.random.uniform(-10., 10.)
params["x1"] = np.random.uniform(-10., 10.)
s = opt.SimulatedAnnealing(params, hyper_params)
s.optimize(opt.parabola, 100000)
bst_solution = s.get_best_parameters()
self.assertAlmostEqual(bst_solution['x0'], 0, 2)
self.assertAlmostEqual(bst_solution['x1'], 0, 2)
if __name__ == "__main__":
unittest.main()
|
angr/angr
|
tests/test_strcasecmp.py
|
Python
|
bsd-2-clause
| 780 | 0.00641 |
import angr
import claripy
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_i386():
    p = angr.Project(os.path.join(test_location, 'i386', 'test_strcasecmp'), auto_load_libs=False)
arg1 = claripy.BVS('arg1', 20*8)
s = p.factory.entry_state(args=("test_strcasecmp", arg1))
sm = p.factory.simulation_manager(s)
    sm.explore()
sm.move('deadended', 'found', filter_func=lambda s: b"Welcome" in s.posix.dumps(1))
assert len(sm.found) == 1
f = sm.found[0]
sol = f.solver.eval(arg1, cast_to=bytes)
assert b'\x00' in sol
assert sol[:sol.index(b'\x00')].lower() == b'letmein'
assert b'wchar works' in f.posix.dumps(1)
if __name__ == "__main__":
test_i386()
|
lowellbander/ngVote
|
priorityDB/priorityDB/admin.py
|
Python
|
gpl-2.0
| 418 | 0.014354 |
from django.contrib import admin
from priorityDB.models import *
# Register your models here
# For more information on this file, see
# https://docs.djangoproject.com/en/dev/intro/tutorial02/
class TaskHistoryInline(admin.StackedInline):
model = TaskHistory
extra = 0
class EventAdmin(admin.ModelAdmin):
inlines = [TaskHistoryInline]
admin.site.register(Event, EventAdmin)
admin.site.register(Task)
|
gomezsan/ochopod
|
examples/sanity.py
|
Python
|
apache-2.0
| 2,544 | 0.003931 |
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script illustrates how we handle process level sanity checks. This is an optional feature which lets you
customize how you wa
|
nt ochopod to know what you're running is healthy. You could curl the process or run some script
for instance.
Too many sanity check failures will turn the pod off (which can be seen in the CLI for instance). Just start a local
standalone Zookeeper server and run "python sanity.py".
"""
from ochopod.bindings.generic.marathon import Pod
from ochopod.models.piped import Actor as Piped
if __name__ == '__main__':
class Strategy(Piped):
#
# - by default ochopod will only allow for one single sanity check to fail before turning off the pod
# - you can specify both how many times you are ready to fail and how much time should go by in between
# - here we want to tolerate up to 3 sanity check failures in a row with 5 seconds between each
#
checks = 3
check_every = 5.0
def sanity_check(self, _):
#
# - this optional callback will be invoked by ochopod on a regular basis
# - you can do whatever you want inside and the goal is to not throw
# - you can for instance simply assert if something is not right
# - let's make it fail for the sake of illustration
# - the callback will be invoked (and will blow up) every 5 seconds up to 3 times
#
assert 0, 'let us fail the sanity check just for fun'
def configure(self, _):
#
# - just go to sleep, the point is not to run anything meaningful
# - the sanity-check will keep failing until the pod turns off
#
return 'sleep 3600', {}
#
# - if you run this script locally you will notice the pod will turn off after around 15 seconds
# - simply type CTRL-C to exit
#
Pod().boot(Strategy, local=1)
|
workbandits/gamerocket-python-guide
|
1_create_player/src/app.py
|
Python
|
mit
| 837 | 0.016726 |
import gamerocket
from flask import Flask, request, render_template
app = Flask(__name__)
gamerocket.Configuration.configure(gamerocket.Environment.Development,
                                   apiKey = "your_apiKey",
secretKey = "your_secretKey")
@app.route("/")
def form():
return render_template("form.html")
@app.route("/create_player", methods=["POST"])
def create_player():
result = gamerocket.Player.create({
"name":request.form["name"],
"locale":request.form["locale"]
})
if result.is_success:
return "<h1>Success! Player ID: " + result.player.id + "</h1>"
else:
return "<h1>Error " + result.error + ": " + result.error_description + "</h1>"
if __name__ == '__main__':
app.run(debug=True)
|
nyu-dl/WebNav
|
simple_parser.py
|
Python
|
bsd-3-clause
| 399 | 0.010025 |
'''
Simple parser that extracts a webpage's content and hyperlinks.
'''
import urllib2
import re
class Parser():
def __init__(self):
pass
    def parse(self, url):
        f = urllib2.urlopen(url)
text = f.read() # get page's contents.
#use re.findall to get all the links
links = re.findall('href=[\'"]?([^\'" >]+)', text)
return text, links
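# Illustrative usage (not part of the original file); the URL is a placeholder:
#
#   parser = Parser()
#   text, links = parser.parse('http://example.com/')
#   print(len(links), 'hyperlinks found')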
|
nathanbjenx/cairis
|
cairis/core/ResponseParameters.py
|
Python
|
apache-2.0
| 1,469 | 0.008169 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from . import ObjectCreationParameters
__author__ = 'Shamal Faily'
class ResponseParameters(ObjectCreationParameters.ObjectCreationParameters):
  def __init__(self,respName,respRisk,tags,cProps,rType):
ObjectCreationParameters.ObjectCreationParameters.__init__(self)
self.theName = respName
self.theTags = tags
self.theRisk = respRisk
self.theEnvironmentProperties = cProps
self.theResponseType = rType
def name(self): return self.theName
def tags(self): return self.theTags
def risk(self): return self.theRisk
def environmentProperties(self): return self.theEnvironmentProperties
  def responseType(self): return self.theResponseType
|
Azure/azure-sdk-for-python
|
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_spark_configuration_operations.py
|
Python
|
mit
| 6,172 | 0.004213 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
spark_configuration_name: str,
workspace_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sparkconfigurations/{sparkConfigurationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class SparkConfigurationOperations(object):
"""SparkConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
spark_configuration_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SparkConfigurationResource":
"""Get SparkConfiguration by name.
Get SparkConfiguration by name in a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param spark_configuration_name: SparkConfiguration name.
:type spark_configuration_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkConfigurationResource, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.SparkConfigurationResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SparkConfigurationResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
spark_configuration_name=spark_configuration_name,
workspace_name=workspace_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SparkConfigurationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sparkconfigurations/{sparkConfigurationName}'} # type: ignore
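    # Illustrative call (not part of the generated client), assuming an
    # authenticated management client that exposes this operations group as
    # `spark_configuration`; all names below are placeholders:
    #
    #   resource = client.spark_configuration.get(
    #       resource_group_name='my-rg',
    #       spark_configuration_name='my-config',
    #       workspace_name='my-workspace')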
|
scaphe/lettuce-dirty
|
tests/integration/django/couves/leaves/views.py
|
Python
|
gpl-3.0
| 872 | 0.001148 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2011> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse
def index(request):
return HttpResponse('OK')
|
KAMI911/loec
|
examples/Sharpen/binaries-windows-python26/PcfFontFile.py
|
Python
|
gpl-3.0
| 6,642 | 0.004968 |
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id: PcfFontFile.py 2134 2004-10-06 08:55:20Z fredrik $
#
# portable compiled font file parser
#
# history:
# 1997-08-19 fl created
# 2003-09-13 fl fixed loading of unicode fonts
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import Image
import FontFile
import string
# --------------------------------------------------------------------
# declarations
PCF_MAGIC = 0x70636601 # "\x01fcp"
PCF_PROPERTIES = (1<<0)
PCF_ACCELERATORS = (1<<1)
PCF_METRICS = (1<<2)
PCF_BITMAPS = (1<<3)
PCF_INK_METRICS = (1<<4)
PCF_BDF_ENCODINGS = (1<<5)
PCF_SWIDTHS = (1<<6)
PCF_GLYPH_NAMES = (1<<7)
PCF_BDF_ACCELERATORS = (1<<8)
BYTES_PER_ROW = [
lambda bits: ((bits+7) >> 3),
lambda bits: ((bits+15) >> 3) & ~1,
lambda bits: ((bits+31) >> 3) & ~3,
lambda bits: ((bits+63) >> 3) & ~7,
]
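# The helpers below read integers and strings from the raw file data (Python 2
# str semantics): l16/l32 decode little-endian 16/32-bit values, b16/b32 decode
# big-endian ones, and sz() returns the NUL-terminated string starting at offset o.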
def l16(c):
return ord(c[0]) + (ord(c[1])<<8)
def l32(c):
return ord(c[0]) + (ord(c[1])<<8) + (ord(c[2])<<16) + (ord(c[3])<<24)
def b16(c):
return ord(c[1]) + (ord(c[0])<<8)
def b32(c):
return ord(c[3]) + (ord(c[2])<<8) + (ord(c[1])<<16) + (ord(c[0])<<24)
def sz(s, o):
return s[o:string.index(s, "\0", o)]
##
# Font file plugin for the X11 PCF format.
class PcfFontFile(FontFile.FontFile):
name = "name"
def __init__(self, fp):
magic = l32(fp.read(4))
if magic != PCF_MAGIC:
raise SyntaxError, "not a PCF file"
FontFile.FontFile.__init__(self)
count = l32(fp.read(4))
self.toc = {}
for i in range(count):
type = l32(fp.read(4))
self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))
self.fp = fp
self.info = self._load_properties()
metrics = self._load_metrics()
bitmaps = self._load_bitmaps(metrics)
encoding = self._load_encoding()
#
# create glyph structure
for ch in range(256):
ix = encoding[ch]
if ix is not None:
x, y, l, r, w, a, d, f = metrics[ix]
glyph = (w, 0), (l, d-y, x+l, d), (0, 0, x, y), bitmaps[ix]
self.glyph[ch] = glyph
def _getformat(self, tag):
format, size, offset = self.toc[tag]
fp = self.fp
fp.seek(offset)
format = l32(fp.read(4))
if format & 4:
i16, i32 = b16, b32
else:
i16, i32 = l16, l32
return fp, format, i16, i32
def _load_properties(self):
#
# font properties
properties = {}
fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)
nprops = i32(fp.read(4))
# read property description
p = []
for i in range(nprops):
p.append((i32(fp.read(4)), ord(fp.read(1)), i32(fp.read(4))))
if nprops & 3:
            fp.seek(4 - (nprops & 3), 1) # pad
data = fp.read(i32(fp.read(4)))
for k, s, v in p:
k = sz(data, k)
if s:
v = sz(data, v)
properties[k] = v
return properties
def _load_metrics(self):
#
# font metrics
metrics = []
fp, format, i16, i32 = self._getformat(PCF_METRICS)
append = metrics.append
if (format & 0xff00) == 0x100:
# "compressed" metrics
for i in range(i16(fp.read(2))):
left = ord(fp.read(1)) - 128
right = ord(fp.read(1)) - 128
width = ord(fp.read(1)) - 128
ascent = ord(fp.read(1)) - 128
descent = ord(fp.read(1)) - 128
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, 0)
)
else:
# "jumbo" metrics
for i in range(i32(fp.read(4))):
left = i16(fp.read(2))
right = i16(fp.read(2))
width = i16(fp.read(2))
ascent = i16(fp.read(2))
descent = i16(fp.read(2))
attributes = i16(fp.read(2))
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, attributes)
)
return metrics
def _load_bitmaps(self, metrics):
#
# bitmap data
bitmaps = []
fp, format, i16, i32 = self._getformat(PCF_BITMAPS)
nbitmaps = i32(fp.read(4))
if nbitmaps != len(metrics):
raise IOError, "Wrong number of bitmaps"
offsets = []
for i in range(nbitmaps):
offsets.append(i32(fp.read(4)))
bitmapSizes = []
for i in range(4):
bitmapSizes.append(i32(fp.read(4)))
byteorder = format & 4 # non-zero => MSB
bitorder = format & 8 # non-zero => MSB
padindex = format & 3
bitmapsize = bitmapSizes[padindex]
offsets.append(bitmapsize)
data = fp.read(bitmapsize)
pad = BYTES_PER_ROW[padindex]
mode = "1;R"
if bitorder:
mode = "1"
for i in range(nbitmaps):
x, y, l, r, w, a, d, f = metrics[i]
b, e = offsets[i], offsets[i+1]
bitmaps.append(
Image.fromstring("1", (x, y), data[b:e], "raw", mode, pad(x))
)
return bitmaps
def _load_encoding(self):
# map character code to bitmap index
encoding = [None] * 256
fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)
firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2))
firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2))
default = i16(fp.read(2))
nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1)
for i in range(nencoding):
encodingOffset = i16(fp.read(2))
if encodingOffset != 0xFFFF:
try:
encoding[i+firstCol] = encodingOffset
except IndexError:
break # only load ISO-8859-1 glyphs
return encoding
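# Hypothetical usage sketch (not part of the original file): assuming the legacy
# PIL module layout implied by the imports above, a PCF font could be compiled to
# PIL's .pil format roughly as follows. save() is inherited from FontFile.FontFile;
# the filename is illustrative only.
if __name__ == "__main__":
    fp = open("courier.pcf", "rb")
    font = PcfFontFile(fp)
    font.save("courier.pcf")  # writes courier.pil plus the accompanying bitmap image
    fp.close()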
|