repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_generated/v7_2/aio/_configuration.py | Python | mit | 2,282 | 0.005697
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
VERSION = "unknown"
class KeyVaultClientConfiguration(Configuration):
"""Configuration for KeyVaultClient.
Note that all parameters used to create this instance are saved as instance attributes.
:keyword api_version: Api Version. The default value is "7.2". Note that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
**kwargs: Any
) -> None:
super(KeyVaultClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "7.2") # type: str
self.api_version = api_version
kwargs.setdefault('sdk_moniker', 'keyvault/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
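The row above ends here. As a quick, illustrative sketch (not part of the dataset row), this is how the generated configuration class is typically exercised, assuming `azure-core` is installed; the override shown is hypothetical:

```python
# Illustrative sketch only -- exercises KeyVaultClientConfiguration as defined above.
from azure.core.pipeline import policies

config = KeyVaultClientConfiguration()                 # api_version defaults to "7.2"
assert config.api_version == "7.2"
assert isinstance(config.retry_policy, policies.AsyncRetryPolicy)

# Any pipeline policy can be swapped out through the same keyword arguments:
config = KeyVaultClientConfiguration(
    retry_policy=policies.AsyncRetryPolicy(retry_total=3),
)
```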
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.py | Python | mit | 43,332 | 0.002285
"""Tests for the array padding functions.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
TestCase)
from numpy.lib import pad
class TestConditionalShortcuts(TestCase):
def test_zero_padding_shortcuts(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(0, 0) for axis in test.shape]
modes = ['constant',
'edge',
'linear_ramp',
'maximum',
'mean',
'median',
'minimum',
'reflect',
'symmetric',
'wrap',
]
for mode in modes:
assert_array_equal(test, pad(test, pad_amt, mode=mode))
def test_shallow_statistic_range(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(1, 1) for axis in test.shape]
modes = ['maximum',
'mean',
'median',
'minimum',
]
for mode in modes:
assert_array_equal(pad(test, pad_amt, mode='edge'),
pad(test, pad_amt, mode=mode, stat_length=1))
def test_clip_statistic_range(self):
test = np.arange(30).reshape(5, 6)
pad_amt = [(3, 3) for axis in test.shape]
modes = ['maximum',
'mean',
'median',
'minimum',
]
for mode in modes:
assert_array_equal(pad(test, pad_amt, mode=mode),
pad(test, pad_amt, mode=mode, stat_length=30))
class TestStatistic(TestCase):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
b = np.array(
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
])
assert_array_equal(a, b)
def test_check_maximum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'maximum')
b = np.array(
[99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
)
assert_array_equal(a, b)
def test_check_maximum_2(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum')
b = np.array(
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_maximum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum', stat_length=10)
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_minimum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'minimum')
b = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_minimum_2(self):
a = np.arange(100) + 2
a = pad(a, (25, 20), 'minimum')
b = np.array(
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(a, b)
def test_check_minimum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'minimum', stat_length=10)
b = np.array(
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
ppizarror/Hero-of-Antair | data/images/pil/ImageOps.py | Python | gpl-2.0 | 13,229 | 0.00189
#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import operator
##
# (New in 1.1.3) The <b>ImageOps</b> module contains a number of
# 'ready-made' image processing operations. This module is somewhat
# experimental, and most operators only work on L and RGB images.
#
# @since 1.1.3
##
#
# helpers
def _border(border):
if type(border) is type(()):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color, mode):
if Image.isStringType(color):
import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image, lut):
if image.mode == "P":
# FIXME: apply to lookup table, not image data
raise NotImplementedError("mode P support coming soon")
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
raise IOError, "not supported for this image mode"
#
# actions
##
# Maximize (normalize) image contrast. This function calculates a
# histogram of the input image, removes <i>cutoff</i> percent of the
# lightest and darkest pixels from the histogram, and remaps the image
# so that the darkest pixel becomes black (0), and the lightest
# becomes white (255).
#
# @param image The image to process.
# @param cutoff How many percent to cut off from the histogram.
# @param ignore The background pixel value (use None for no background).
# @return An image.
def autocontrast(image, cutoff=0, ignore=None):
"Maximize image contrast, based on histogram"
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer:layer+256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff / 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(range(256))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
##
# Colorize grayscale image. The <i>black</i> and <i>white</i>
# arguments should be RGB tuples; this function calculates a colour
# wedge mapping all black pixels in the source image to the first
# colour, and all white pixels to the second colour.
#
# @param image The image to colourize.
# @param black The colour to use for black input pixels.
# @param white The colour to use for white input pixels.
# @return An image.
def colorize(image, black, white):
"Colorize a grayscale image"
assert image.mode == "L"
black = _color(black, "RGB")
white = _color(white, "RGB")
red = []; green = []; blue = []
for i in range(256):
red.append(black[0]+i*(white[0]-black[0])/255)
green.append(black[1]+i*(white[1]-black[1])/255)
blue.append(black[2]+i*(white[2]-black[2])/255)
image = image.convert("RGB")
return _lut(image, red + green + blue)
##
# Remove border from image. The same amount of pixels are removed
# from all four sides. This function works on all image modes.
#
# @param image The image to crop.
# @param border The number of pixels to remove.
# @return An image.
# @see Image#Image.crop
def crop(image, border=0):
"Crop border off image"
left, top, right, bottom = _border(border)
return image.crop(
(left, top, image.size[0]-right, image.size[1]-bottom)
)
##
# Deform the image.
#
# @param image The image to deform.
# @param deformer A deformer object. Any object that implements a
# <b>getmesh</b> method can be used.
# @param resample What resampling filter to use.
# @return An image.
def deform(image, deformer, resample=Image.BILINEAR):
"Deform image using the given deformer"
return image.transform(
image.size, Image.MESH, deformer.getmesh(image), resample
)
##
# Equalize the image histogram. This function applies a non-linear
# mapping to the input image, in order to create a uniform
# distribution of grayscale values in the output image.
#
# @param image The image to equalize.
# @param mask An optional mask. If given, only the pixels selected by
# the mask are included in the analysis.
# @return An image.
def equalize(image, mask=None):
"Equalize image histogram"
if image.mode == "P":
image = image.convert("RGB")
h = image.histogram(mask)
lut = []
for b in range(0, len(h), 256):
histo = filter(None, h[b:b+256])
if len(histo) <= 1:
lut.extend(range(256))
else:
step = (reduce(operator.add, histo) - histo[-1]) / 255
if not step:
lut.extend(range(256))
else:
n = step / 2
for i in range(256):
lut.append(n / step)
n = n + h[i+b]
return _lut(image, lut)
##
# Add border to the image
#
# @param image The image to expand.
# @param border Border width, in pixels.
# @param fill Pixel fill value (a colour value). Default is 0 (black).
# @return An image.
def expand(image, border=0, fill=0):
"Add border to image"
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
out = Image.new(image.mode, (width, height), _color(fill, image.mode))
out.paste(image, (left, top))
return out
##
# Returns a sized and cropped version of the image, cropped to the
# requested aspect ratio and size.
# <p>
# The <b>fit</b> function was contributed by Kevin Cazabon.
#
# @param size The requested output size in pixels, given as a
# (width, height) tuple.
# @param method What resampling method to use. Default is Image.NEAREST.
# @param bleed Remove a border around the outside of the image (from all
# four edges. The value is a decimal percentage (use 0.01 for one
# percent). The default value is 0 (no border).
# @param centering Control the cropping position. Use (0.5, 0.5) for
# center cropping (e.g. if cropping the width, take 50% off of the
# left side, and therefore 50
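The `fit` doc-comment above is cut off by the row's prefix limit. Purely as an illustration of the operations these comments describe (classic Python 2 PIL imports, as used by this file; the file name and colours are made up):

```python
# Illustrative only -- not part of the dataset row above.
import Image       # classic PIL module layout, as in the file above
import ImageOps

im = Image.open("photo.jpg")                       # hypothetical input image
stretched = ImageOps.autocontrast(im, cutoff=2)    # clip 2% from both histogram ends
tinted = ImageOps.colorize(im.convert("L"), "navy", "ivory")
framed = ImageOps.expand(im, border=10, fill="black")
```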
masahir0y/buildroot-yamada | support/scripts/cpedb.py | Python | gpl-2.0 | 7,878 | 0.003427
#!/usr/bin/env python3
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element, SubElement
import gzip
import os
import requests
import time
from xml.dom import minidom
VALID_REFS = ['VENDOR', 'VERSION', 'CHANGE_LOG', 'PRODUCT', 'PROJECT', 'ADVISORY']
CPEDB_URL = "https://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.gz"
ns = {
'': 'http://cpe.mitre.org/dictionary/2.0',
'cpe-23': 'http://scap.nist.gov/schema/cpe-extension/2.3',
'xml': 'http://www.w3.org/XML/1998/namespace'
}
class CPE:
def __init__(self, cpe_str, titles, refs):
self.cpe_str = cpe_str
self.titles = titles
self.references = refs
self.cpe_cur_ver = "".join(self.cpe_str.split(":")[5:6])
def update_xml_dict(self):
ET.register_namespace('', 'http://cpe.mitre.org/dictionary/2.0')
cpes = Element('cpe-list')
cpes.set('xmlns:cpe-23', "http://scap.nist.gov/schema/cpe-extension/2.3")
cpes.set('xmlns:ns6', "http://scap.nist.gov/schema/scap-core/0.1")
cpes.set('xmlns:scap-core', "http://scap.nist.gov/schema/scap-core/0.3")
cpes.set('xmlns:config', "http://scap.nist.gov/schema/configuration/0.1")
cpes.set('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance")
cpes.set('xmlns:meta', "http://scap.nist.gov/schema/cpe-dictionary-metadata/0.2")
cpes.set('xsi:schemaLocation', " ".join(["http://scap.nist.gov/schema/cpe-extension/2.3",
"https://scap.nist.gov/schema/cpe/2.3/cpe-dictionary-extension_2.3.xsd",
"http://cpe.mitre.org/dictionary/2.0",
"https://scap.nist.gov/schema/cpe/2.3/cpe-dictionary_2.3.xsd",
"http://scap.nist.gov/schema/cpe-dictionary-metadata/0.2",
"https://scap.nist.gov/schema/cpe/2.1/cpe-dictionary-metadata_0.2.xsd",
"http://scap.nist.gov/schema/scap-core/0.3",
"https://scap.nist.gov/schema/nvd/scap-core_0.3.xsd",
"http://scap.nist.gov/schema/configuration/0.1",
"https://scap.nist.gov/schema/nvd/configuration_0.1.xsd",
"http://scap.nist.gov/schema/scap-core/0.1",
"https://scap.nist.gov/schema/nvd/scap-core_0.1.xsd"]))
item = SubElement(cpes, 'cpe-item')
cpe_short_name = CPE.short_name(self.cpe_str)
cpe_new_ver = CPE.version_update(self.cpe_str)
item.set('name', 'cpe:/' + cpe_short_name)
self.titles[0].text.replace(self.cpe_cur_ver, cpe_new_ver)
for title in self.titles:
item.append(title)
if self.references:
item.append(self.references)
cpe23item = SubElement(item, 'cpe-23:cpe23-item')
cpe23item.set('name', self.cpe_str)
# Generate the XML as a string
xmlstr = ET.tostring(cpes)
# And use minidom to pretty print the XML
return minidom.parseString(xmlstr).toprettyxml(encoding="utf-8").decode("utf-8")
@staticmethod
def version(cpe):
return cpe.split(":")[5]
@staticmethod
def product(cpe):
return cpe.split(":")[4]
@staticmethod
def short_name(cpe):
return ":".join(cpe.split(":")[2:6])
@staticmethod
def version_update(cpe):
return ":".join(cpe.split(":")[5:6])
@staticmethod
def no_version(cpe):
return ":".join(cpe.split(":")[:5])
class CPEDB:
def __init__(self, nvd_path):
self.all_cpes = dict()
self.all_cpes_no_version = dict()
self.nvd_path = nvd_path
def get_xml_dict(self):
print("CPE: Setting up NIST dictionary")
if not os.path.exists(os.path.join(self.nvd_path, "cpe")):
os.makedirs(os.path.join(self.nvd_path, "cpe"))
cpe_dict_local = os.path.join(self.nvd_path, "cpe", os.path.basename(CPEDB_URL))
if not os.path.exists(cpe_dict_local) or os.stat(cpe_dict_local).st_mtime < time.time() - 86400:
print("CPE: Fetching xml manifest from [" + CPEDB_URL + "]")
cpe_dict = requests.get(CPEDB_URL)
open(cpe_dict_local, "wb").write(cpe_dict.content)
print("CPE: Unzipping xml manifest...")
nist_cpe_file = gzip.GzipFile(fileobj=open(cpe_dict_local, 'rb'))
print("CPE: Converting xml manifest to dict...")
tree = ET.parse(nist_cpe_file)
all_cpedb = tree.getroot()
self.parse_dict(all_cpedb)
def parse_dict(self, all_cpedb):
# Cycle through the dict and build two dicts to be used for custom
# lookups of partial and complete CPE objects
# The objects are then used to create new proposed XML updates if
# it is determined one is required
# Out of the different language titles, select English
for cpe in all_cpedb.findall(".//{http://cpe.mitre.org/dictionary/2.0}cpe-item"):
cpe_titles = []
for title in cpe.findall('.//{http://cpe.mitre.org/dictionary/2.0}title[@xml:lang="en-US"]', ns):
title.tail = None
cpe_titles.append(title)
# Some older CPEs don't include references; if they do, make
# sure we handle the case of one ref needing to be packed
# in a list
cpe_ref = cpe.find(".//{http://cpe.mitre.org/dictionary/2.0}references")
if cpe_ref:
for ref in cpe_ref.findall(".//{http://cpe.mitre.org/dictionary/2.0}reference"):
ref.tail = None
ref.text = ref.text.upper()
if ref.text not in VALID_REFS:
ref.text = ref.text + "-- UPDATE this entry, here are some examples and just one word should be used -- " + ' '.join(VALID_REFS) # noqa E501
cpe_ref.tail = None
cpe_ref.text = None
cpe_str = cpe.find(".//{http://scap.nist.gov/schema/cpe-extension/2.3}cpe23-item").get('name')
item = CPE(cpe_str, cpe_titles, cpe_ref)
cpe_str_no_version = CPE.no_version(cpe_str)
# This dict must have a unique key for every CPE version
# which allows matching to the specific obj data of that
# NIST dict entry
self.all_cpes.update({cpe_str: item})
# This dict has one entry for every CPE (w/o version) to allow
# partial match (no valid version) check (the obj is saved and
# used as seed for suggested xml updates. By updating the same
# non-version'd entry, it assumes the last update here is the
# latest version in the NIST dict)
self.all_cpes_no_version.update({cpe_str_no_version: item})
def find_partial(self, cpe_str):
cpe_str_no_version = CPE.no_version(cpe_str)
if cpe_str_no_version in self.all_cpes_no_version:
return cpe_str_no_version
def find_partial_obj(self, cpe_str):
cpe_str_no_version = CPE.no_version(cpe_str)
if cpe_str_no_version in self.all_cpes_no_version:
return self.all_cpes_no_version[cpe_str_no_version]
def find_partial_latest_version(self, cpe_str_partial):
cpe_obj = self.find_partial_obj(cpe_str_partial)
return cpe_obj.cpe_cur_ver
def find(self, cpe_str):
if self.find_partial(cpe_str):
if cpe_str in self.all_cpes:
return cpe_str
def gen_update_xml(self, cpe_str):
cpe = self.find_partial_obj(cpe_str)
return cpe.update_xml_dict()
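As a worked example of the slicing helpers above (the CPE string is made up for illustration):

```python
# Illustrative only: how the static helpers above carve up a CPE 2.3 string.
cpe = "cpe:2.3:a:openssl:openssl:1.1.1k:*:*:*:*:*:*:*"
assert CPE.product(cpe) == "openssl"                       # field 4
assert CPE.version(cpe) == "1.1.1k"                        # field 5
assert CPE.short_name(cpe) == "a:openssl:openssl:1.1.1k"   # fields 2-5
assert CPE.no_version(cpe) == "cpe:2.3:a:openssl:openssl"  # fields 0-4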
UUDigitalHumanitieslab/timealign | annotations/management/commands/export_fragments.py | Python | mit | 1,823 | 0.003291
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from annotations.models import Corpus
from annotations.exports import export_fragments
from core.utils import CSV, XLSX
class Command(BaseCommand):
help = 'Exports existing Fragments for the given Corpus and Languages'
def add_arguments(self, parser):
parser.add_argument('corpus', type=str)
parser.add_argument('languages', nargs='+', type=str)
parser.add_argument('--add_lemmata', action='store_true', dest='add_lemmata', default=False)
parser.add_argument('--add_indices', action='store_true', dest='add_indices', default=False)
parser.add_argument('--xlsx', action='store_true', dest='format_xlsx', default=False)
parser.add_argument('--doc', dest='document')
parser.add_argument('--formal_structure')
def handle(self, *args, **options):
# Retrieve the Corpus from the database
try:
corpus = Corpus.objects.get(title=options['corpus'])
except Corpus.DoesNotExist:
raise CommandError('Corpus with title {} does not exist'.format(options['corpus']))
format_ = XLSX if options['format_xlsx'] else CSV
for language in options['languages']:
if not corpus.languages.filter(iso=language):
raise CommandError('Language {} does not exist'.format(language))
filename = 'fragments_{lang}.{ext}'.format(lang=language, ext=format_)
export_fragments(filename, format_, corpus, language,
document=options['document'],
add_lemmata=options['add_lemmata'],
add_indices=options['add_indices'],
formal_structure=options['formal_structure'])
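A hypothetical invocation of the management command defined above; the corpus title and language codes are placeholders:

```python
# Roughly equivalent to:
#   python manage.py export_fragments "MyCorpus" en nl --add_lemmata --xlsx
from django.core.management import call_command

call_command("export_fragments", "MyCorpus", "en", "nl",
             add_lemmata=True, format_xlsx=True)
```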
joshtechnologygroup/dark-engine | engine/dark_matter/search_engine/similarity_checker.py | Python | gpl-3.0 | 9,843 | 0.003353
# Python/NLTK implementation of algorithm to detect similarity between
# short sentences described in the paper - "Sentence Similarity based
# on Semantic Nets and Corpus Statistics" by Li, et al.
# Results achieved are NOT identical to those reported in the paper, but
# this is very likely due to differences between the way the algorithm was
# described in the paper and the way I implemented it.
from __future__ import division
import nltk
from nltk.corpus import wordnet as wn
from nltk.corpus import brown
import math
import numpy as np
import sys
# Parameters to the algorithm. Currently set to values that was reported
# in the paper to produce "best" results.
ALPHA = 0.2
BETA = 0.45
ETA = 0.4
PHI = 0.2
DELTA = 0.85
brown_freqs = dict()
N = 0
######################### word similarity ##########################
def get_best_synset_pair(word_1, word_2):
"""
Choose the pair with highest path similarity among all pairs.
Mimics pattern-seeking behavior of humans.
"""
max_sim = -1.0
synsets_1 = wn.synsets(word_1)
synsets_2 = wn.synsets(word_2)
if len(synsets_1) == 0 or len(synsets_2) == 0:
return None, None
else:
max_sim = -1.0
best_pair = None, None
for synset_1 in synsets_1:
for synset_2 in synsets_2:
sim = wn.path_similarity(synset_1, synset_2)
if sim > max_sim:
max_sim = sim
best_pair = synset_1, synset_2
return best_pair
def length_dist(synset_1, synset_2):
"""
Return a measure of the length of the shortest path in the semantic
ontology (Wordnet in our case as well as the paper's) between two
synsets.
"""
l_dist = sys.maxint
if synset_1 is None or synset_2 is None:
return 0.0
if synset_1 == synset_2:
# if synset_1 and synset_2 are the same synset return 0
l_dist = 0.0
else:
wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
if len(wset_1.intersection(wset_2)) > 0:
# if synset_1 != synset_2 but there is word overlap, return 1.0
l_dist = 1.0
else:
# just compute the shortest path between the two
l_dist = synset_1.shortest_path_distance(synset_2)
if l_dist is None:
l_dist = 0.0
# normalize path length to the range [0,1]
return math.exp(-ALPHA * l_dist)
def hierarchy_dist(synset_1, synset_2):
"""
Return a measure of depth in the ontology to model the fact that
nodes closer to the root are broader and have less semantic similarity
than nodes further away from the root.
"""
h_dist = sys.maxint
if synset_1 is None or synset_2 is None:
return h_dist
if synset_1 == synset_2:
# return the depth of one of synset_1 or synset_2
h_dist = max([x[1] for x in synset_1.hypernym_distances()])
else:
# find the max depth of least common subsumer
hypernyms_1 = {x[0]:x[1] for x in synset_1.hypernym_distances()}
hypernyms_2 = {x[0]:x[1] for x in synset_2.hypernym_distances()}
lcs_candidates = set(hypernyms_1.keys()).intersection(
set(hypernyms_2.keys()))
if len(lcs_candidates) > 0:
lcs_dists = []
for lcs_candidate in lcs_candidates:
lcs_d1 = 0
if hypernyms_1.has_key(lcs_candidate):
lcs_d1 = hypernyms_1[lcs_candidate]
lcs_d2 = 0
if hypernyms_2.has_key(lcs_candidate):
lcs_d2 = hypernyms_2[lcs_candidate]
lcs_dists.append(max([lcs_d1, lcs_d2]))
h_dist = max(lcs_dists)
else:
h_dist = 0
return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /
(math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))
def word_similarity(word_1, word_2):
synset_pair = get_best_synset_pair(word_1, word_2)
return (length_dist(synset_pair[0], synset_pair[1]) *
hierarchy_dist(synset_pair[0], synset_pair[1]))
######################### sentence similarity ##########################
def most_similar_word(word, word_set):
"""
Find the word in the joint word set that is most similar to the word
passed in. We use the algorithm above to compute word similarity between
the word and each word in the joint word set, and return the most similar
word and the actual similarity value.
"""
max_sim = -1.0
sim_word = ""
for ref_word in word_set:
sim = word_similarity(word, ref_word)
if sim > max_sim:
max_sim = sim
sim_word = ref_word
return sim_word, max_sim
def info_content(lookup_word):
"""
Uses the Brown corpus available in NLTK to calculate a Laplace
smoothed frequency distribution of words, then uses this information
to compute the information content of the lookup_word.
"""
global N
if N == 0:
# poor man's lazy evaluation
for sent in brown.sents():
for word in sent:
word = word.lower()
if not brown_freqs.has_key(word):
brown_freqs[word] = 0
brown_freqs[word] = brown_freqs[word] + 1
N = N + 1
lookup_word = lookup_word.lower()
n = 0 if not brown_freqs.has_key(lookup_word) else brown_freqs[lookup_word]
return 1.0 - (math.log(n + 1) / math.log(N + 1))
def semantic_vector(words, joint_words, info_content_norm):
"""
Computes the semantic vector of a sentence. The sentence is passed in as
a collection of words. The size of the semantic vector is the same as the
size of the joint word set. The elements are 1 if a word in the sentence
already exists in the joint word set, or the similarity of the word to the
most similar word in the joint word set if it doesn't. Both values are
further normalized by the word's (and similar word's) information content
if info_content_norm is True.
"""
sent_set = set(words)
semvec = np.zeros(len(joint_words))
i = 0
for joint_word in joint_words:
if joint_word in sent_set:
# if word in union exists in the sentence, s(i) = 1 (unnormalized)
semvec[i] = 1.0
if info_content_norm:
semvec[i] = semvec[i] * math.pow(info_content(joint_word), 2)
else:
# find the most similar word in the joint set and set the sim value
sim_word, max_sim = most_similar_word(joint_word, sent_set)
semvec[i] = PHI if max_sim > PHI else 0.0
if info_content_norm:
semvec[i] = semvec[i] * info_content(joint_word) * info_content(sim_word)
i = i + 1
return semvec
def semantic_similarity(sentence_1, sentence_2, info_content_norm):
"""
Computes the semantic similarity between two sentences as the cosine
similarity between the semantic vectors computed for each sentence.
"""
words_1 = nltk.word_tokenize(sentence_1)
words_2 = nltk.word_tokenize(sentence_2)
joint_words = set(words_1).union(set(words_2))
vec_1 = semantic_vector(words_1, joint_words, info_content_norm)
vec_2 = semantic_vector(words_2, joint_words, info_content_norm)
return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))
######################### word order similarity ##########################
def word_order_vector(words, joint_words, windex):
"""
Computes the word order vector for a sentence. The sentence is passed
in as a collection of words. The size of the word order vector is the
same as the size of the joint word set. The elements of the word order
vector are the position mapping (from the windex dictionary) of the
word in the joint set if the word exists in the sentence. If the word
does not exist in the sentence, then the value of the element is the
position of the most similar word in the sentence as long as the similarity
is above the threshold ETA.
"""
wovec = np.zeros(len(joint_words))
i = 0
wordse
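The file is cut off above, inside `word_order_vector`. A minimal usage sketch of the parts that are shown (Python 2, like the file itself; assumes NLTK with the WordNet and Brown corpora and tokenizer data installed):

```python
# Illustrative only -- exercises the functions defined above.
s1 = "A quick brown dog jumps over the lazy fox."
s2 = "A fast brown fox leaps over a lazy dog."

print word_similarity("dog", "fox")        # path/depth-based word similarity
print semantic_similarity(s1, s2, False)   # cosine of the two semantic vectors
print semantic_similarity(s1, s2, True)    # same, weighted by information content
```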
victorclf/jcc-web | server/test/controller/test_partition.py | Python | agpl-3.0 | 2,710 | 0.007749
import unittest
import os
import shutil
import filecmp
import pytest
import marks
import options
import util
from controller.partition import PartitionController
class PartitionControllerTest(unittest.TestCase):
TEST_DATA_PATH = os.path.join(os.getcwd(), 'testdata')
def setUp(self):
self.controller = PartitionController()
self.projectOwner = 'victorclf'
self.projectName = 'jcc-web-persontest'
self.projectId = '%s/%s' % (self.projectOwner, self.projectName)
self.pullRequestId = 1
self.pullPath = os.path.join(options.PULL_REQUESTS_PATH, self.projectOwner, self.projectName, str(self.pullRequestId))
self.tearDown()
util.makedirsIfNotExists(options.PULL_REQUESTS_PATH)
shutil.copytree(os.path.join(self.TEST_DATA_PATH, self.projectOwner),
os.path.join(options.PULL_REQUESTS_PATH, self.projectOwner),
ignore=lambda root,files: [f for f in files if f.endswith('.old')])
def tearDown(self):
shutil.rmtree(options.PULL_REQUESTS_PATH, True)
shutil.rmtree(os.path.join(os.getcwd(), 'workspace'), True)
@marks.slow
def testDownloadPullRequest(self):
shutil.rmtree(options.PULL_REQUESTS_PATH, True)
self.assertFalse(os.path.exists(os.path.join(self.pullPath)))
self.assertTrue(self.controller._downloadPullRequestFromGitHub(self.projectId, self.pullRequestId))
self.assertTrue(os.path.exists(os.path.join(self.pullPath)))
self.assertFalse(self.controller._downloadPullRequestFromGitHub(self.projectId, self.pullRequestId))
@marks.slow
def testPartitionPullRequest(self):
self.controller._partitionPullRequest(self.projectId, self.pullRequestId)
self.assertTrue(os.path.exists(os.path.join(self.pullPath, options.PARTITION_RESULTS_FOLDER_NAME)))
self.assertTrue(os.path.exists(os.path.join(self.pullPath, options.PARTITION_RESULTS_FOLDER_NAME, 'partitions.csv')))
for root, dirs, files in os.walk(self.pullPath):
for f in files:
if f.endswith('java'):
oldF = os.path.join(root, f) + '.old'
self.assertTrue(os.path.exists(oldF))
expectedOldF = os.path.join(self.TEST_DATA_PATH, '..', os.path.relpath(oldF))
self.assertTrue(filecmp.cmp(oldF, expectedOldF, False))
self.assertFalse(filecmp.cmp(oldF, os.path.join(root, f), False))
def testGetPartitionJSON(self):
pJSON = self.controller.getPartitionJSON(self.projectOwner, self.projectName, self.pullRequestId)
self.assertTrue(pJSON)
boto/botocore | tests/unit/test_http_session.py | Python | apache-2.0 | 18,102 | 0.000221
import socket
import pytest
from urllib3.exceptions import (
NewConnectionError,
ProtocolError,
ProxyError,
)
from tests import mock, unittest
from botocore.awsrequest import (
AWSRequest,
AWSHTTPConnectionPool,
AWSHTTPSConnectionPool,
)
from botocore.httpsession import (
get_cert_path,
mask_proxy_url,
URLLib3Session,
ProxyConfiguration,
)
from botocore.exceptions import (
ConnectionClosedError,
EndpointConnectionError,
ProxyConnectionError,
)
class TestProxyConfiguration(unittest.TestCase):
def setUp(self):
self.url = 'http://localhost/'
self.auth_url = 'http://user:pass@localhost/'
self.proxy_config = ProxyConfiguration(
proxies={'http': 'http://localhost:8081/'}
)
def update_http_proxy(self, url):
self.proxy_config = ProxyConfiguration(
proxies={'http': url}
)
def test_construct_proxy_headers_with_auth(self):
headers = self.proxy_config.proxy_headers_for(self.auth_url)
proxy_auth = headers.get('Proxy-Authorization')
self.assertEqual('Basic dXNlcjpwYXNz', proxy_auth)
def test_construct_proxy_headers_without_auth(self):
headers = self.proxy_config.proxy_headers_for(self.url)
self.assertEqual({}, headers)
def test_proxy_for_url_no_slashes(self):
self.update_http_proxy('localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
def test_proxy_for_url_no_protocol(self):
self.update_http_proxy('//localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
def test_fix_proxy_url_has_protocol_http(self):
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
class TestHttpSessionUtils(unittest.TestCase):
def test_get_cert_path_path(self):
path = '/some/path'
cert_path = get_cert_path(path)
self.assertEqual(path, cert_path)
def test_get_cert_path_certifi_or_default(self):
with mock.patch('botocore.httpsession.where') as where:
path = '/bundle/path'
where.return_value = path
cert_path = get_cert_path(True)
self.assertEqual(path, cert_path)
@pytest.mark.parametrize(
'proxy_url, expected_mask_url',
(
(
'http://myproxy.amazonaws.com',
'http://myproxy.amazonaws.com'
),
(
'http://user@myproxy.amazonaws.com',
'http://***@myproxy.amazonaws.com'
),
(
'http://user:pass@myproxy.amazonaws.com',
'http://***:***@myproxy.amazonaws.com'
),
(
'https://user:pass@myproxy.amazonaws.com',
'https://***:***@myproxy.amazonaws.com'
),
(
'http://user:pass@localhost',
'http://***:***@localhost'
),
(
'http://user:pass@localhost:80',
'http://***:***@localhost:80'
),
(
'http://user:pass@userpass.com',
'http://***:***@userpass.com'
),
(
'http://user:pass@192.168.1.1',
'http://***:***@192.168.1.1'
),
(
'http://user:pass@[::1]',
'http://***:***@[::1]'
),
(
'http://user:pass@[::1]:80',
'http://***:***@[::1]:80'
),
)
)
def test_mask_proxy_url(proxy_url, expected_mask_url):
assert mask_proxy_url(proxy_url) == expected_mask_url
class TestURLLib3Session(unittest.TestCase):
def setUp(self):
self.request = AWSRequest(
method='GET',
url='http://example.com/',
headers={},
data=b'',
)
self.response = mock.Mock()
self.response.headers = {}
self.response.stream.return_value = b''
self.pool_manager = mock.Mock()
self.connection = mock.Mock()
self.connection.urlopen.return_value = self.response
self.pool_manager.connection_from_url.return_value = self.connection
self.pool_patch = mock.patch('botocore.httpsession.PoolManager')
self.proxy_patch = mock.patch('botocore.httpsession.proxy_from_url')
self.pool_manager_cls = self.pool_patch.start()
self.proxy_manager_fun = self.proxy_patch.start()
self.pool_manager_cls.return_value = self.pool_manager
self.proxy_manager_fun.return_value = self.pool_manager
def tearDown(self):
self.pool_patch.stop()
self.proxy_patch.stop()
def assert_request_sent(self, headers=None, body=None, url='/', chunked=False):
if headers is None:
headers = {}
self.connection.urlopen.assert_called_once_with(
method=self.request.method,
url=url,
body=body,
headers=headers,
retries=mock.ANY,
assert_same_host=False,
preload_content=False,
decode_content=False,
chunked=chunked,
)
def _assert_manager_call(self, manager, *assert_args, **assert_kwargs):
call_kwargs = {
'strict': True,
'maxsize': mock.ANY,
'timeout': mock.ANY,
'ssl_context': mock.ANY,
'socket_options': [],
'cert_file': None,
'key_file': None,
}
call_kwargs.update(assert_kwargs)
manager.assert_called_with(*assert_args, **call_kwargs)
def assert_pool_manager_call(self, *args, **kwargs):
self._assert_manager_call(self.pool_manager_cls, *args, **kwargs)
def assert_proxy_manager_call(self, *args, **kwargs):
self._assert_manager_call(self.proxy_manager_fun, *args, **kwargs)
def test_forwards_max_pool_size(self):
URLLib3Session(max_pool_connections=22)
self.assert_pool_manager_call(maxsize=22)
def test_forwards_client_cert(self):
URLLib3Session(client_cert='/some/cert')
self.assert_pool_manager_call(cert_file='/some/cert', key_file=None)
def test_forwards_client_cert_and_key_tuple(self):
cert = ('/some/cert', '/some/key')
URLLib3Session(client_cert=cert)
self.assert_pool_manager_call(cert_file=cert[0], key_file=cert[1])
def test_proxies_config_settings(self):
proxies = {'http': 'http://proxy.com'}
proxies_config = {
'proxy_ca_bundle': 'path/to/bundle',
'proxy_client_cert': ('path/to/cert', 'path/to/key'),
'proxy_use_forwarding_for_https': False,
}
use_forwarding = proxies_config['proxy_use_forwarding_for_https']
with mock.patch('botocore.httpsession.create_urllib3_context'):
session = URLLib3Session(
proxies=proxies,
proxies_config=proxies_config
)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
proxy_ssl_context=mock.ANY,
use_forwarding_for_https=use_forwarding
)
self.assert_request_sent(url=self.request.url)
def test_proxies_config_settings_unknown_config(self):
proxies = {'http': 'http://proxy.com'}
proxies_config = {
'proxy_ca_bundle': None,
'proxy_client_cert': None,
'proxy_use_forwarding_for_https': True,
'proxy_not_a_real_arg': 'do not pass'
}
use_forwarding = proxies_config['proxy_use_forwarding_for_https']
session = URLLib3Session(
proxies=proxies,
proxies_config=proxies_config
)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
use_forwarding_for_https=use_forwarding
)
Victordeleon/os-data-importers | eu-structural-funds/common/config.py | Python | mit | 3,382 | 0.002365
"""Pipeline configuration parameters."""
from os.path import dirname, abspath, join
from sqlalchemy import create_engine
OS_TYPES_URL = ('https://raw.githubusercontent.com/'
'openspending/os-types/master/src/os-types.json')
PIPELINE_FILE = 'pipeline-spec.yaml'
SOURCE_DATAPACKAGE_FILE = 'source.datapackage.json'
SOURCE_FILE = 'source.description.yaml'
STATUS_FILE = 'pipeline-status.json'
SCRAPER_FILE = 'scraper.py'
SOURCE_ZIP = 'source.datapackage.zip'
FISCAL_ZIP_FILE = 'fiscal.datapackage.zip'
SOURCE_DB = 'source.db.xlsx'
DATAPACKAGE_FILE = 'datapackage.json'
ROOT_DIR = abspath(join(dirname(__file__), '..'))
DATA_DIR = join(ROOT_DIR, 'data')
SPECIFICATIONS_DIR = join(ROOT_DIR, 'specifications')
PROCESSORS_DIR = join(ROOT_DIR, 'common', 'processors')
CODELISTS_DIR = join(ROOT_DIR, 'codelists')
DROPBOX_DIR = join(ROOT_DIR, 'dropbox')
GEOCODES_FILE = join(ROOT_DIR, 'geography', 'geocodes.nuts.csv')
FISCAL_SCHEMA_FILE = join(SPECIFICATIONS_DIR, 'fiscal.schema.yaml')
FISCAL_MODEL_FILE = join(SPECIFICATIONS_DIR, 'fiscal.model.yaml')
FISCAL_METADATA_FILE = join(SPECIFICATIONS_DIR, 'fiscal.metadata.yaml')
DEFAULT_PIPELINE_FILE = join(SPECIFICATIONS_DIR, 'default-pipeline-spec.yaml')
TEMPLATE_SCRAPER_FILE = join(PROCESSORS_DIR, 'scraper_template.py')
DESCRIPTION_SCHEMA_FILE = join(SPECIFICATIONS_DIR, 'source.schema.json')
TEMPLATE_SOURCE_FILE = join(SPECIFICATIONS_DIR, SOURCE_FILE)
LOCAL_PATH_EXTRACTOR = 'ingest_local_file'
REMOTE_CSV_EXTRACTOR = 'simple_remote_source'
REMOTE_EXCEL_EXTRACTOR = 'stream_remote_excel'
DATAPACKAGE_MUTATOR = 'mutate_datapackage'
DB_URI = 'sqlite:///{}/metrics.sqlite'
DB_ENGINE = create_engine(DB_URI.format(ROOT_DIR))
VERBOSE = False
LOG_SAMPLE_SIZE = 15
JSON_FORMAT = dict(indent=4, ensure_ascii=False, default=repr)
SNIFFER_SAMPLE_SIZE = 5000
SNIFFER_MAX_FAILURE_RATIO = 0.01
IGNORED_FIELD_TAG = '_ignored'
UNKNOWN_FIELD_TAG = '_unknown'
WARNING_CUTOFF = 10
NUMBER_FORMATS = [
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ','},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': '.'},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ' '},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': ' '},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ''},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': '`'},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': '\''},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': ' '},
]
DATE_FORMATS = [
{'format': '%Y'},
{'format': '%d/%m/%Y'},
{'format': '%d//%m/%Y'},
{'format': '%d-%b-%Y'}, # abbreviated month
{'format': '%d-%b-%y'}, # abbreviated month
{'format': '%d. %b %y'}, # abbreviated month
{'format': '%b %y'}, # abbreviated month
{'format': '%d/%m/%y'},
{'format': '%d-%m-%Y'},
{'format': '%Y-%m-%d'},
{'format': '%y-%m-%d'},
{'format': '%y.%m.%d'},
{'format': '%Y.%m.%d'},
{'format': '%d.%m.%Y'},
{'format': '%d.%m.%y'},
{'format': '%d.%m.%Y %H:%M'},
{'format': '%Y-%m-%d %H:%M:%S'},
{'format': '%Y-%m-%d %H:%M:%S.%f'},
{'format': '%Y-%m-%dT%H:%M:%SZ'},
{'format': '%m/%d/%Y'},
{'format': '%m/%Y'},
{'format': '%y'},
]
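The `format` entries above are plain `strptime` patterns; a small illustrative check:

```python
# Illustrative only: parsing a sample value with one of the DATE_FORMATS entries.
from datetime import datetime

fmt = {'format': '%d.%m.%Y %H:%M'}
print(datetime.strptime('26.01.2014 14:51', fmt['format']))  # 2014-01-26 14:51:00
```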
lotem/rime.py | weasel/weasel.py | Python | gpl-3.0 | 9,656 | 0.003598
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:set et sts=4 sw=4:
__all__ = (
"WeaselSession",
"WeaselService",
"service",
)
import logging
import logging.config
import os
import time
import threading
logfile = os.path.join(os.path.dirname(__file__), "logging.conf")
logging.config.fileConfig(logfile)
logger = logging.getLogger("weasel")
import ibus
from core import *
from engine import *
import storage
def add_text(actions, msg, field, text):
actions.add(u'ctx')
(s, attrs, cursor) = text
msg.append(u'ctx.%s=%s\n' % (field, s))
if attrs:
msg.append(u'ctx.%s.attr.length=%d\n' % (field, len(attrs)))
for i in range(len(attrs)):
(extent, type_) = attrs[i]
msg.append(u'ctx.%s.attr.%d.range=%d,%d\n' % (field, i, extent[0], extent[1]))
msg.append(u'ctx.%s.attr.%d.type=%s\n' % (field, i, type_))
if cursor:
msg.append(u'ctx.%s.cursor=%d,%d\n' % (field, cursor[0], cursor[1]))
def add_cand(actions, msg, cand_info):
actions.add(u'ctx')
(current_page, total_pages, cursor, cands) = cand_info
n = len(cands)
msg.append(u'ctx.cand.length=%d\n' % n)
for i in range(n):
msg.append(u'ctx.cand.%d=%s\n' % (i, cands[i][0]))
msg.append(u'ctx.cand.cursor=%d\n' % cursor)
msg.append(u'ctx.cand.page=%d/%d\n' % (current_page, total_pages))
#msg.append(u'ctx.cand.current_page=%d\n' % current_page)
#msg.append(u'ctx.cand.total_pages=%d\n' % total_pages)
class WeaselSession:
'''Weasel (小狼毫) session.
Handles the interaction between the Rime algorithm engine and the Weasel front end.
'''
def __init__(self, params=None):
logger.info("init weasel session: %s", params)
self.__page_size = storage.DB.read_setting(u'Option/PageSize') or 5
self.__lookup_table = ibus.LookupTable(self.__page_size)
self.__clear()
self.__backend = Engine(self, params)
def __clear(self):
self.__commit = None
self.__preedit = None
self.__aux = None
self.__cand = None
def process_key_event(self, keycode, mask):
'''Process a keyboard event.'''
logger.debug("process_key_event: '%s'(%x), %08x" % \
(keysyms.keycode_to_name(keycode), keycode, mask))
self.__clear()
taken = self.__backend.process_key_event(KeyEvent(keycode, mask))
return taken
def get_response(self):
'''Generate the response message.'''
actions = set()
msg = list()
if self.__commit:
actions.add(u'commit')
msg.append(u'commit=%s\n' % u''.join(self.__commit))
if self.__preedit:
add_text(actions, msg, u'preedit', self.__preedit)
if self.__aux:
add_text(actions, msg, u'aux', self.__aux)
if self.__cand:
add_cand(actions, msg, self.__cand)
#self.__clear()
if not actions:
return u'action=noop\n.\n'
else:
# starts with an action list
msg.insert(0, u'action=%s\n' % u','.join(sorted(actions)))
# ends with a single dot
msg.append(u'.\n')
return u''.join(msg)
# implement a frontend proxy for rime engine
def commit_string(self, s):
'''Commit text to the client.'''
logger.debug(u'commit: [%s]' % s)
if self.__commit:
self.__commit.append(s)
else:
self.__commit = [s]
def update_preedit(self, s, start=0, end=0):
'''Update the preedit (composition) string.
[start, end) defines the highlighted range within the string.
'''
if start < end:
logger.debug(u'preedit: [%s[%s]%s]' % (s[:start], s[start:end], s[end:]))
else:
logger.debug(u'preedit: [%s]' % s)
#attrs = [((start, end), u'HIGHLIGHTED')] if start < end else None
#self.__preedit = (s, attrs)
cursor = (start, end) if start < end else None
self.__preedit = (s, None, cursor)
def update_aux(self, s, start=0, end=0):
'''Update the auxiliary string.
[start, end) defines the highlighted range within the string.
'''
if start < end:
logger.debug(u'aux: [%s[%s]%s]' % (s[:start], s[start:end], s[end:]))
else:
logger.debug(u'aux: [%s]' % s)
cursor = (start, end) if start < end else None
self.__aux = (s, None, cursor)
def update_candidates(self, candidates):
'''Update the candidate list.'''
self.__lookup_table.clean()
self.__lookup_table.show_cursor(False)
if not candidates:
self.__cand = (0, 0, 0, [])
else:
for c in candidates:
self.__lookup_table.append_candidate(ibus.Text(c[0]))
self.__update_page()
def __update_page(self):
candidates = self.__lookup_table.get_candidates_in_current_page()
n = self.__lookup_table.get_number_of_candidates()
c = self.__lookup_table.get_cursor_pos()
p = self.__lookup_table.get_page_size()
current_page = c / p
total_pages = (n + p - 1) / p
cands = [(x.get_text(), None) for x in candidates]
self.__cand = (current_page, total_pages, c % p, cands)
def page_up(self):
if self.__lookup_table.page_up():
#print u'page up.'
self.__update_page()
return True
return False
def page_down(self):
if self.__lookup_table.page_down():
#print u'page down.'
self.__update_page()
return True
return False
def cursor_up(self):
if self.__lookup_table.cursor_up():
#print u'cursor up.'
self.__update_page()
return True
return False
def cursor_down(self):
if self.__lookup_table.cursor_down():
#print u'cursor down.'
self.__update_page()
return True
return False
def get_candidate_index(self, number):
if number >= self.__page_size:
return -1
index = number + self.__lookup_table.get_current_page_start()
#print u'cand index = %d' % index
return index
def get_highlighted_candidate_index(self):
index = self.__lookup_table.get_cursor_pos()
#print u'highlighted cand index = %d' % index
return index
class WeaselService:
'''Weasel (小狼毫) algorithm service.
Manages a set of sessions; each session object holds one algorithm engine
instance and serves input requests from one IME front end.
'''
SESSION_EXPIRE_TIME = 3 * 60 # 3 min.
def __init__(self):
self.__sessions = dict()
self.__timer = None
def cleanup(self):
'''Clear all sessions.'''
logger.info("cleaning up %d remaining sessions." % len(self.__sessions))
self.cancel_check()
self.__sessions.clear()
def schedule_next_check(self):
self.cancel_check()
self.__timer = threading.Timer(WeaselService.SESSION_EXPIRE_TIME + 10, \
lambda: self.check_stale_sessions())
self.__timer.start()
def cancel_check(self):
if self.__timer:
self.__timer.cancel()
self.__timer = None
def check_stale_sessions(self):
'''Check for expired sessions.'''
logger.info("check_stale_sessions...")
expire_time = time.time() - WeaselService.SESSION_EXPIRE_TIME
for sid in self.__sessions.keys():
if self.__sessions[sid].last_active_time < expire_time:
logger.info("removing stale session #%x." % sid)
self.destroy_session(sid)
# there are still active sessions; schedule the next check
self.__timer = None
if self.__sessions:
self.schedule_next_check()
def has_session(self, sid):
'''Check whether the session with the given id exists.'''
if sid in self.__sessions:
return True
else:
return False
def get_session(self, sid):
'''Look up a session object by its id,
e.g. in order to forward key events to it.
'''
if sid in self.__sessions:
session = self.__sessions[sid]
session.last_active_time = time.time()
return session
else:
return None
def create_session(self):
'''Create a session.
Called when the IME front end activates the input method.
Returns the session id (a positive integer).
'''
try:
session = WeaselSession()
session.last_active_time = time.time()
except Exception, e:
rahul-ramadas/BagOfTricks | InsertMarkdownLink.py | Python | unlicense | 1,987 | 0.003523
import sublime
import sublime_plugin
MARKDOWN_LINK_SNIPPET = "[${{1:{}}}](${{2:{}}})"
class InsertMarkdownLinkCommand(sublime_plugin.TextCommand):
def decode_page(self, page_bytes, potential_encoding=None):
if potential_encoding:
try:
text = page_bytes.decode(potential_encoding)
return text
except:
pass
encodings_to_try = ["utf-8", "iso-8859-1"]
for encoding in encodings_to_try:
if encoding == potential_encoding:
continue
try:
text = page_bytes.decode(encoding)
return text
except:
pass
raise UnicodeDecodeError
def run(self, edit):
import re
def on_done(link):
import urllib.request
request = urllib.request.Request(link, headers={'User-Agent' : 'Google Internal-Only Browser'})
with urllib.request.urlopen(request) as page:
encoding = page.headers.get_content_charset()
text = self.decode_page(page.read(), encoding)
match = re.search("<title>(.+?)</title>", text, re.IGNORECASE | re.DOTALL)
if match is None:
title = link
else:
title = match.group(1).strip()
markdown_link = MARKDOWN_LINK_SNIPPET.format(title, link)
self.view.run_command("insert_snippet", {"contents": markdown_link})
clipboard_text = sublime.get_clipboard(2000)
if re.match("https?://", clipboard_text, re.IGNORECASE) is not None:
initial_text = clipboard_text
else:
initial_text = ""
input_view = self.view.window().show_input_panel("Link", initial_text, on_done, None, None)
input_view.sel().clear()
input_view.sel().add(sublime.Region(0, input_view.size()))
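For reference, this is what `MARKDOWN_LINK_SNIPPET` expands to: the doubled braces keep the `${1:...}`/`${2:...}` snippet fields literal while `.format()` fills in the title and link (the title and URL are made up):

```python
# Illustrative only, using the same format string as the plugin above.
MARKDOWN_LINK_SNIPPET = "[${{1:{}}}](${{2:{}}})"
print(MARKDOWN_LINK_SNIPPET.format("Example Title", "https://example.com"))
# -> [${1:Example Title}](${2:https://example.com})
```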
ostree/plaso | tests/parsers/mac_keychain.py | Python | apache-2.0 | 3,604 | 0.00111
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Keychain password database parser."""
import unittest
from plaso.formatters import mac_keychain as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import mac_keychain
from tests.parsers import test_lib
class MacKeychainParserTest(test_lib.ParserTestCase):
"""Tests for keychain file parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._parser = mac_keychain.KeychainParser()
def testParse(self):
"""Tests the Parse function."""
test_file = self._GetTestFilePath([u'login.keychain'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 5)
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:51:48')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc,
eventdata.EventTimestamp.CREATION_TIME)
self.assertEqual(event_object.entry_name, u'Secret Application')
self.assertEqual(event_object.account_name, u'moxilo')
expected_ssgp = (
u'b8e44863af1cb0785b89681d22e2721997ccfb8adb8853e726aff94c8830b05a')
self.assertEqual(event_object.ssgp_hash, expected_ssgp)
self.assertEqual(event_object.text_description, u'N/A')
expected_msg = u'Name: Secret Application Account: moxilo'
expected_msg_short = u'Secret Application'
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[1]
self.assertEqual(
event_object.timestamp_desc,
eventdata.EventTimestamp.MODIFICATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:52:29')
self.assertEqual(event_object.timestamp, expected_timestamp)
event_object = event_objects[2]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:53:29')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.entry_name, u'Secret Note')
self.assertEqual(event_object.text_description, u'secure note')
self.assertEqual(len(event_object.ssgp_hash), 1696)
expected_msg = u'Name: Secret Note'
expected_msg_short = u'Secret Note'
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[3]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:54:33')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.entry_name, u'plaso.kiddaland.net')
self.assertEqual(event_object.account_name, u'MrMoreno')
expected_ssgp = (
u'83ccacf55a8cb656d340ec405e9d8b308fac54bb79c5c9b0219bd0d700c3c521')
self.assertEqual(event_object.ssgp_hash, expected_ssgp)
self.assertEqual(event_object.where, u'plaso.kiddaland.net')
self.assertEqual(event_object.protocol, u'http')
self.assertEqual(event_object.type_protocol, u'dflt')
self.assertEqual(event_object.text_description, u'N/A')
expected_msg = (
u'Name: plaso.kiddaland.net '
u'Account: MrMoreno '
u'Where: plaso.kiddaland.net '
u'Protocol: http (dflt)')
expected_msg_short = u'plaso.kiddaland.net'
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
TrondKjeldas/knxmonitor | test/test_KnxParser.py | Python | gpl-2.0 | 4,428 | 0.007227
import unittest
from time import mktime, strptime
from knxmonitor.Knx.KnxParser import KnxParser
class TestKnxParser(unittest.TestCase):
def setUp(self):
self.parser = KnxParser("enheter.xml", "groupaddresses.csv", False, False,
{ "1/1/14" : "onoff",
"1/1/15" : "temp",
"1/1/16" : "time",
"1/1/17" : "%%"})
def test_init(self):
p = KnxParser("enheter.xml", "groupaddresses.csv", False, False,
{ "1/1/14" : "onoff",
"1/1/15" : "temp",
"1/1/16" : "time",
"1/1/17" : "%%"})
self.assertIsInstance(p, KnxParser)
def test_setTimeBase(self):
basetime = mktime(strptime("Fri Sep 4 06:15:03 2015",
"%a %b %d %H:%M:%S %Y"))
try:
self.parser.setTimeBase(basetime)
except:
self.fail("setTimeBase raised exception")
def test_parseVbusOutput(self):
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 1)
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 2)
self.parser.parseVbusOutput(2, "Fri Dec 10 14:08:59 2010", "Fri Dec 10 14:08:59 2010:LPDU: B0 FF FF 00 00 E3 00 C0 11 1B 66 :L_Data system from 15.15.255 to 0/0/0 hops: 06 T_DATA_XXX_REQ A_IndividualAddress_Write 1.1.27")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 2)
self.parser.parseVbusOutput(3, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 2/7/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 2)
@unittest.skip("Cache functionality not finished yet.")
def test_storeCachedInput(self):
pass
def test_getStreamMinMaxValues(self):
self.assertEqual(self.parser.getStreamMinMaxValues("1/1/15"), (None, None))
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.assertEqual(self.parser.getStreamMinMaxValues("1/1/15"), ("-15.37","5.11"))
self.assertEqual(self.parser.getStreamMinMaxValues("666/1/15"), (None, None))
def test_printStreams(self):
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.parser.printStreams(["1/1/15"])
@unittest.skip("Does not play well with Travis CI environment at the moment...")
def test_plotStreams(self):
basetime = mktime(strptime("Fri Sep 4 06:15:00 2015",
"%a %b %d %H:%M:%S %Y"))
self.parser.setTimeBase(basetime)
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:06 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.parser.plotStreams(["1/1/15"], "testimg.png", 0.0)
if __name__ == '__main__':
unittest.main()
|
nagisa/Feeds
|
gdist/gschemas.py
|
Python
|
gpl-2.0
| 2,161 | 0.001851 |
import glob
import os
from distutils.dep_util import newer
from distutils.core import Command
from distutils.spawn import find_executable
from distutils.util import change_root
class build_gschemas(Command):
"""build message catalog files
Build message catalog (.mo) files from .po files using xgettext
and intltool. These are placed directly in the build tree.
"""
description = "build gschemas used for dconf"
user_options = []
build_base = None
def initialize_options(self):
pass
def finalize_options(self):
self.gschemas_directory = self.distribution.gschemas
self.set_undefined_options('build', ('build_base', 'build_base'))
def run(self):
if find_executable("glib-compile-schemas") is None:
raise SystemExit("Error: 'glib-compile-schemas' not found.")
basepath = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
self.copy_tree(self.gschemas_directory, basepath)
class install_gschemas(Command):
"""install message catalog files
Copy compiled message catalog files into their installation
directory, $prefix/share/locale/$lang/LC_MESSAGES/$package.mo.
"""
description = "install message catalog files"
user_options = []
skip_build = None
build_base = None
install_base = None
root = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('build', ('build_base', 'build_base'))
self.set_undefined_options(
'install',
('root', 'root'),
('install_base', 'install_base'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build_gschemas')
src = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
        dest = os.path.join(self.install_base, 'share', 'glib-2.0', 'schemas')
if self.root != None:
dest = change_root(self.root, dest)
self.copy_tree(src, dest)
        self.spawn(['glib-compile-schemas', dest])
__all__ = ["build_gschemas", "install_gschemas"]
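# Illustrative wiring (not part of the original file; the schemas directory
# below is an assumption): a setup.py would typically register these commands
# through cmdclass, e.g.
#
#     from gdist.gschemas import build_gschemas, install_gschemas
#
#     setup(
#         ...,
#         cmdclass={"build_gschemas": build_gschemas,
#                   "install_gschemas": install_gschemas},
#     )
#
# finalize_options() expects the distribution object to expose a `gschemas`
# attribute naming the directory that holds the .gschema.xml files.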
|
xuweiliang/Codelibrary
|
openstack_dashboard/dashboards/admin/volumes_back/snapshots/urls.py
|
Python
|
apache-2.0
| 922 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.volumes.snapshots import views
urlpatterns = [
url(r'^(?P<snapshot_id>[^/]+)$',
views.DetailView.as_view(),
name='detail'),
url(r'^(?P<snapshot_id>[^/]+)/update_status/$',
views.UpdateStatusView.as_view(),
name='update_status'),
]
|
PyLadiesPoznanAdvanced/django-introduction-bmi
|
calculators/tests_view.py
|
Python
|
mit
| 1,112 | 0.002708 |
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
class ViewTests(TestCase):
def setUp(self):
self.client = Client()
def test_bmi_calculator_main_page_1(self):
response = self.client.get(reverse('calculators:main_bmi'))
self.assertEqual(response.status_code, 200)
def test_bmi_calculator_main_page_2(self):
response = self.client.get(reverse('calculators:main_bmi'))
self.assertEqual(response.context["sex"], ("kobieta", "mężczyzna"))
def test_bmi_calculator_main_page_3(self):
"""A list of Template instances used to render the final content,
in the order they were rendered. For each template in the list,
use template.name to get the template’s file name, if the template was loaded from a file. """
        response = self.client.get(reverse('calculators:main_bmi'))
        self.assertEqual(response.templates[0].name, "calculators/bmi.html")
# SimpleTestCase.assertContains(response, text, count=None, status_code=200, msg_prefix='', html=False)
# TODO a lot of tests
|
bossiernesto/django-bootstrap3-classview
|
django_bootstrap3view/django_bootstrap3view_app/services/base.py
|
Python
|
bsd-3-clause
| 6,504 | 0.001538 |
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from django_bootstrap3view.django_bootstrap3view_app.utils.render import render, render_string
from django_bootstrap3view.django_bootstrap3view_app.utils.python import convert_to_bool
class BaseService(object):
_repo = property(fget=lambda self: self.entity.objects)
_page_size = 10
default_query_params = {}
def __getattr__(self, name):
"""
Delegates automatically all undefined methods on the repository entity.
"""
def decorator(*args, **kwargs):
method = getattr(self._repo, name)
if method is None:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__.__name__, name))
if not kwargs.pop("without_filters", False):
for key, value in self.default_query_params.iteritems():
kwargs.setdefault(key, value)
return method(*args, **kwargs)
return decorator
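    # Illustrative sketch (the Article model is an assumption, not from this
    # codebase): with a concrete service such as
    #
    #     class ArticleService(BaseService):
    #         entity = Article
    #
    # an undefined call like ArticleService().filter(active=True) is proxied
    # by __getattr__ to Article.objects.filter(...), with any entries from
    # default_query_params filled into the kwargs first unless already given.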
def get_page(self, page=0, size=None, min_page=None, **kwargs):
if size is None:
size = self._page_size
page = int(page)
if min_page is not None:
min_page = int(min_page)
limit = (page + 1) * size
offset = min_page * size
else:
limit = (page + 1) * size
offset = size * page
return self._get_objects(self._get_page_query(offset, limit, **kwargs))
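    # Worked example (illustrative): with the default _page_size of 10 and no
    # min_page, get_page(page=2) evaluates self.all()[20:30]; passing
    # min_page=0 widens the slice to self.all()[0:30].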
def _get_page_query(self, offset, limit, **kwargs):
return self.all()[offset:limit]
def list(self, start, size, **kwargs):
page = int(start / size)
return self.get_page(page=page, size=size, min_page=None, **kwargs)
def _get_objects(self, objects):
""" Override to add behaviour """
return objects
def get_one(self, *args, **kwargs):
objects = self.filter(*args, **kwargs)
return objects[0] if objects else None
def new(self, *args, **kwargs):
return self.entity(*args, **kwargs)
def _get_or_new(self, *args, **kwargs):
try:
obj, created = self.get_or_create(*args, **kwargs)
except:
obj, created = self.entity(*args, **kwargs), True
return obj, created
def get_or_new(self, *args, **kwargs):
obj, _ = self._get_or_new(*args, **kwargs)
return obj
def update_or_create(self, pre_create_function=None, pre_update_function=None, *args, **kwargs):
entity_id = kwargs.pop("id", None)
if entity_id:
if pre_update_function is not None:
pre_update_function(kwargs)
entity = self.get(id=entity_id)
for key, value in kwargs.iteritems():
setattr(entity, key, value)
else:
if pre_create_function is not None:
pre_create_function(kwargs)
entity = self.new(**kwargs)
entity.save()
return entity
def get_or_new_created(self, *args, **kwargs):
return self._get_or_new(*args, **kwargs)
def get_form(self):
return None
def _get_data(self, request, *args, **kwargs):
data = dict([(key, value) for key, value in request.POST.iteritems() if key != "csrfmiddlewaretoken"])
data.update(self._get_additional_data(request))
return data
def _get_additional_data(self, request, *args, **kwargs):
return {}
def _get_entity(self, request, *args, **kwargs):
return self.get_or_new(**self._get_data(request))
def _set_data(self, entity, request, *args, **kwargs):
data = self._get_data(request)
for key, value in data.iteritems():
setattr(entity, key, value)
return entity
def set_attrs(self, entity, attrs):
for key, value in attrs.iteritems():
setattr(entity, key, value)
def save_entity(self, entity, *args, **kwargs):
entity.save()
def save(self, request, *args, **kwargs):
entity = self._get_entity(request, *args, **kwargs)
self._set_data(entity, request, *args, **kwargs)
self.save_entity(entity, *args, **kwargs)
self._post_save(entity, request, *args, **kwargs)
return entity
def _post_save(self, entity, request, *args, **kwargs):
pass
def render(self, template, context):
return render(template, context)
def render_string(self, string, context):
return render_string(string, context)
def get_object_or_404(self, **kwargs):
return get_object_or_404(self.entity, **kwargs)
def delete(self, *args, **kwargs):
logical_delete = kwargs.pop("logical", False)
objs = self.filter(*args, **kwargs)
if not objs:
return False
for obj in objs:
if not logical_delete:
obj.delete()
else:
obj.active = False
obj.save()
return True
def get_formated_sum(self, value):
if value is None:
value = 0
return "%.2f" % value
    def _render_row_value(self, row_data, render):
if isinstance(render, str):
if isinstance(row_data, dict):
return str(row_data[render])
else:
return str(getattr(row_data, render))
else:
return str(render(row_data))
def get_params(self, data, params):
dict_params = {}
for param in params:
dict_params[param] = data.get(param)
return dict_params
def convert_to_bool(self, data, params):
convert_to_bool(data, params)
def to_bool(self, param):
return bool(int(param))
def get_action_params(self, request, params_names, prefix="", bar_action=True):
complete_names = ["%s%s" % (prefix, param) for param in params_names]
params = self.get_params(request.POST, complete_names)
if bar_action:
boolean_params = ["%s%s" % (prefix, param) for param in ["is_main_action", "is_side_action"]]
self.convert_to_bool(params, boolean_params)
final_params = {}
for key, value in params.iteritems():
new_key = key.replace(prefix, "")
final_params[new_key] = value
return final_params
def check_nullables(self, data, params):
for param in params:
if not data.get(param):
data[param] = None
|
suutari-ai/shoop
|
shuup/notify/__init__.py
|
Python
|
agpl-3.0
| 1,215 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
class ShuupNotifyAppConfig(AppConfig):
name = "shuup.notify"
verbose_name = "Shuup Notification Framework"
label = "shuup_notify"
provides = {
"notify_condition": [
"shuup.notify.conditions:LanguageEqual",
"shuup.notify.conditions:BooleanEqual",
"shuup.notify.conditions:IntegerEqual",
"shuup.notify.conditions:TextEqual",
"shuup.notify.conditions:Empty",
"shuup.notify.conditions:NonEmpty",
],
"notify_action": [
"shuup.notify.actions:SetDebugFlag",
"shuup.notify.actions:AddOrderLogEntry",
"shuup.notify.actions:SendEmail",
"shuup.notify.actions:AddNotification",
],
"notify_event": [],
"admin_module": [
"shuup.notify.admin_module:NotifyAdminModule",
]
}
default_app_config = "shuup.notify.ShuupNotifyAppConfig"
|
agoose77/hivesystem
|
manual/movingpanda/panda-5.py
|
Python
|
bsd-2-clause
| 3,673 | 0.001361 |
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = NodePath("")
a.setHpr(360 * random(), 0, 0)
a.setPos(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "NodePath")
def id_generator():
n = 0
while 1:
yield "spawnedpanda" + str(n)
class myscene(bee.frame):
    pandaclassname_ = bee.get_parameter("pandaclassname")
pandaname_ = bee.get_parameter("pandaname")
c1 = bee.configure("scene")
c1.import_mesh_EGG("models/environment")
a = NodePath("")
a.setScale(0.25)
a.setPos(-8, 42, 0)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c1.add_model_MATRIX(matrix=m)
c2 = bee.configure("scene")
c2.import_mesh_EGG("models/panda-model")
a = NodePath("")
a.setScale(0.005)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c2.add_actor_MATRIX(matrix=m, entityname=pandaname_)
c2.import_mesh_EGG("models/panda-walk4")
c2.add_animation("walk")
c3 = bee.configure("scene")
c3.import_mesh_EGG("models/panda-model")
a = NodePath("")
a.setScale(0.005)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c3.add_actorclass_MATRIX(matrix=m, actorclassname=pandaclassname_)
c3.import_mesh_EGG("models/panda-walk4")
c3.add_animation("walk")
del a, m, mat
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
raiser = bee.raiser()
connect("evexc", raiser)
animation = dragonfly.scene.unbound.animation()
pandaid = dragonfly.std.variable("id")(pandaname_)
walk = dragonfly.std.variable("str")("walk")
connect(pandaid, animation.actor)
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
myscene = myscene(
scene="scene",
pandaname=pandaname_,
pandaclassname=pandaclassname_,
)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
angleDegrees = task.time * 30.0
angleRadians = angleDegrees * (math.pi / 180.0)
camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
camera.setHpr(angleDegrees, 0, 0)
return Task.cont
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
|
uclouvain/osis
|
reference/migrations/0005_auto_20160902_1639.py
|
Python
|
agpl-3.0
| 5,965 | 0.003353 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-02 14:39
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reference', '0004_educationinstitution'),
]
operations = [
migrations.CreateModel(
name='AssimilationCriteria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('criteria', models.CharField(max_length=255, unique=True)),
('order', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='EducationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
                ('type', models.CharField(choices=[('TRANSITION', 'Transition'), ('QUALIFICATION', 'Qualification'), ('ANOTHER', 'Autre')], max_length=20)),
('name', models.CharField(max_length=100)),
('adhoc', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='ExternalOffer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('changed', models.DateTimeField(null=True)),
('name', models.CharField(max_length=150, unique=True)),
('adhoc', models.BooleanField(default=True)),
('national', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='GradeType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('name', models.CharField(max_length=255)),
('coverage', models.CharField(choices=[('HIGH_EDUC_NOT_UNIVERSITY', 'HIGH_EDUC_NOT_UNIVERSITY'), ('UNIVERSITY', 'UNIVERSITY'), ('UNKNOWN', 'UNKNOWN')], default='UNKNOWN', max_length=30)),
('adhoc', models.BooleanField(default=True)),
('institutional', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='InstitutionalGradeType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='domain',
name='adhoc',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='domain',
name='national',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='domain',
name='reference',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AddField(
model_name='domain',
name='type',
field=models.CharField(choices=[('HIGH_EDUC_NOT_UNIVERSITY', 'HIGH_EDUC_NOT_UNIVERSITY'), ('UNIVERSITY', 'UNIVERSITY'), ('UNKNOWN', 'UNKNOWN')], default='UNKNOWN', max_length=50),
),
migrations.AddField(
model_name='language',
name='external_id',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='domain',
name='decree',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reference.Decree'),
),
migrations.AlterField(
model_name='educationinstitution',
name='adhoc',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='educationinstitution',
name='institution_type',
field=models.CharField(choices=[('SECONDARY', 'SECONDARY'), ('UNIVERSITY', 'UNIVERSITY'), ('HIGHER_NON_UNIVERSITY', 'HIGHER_NON_UNIVERSITY')], max_length=25),
),
migrations.AlterField(
model_name='educationinstitution',
name='national_community',
field=models.CharField(blank=True, choices=[('FRENCH', 'FRENCH'), ('GERMAN', 'GERMAN'), ('DUTCH', 'DUTCH')], max_length=20, null=True),
),
migrations.AddField(
model_name='gradetype',
name='institutional_grade_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reference.InstitutionalGradeType'),
),
migrations.AddField(
model_name='externaloffer',
name='domain',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reference.Domain'),
),
migrations.AddField(
model_name='externaloffer',
name='grade_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reference.GradeType'),
),
migrations.AddField(
model_name='externaloffer',
name='offer_year',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.OfferYear'),
),
]
|
RaD/django-south
|
docs/djbook/_ext/djbookdocs.py
|
Python
|
apache-2.0
| 7,983 | 0.008081 |
# -*- coding: utf-8 -*-
"""
Sphinx plugins for Django documentation.
"""
import os
import re
from docutils import nodes, transforms
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
json = None
from sphinx import addnodes, roles, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag"
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
indextemplate = "pair: %s; template filter"
)
app.add_crossref_type(
directivename = "fieldlookup",
rolename = "lookup",
indextemplate = "pair: %s; field lookup type",
)
app.add_description_unit(
directivename = "django-admin",
rolename = "djadmin",
indextemplate = "pair: %s; django-admin command",
parse_node = parse_django_admin_node,
)
app.add_description_unit(
directivename = "django-admin-option",
rolename = "djadminopt",
indextemplate = "pair: %s; django-admin command-line option",
parse_node = parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
env = self.state.document.settings.env
arg0 = self.arguments[0]
is_nextversion = env.config.django_next_version == arg0
ret = []
node = addnodes.versionmodified()
ret.append(node)
if not is_nextversion:
if len(self.arguments) == 1:
linktext = u'Пожалуйста, обратитесь к описанию релиза </releases/%s>' % (arg0)
xrefs = roles.XRefRole()('doc', linktext, linktext, self.lineno, self.state)
node.extend(xrefs[0])
node['version'] = arg0
else:
node['version'] = "Development version"
node['type'] = self.name
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1], self.lineno+1)
node.extend(inodes)
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
ret = ret + messages
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
# <big>? Really?
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
# which is a bit less obvious that I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
    # that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'deprecated': u'Устарело в Django %s',
'versionchanged': u'Изменено в Django %s',
'versionadded': u'Добавлено в Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
title = "%s%s" % (
self.version_text[node['type']] % node['version'],
len(node) and ":" or "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.direc
|
tives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
if json is None:
self.warn("cannot create templatebuiltins.js due to missing simplejson dependency")
return
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
f = open(outfilename, 'wb')
f.write('var django_template_builtins = ')
json.dump(templatebuiltins, f)
f.write(';\n')
f.close()
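# Hypothetical wiring (not part of the original file): a Sphinx conf.py for
# these docs would enable the extension roughly as
#
#     extensions = ['djbookdocs']
#     django_next_version = '1.4'          # value is an assumption
#     html_translator_class = 'djbookdocs.DjangoHTMLTranslator'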
|
perfsonar/pscheduler
|
pscheduler-test-idlebgm/idlebgm/validate.py
|
Python
|
apache-2.0
| 1,872 | 0.017628 |
#
# Validator for "idlebg" Test
#
from pscheduler import json_validate
MAX_SCHEMA = 1
def spec_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"duration": { "$ref": "#/pScheduler/Duration" },
"host": { "$ref": "#/pScheduler/Host" },
"host-node": { "$ref": "#/pScheduler/URLHostPort" },
"interval": { "$ref": "#/pScheduler/Duration" },
"parting-comment": { "$ref": "#/pScheduler/String" },
"starting-comment": { "$ref": "#/pScheduler/String" },
},
"required": [
"duration"
]
}
return json_validate(json, schema, max_schema=MAX_SCHEMA)
def result_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"succeeded": { "$ref": "#/pScheduler/Boolean" },
"error": { "$ref": "#/pScheduler/String" },
"diags": { "$ref": "#/pScheduler/String" },
"time-slept": { "$ref": "#/pScheduler/Duration" },
},
"required": [
"succeeded",
"time-slept",
]
}
return json_validate(json, schema)
def limit_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"duration": { "$ref": "#/pScheduler/Limit/Duration" },
"starting-comment": { "$ref": "#/pScheduler/Limit/String" },
"parting-comment": { "$ref": "#/pScheduler/Limit/String" }
},
"additionalProperties": False
}
return json_validate(json, schema)
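# Illustrative check (assumption, not part of the original module): "duration"
# is the only required spec field, so something like
#
#     spec_is_valid({"schema": 1, "duration": "PT30S"})
#
# should report the spec as valid, while spec_is_valid({}) should fail the
# "required" clause.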
|
modulexcite/catapult
|
catapult_build/module_finder.py
|
Python
|
bsd-3-clause
| 551 | 0.005445 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
def FindModule(name):
"""Gets the path of the named module.
This is useful for cases where we want to use subprocess.call on a module we
have imported, and safer than using __file__ since that can point to .pyc
files.
Args:
name: the string name of a module (e.g. 'dev_appserver')
Returns:
    The path to the module.
"""
return imp.find_module(name)[1]
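# Hypothetical usage sketch (not part of the original file): the returned path
# can be handed to subprocess instead of relying on __file__, e.g.
#
#     import subprocess
#     path = FindModule('dev_appserver')   # any importable module name
#     subprocess.call(['python', path, '--help'])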
|
drsudow/SEG-D
|
setup.py
|
Python
|
mit
| 1,008 | 0.029791 |
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy
setup(
name = 'SEGD',
version = '0.a1',
description = 'A Python3 reader for SEG-D rev3.1 binary data.',
    url = 'https://github.com/drsudow/SEG-D.git',
author = 'Mattias Südow',
author_email = 'mattias@sudow.com',
license = 'MIT',
classifiers = [
        'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Information Analysis',
'Programming Language :: Python :: 3.5',
],
keywords = 'seismic SEGD',
packages = ['SEGD'],
install_requires = ['cython','numpy','datetime'],
ext_modules = cythonize([Extension('SEGD.read_traces',['SEGD/read_traces.pyx']
,include_dirs=[numpy.get_include()])])
)
|
WojciechRynczuk/vcdMaker
|
test/functional/maker.py
|
Python
|
mit
| 2,819 | 0.000709 |
# maker.py
#
# A class representing vcdMaker specific test.
#
# Copyright (c) 2019 vcdMaker team
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from flat import Flat
from test import Test
class Maker(Test):
"""A vcdMaker specific test class."""
def __init__(self, node, test_directory):
"""The Maker class constructor.
Arguments:
node - The XML node to be read.
test_directory - The test directory.
"""
Test.__init__(self, node, test_directory)
self.command = []
self.unique_params = {'input_file': ['', 'Missing input file'],
'time_unit': ['', 'Missing time unit'],
|
'line_counter': ['', ''],
'user_format': ['', '']}
for element in node.iter(tag='unique'):
self.unique = Flat(element, self.unique_params)
self.create_command(test_directory)
def create_command(self, test_directory):
"""Builds the vcdMaker specific command line."""
self.command.append('-t')
self.command.append(self.unique.get_parameter('time_unit'))
if self.unique.get_parameter('line_counter'):
self.command.append('-c')
self.command.append(self.unique.get_parameter('line_counter'))
if self.unique.get_parameter('user_format'):
self.command.append('-u')
self.command.append(self.unique.get_parameter('user_format'))
self.command.append('-o')
self.command.append(os.path.join(test_directory,
self.common.get_parameter('output_file')))
self.command.append(os.path.join(test_directory,
self.unique.get_parameter('input_file')))
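    # Illustrative result (the concrete parameter values are assumptions): for
    # a test whose unique section gives input_file='log.txt' and
    # time_unit='ns', and whose common section gives output_file='out.vcd',
    # create_command() leaves self.command as
    #
    #     ['-t', 'ns', '-o', '<test_dir>/out.vcd', '<test_dir>/log.txt']
    #
    # with '-c'/'-u' pairs inserted before '-o' only when line_counter or
    # user_format are set.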
|
|
swegener/sigrok-meter
|
datamodel.py
|
Python
|
gpl-3.0
| 8,443 | 0.002961 |
##
## This file is part of the sigrok-meter project.
##
## Copyright (C) 2014 Jens Steinhauser <jens.steinhauser@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import itertools
import math
import qtcompat
import sigrok.core as sr
import util
try:
from itertools import izip
except ImportError:
izip = zip
QtCore = qtcompat.QtCore
QtGui = qtcompat.QtGui
class Trace(object):
'''Class to hold the measured samples.'''
def __init__(self):
self.samples = []
self.new = False
def append(self, sample):
self.samples.append(sample)
self.new = True
class MeasurementDataModel(QtGui.QStandardItemModel):
'''Model to hold the measured values.'''
'''Role used to identify and find the item.'''
idRole = QtCore.Qt.UserRole + 1
'''Role used to store the device vendor and model.'''
descRole = QtCore.Qt.UserRole + 2
'''Role used to store a dictionary with the traces.'''
tracesRole = QtCore.Qt.UserRole + 3
'''Role used to store the color to draw the graph of the channel.'''
colorRole = QtCore.Qt.UserRole + 4
def __init__(self, parent):
super(self.__class__, self).__init__(parent)
# Use the description text to sort the items for now, because the
# idRole holds tuples, and using them to sort doesn't work.
self.setSortRole(MeasurementDataModel.descRole)
# A generator for the colors of the channels.
self._colorgen = self._make_colorgen()
def _make_colorgen(self):
cols = [
QtGui.QColor(0x8F, 0x52, 0x02), # brown
QtGui.QColor(0x73, 0xD2, 0x16), # green
QtGui.QColor(0xCC, 0x00, 0x00), # red
QtGui.QColor(0x34, 0x65, 0xA4), # blue
QtGui.QColor(0xF5, 0x79, 0x00), # orange
QtGui.QColor(0xED, 0xD4, 0x00), # yellow
QtGui.QColor(0x75, 0x50, 0x7B) # violet
]
def myrepeat(g, n):
'''Repeats every element from 'g' 'n' times'.'''
for e in g:
for f in itertools.repeat(e, n):
yield f
colorcycle = itertools.cycle(cols)
darkness = myrepeat(itertools.count(100, 10), len(cols))
for c, d in izip(colorcycle, darkness):
yield QtGui.QColor(c).darker(d)
def format_mqflags(self, mqflags):
if sr.QuantityFlag.AC in mqflags:
return 'AC'
elif sr.QuantityFlag.DC in mqflags:
return 'DC'
else:
return ''
def format_value(self, mag):
if math.isinf(mag):
return u'\u221E'
return '{:f}'.format(mag)
def getItem(self, device, channel):
'''Return the item for the device + channel combination from the
model, or create a new item if no existing one matches.'''
# Unique identifier for the device + channel.
# TODO: Isn't there something better?
uid = (
device.vendor,
device.model,
device.serial_number(),
device.connection_id(),
channel.index
)
# Find the correct item in the model.
for row in range(self.rowCount()):
item = self.item(row)
rid = item.data(MeasurementDataModel.idRole)
rid = tuple(rid) # PySide returns a list.
if uid == rid:
return item
# Nothing found, create a new item.
desc = '{} {}, {}'.format(
device.vendor, device.model, channel.name)
item = QtGui.QStandardItem()
item.setData(uid, MeasurementDataModel.idRole)
item.setData(desc, MeasurementDataModel.descRole)
item.setData({}, MeasurementDataModel.tracesRole)
item.setData(next(self._colorgen), MeasurementDataModel.colorRole)
self.appendRow(item)
self.sort(0)
return item
@QtCore.Slot(float, sr.classes.Device, sr.classes.Channel, tuple)
def update(self, timestamp, device, channel, data):
'''Update the data for the device (+channel) with the most recent
measurement from the given payload.'''
item = self.getItem(device, channel)
value, unit, mqflags = data
value_str = self.format_value(value)
unit_str = util.format_unit(unit)
mqflags_str = self.format_mqflags(mqflags)
# The display role is a tuple containing the value and the unit/flags.
disp = (value_str, ' '.join([unit_str, mqflags_str]))
item.setData(disp, QtCore.Qt.DisplayRole)
# The samples role is a dictionary that contains the old samples for each unit.
# Should be trimmed periodically, otherwise it grows larger and larger.
if not math.isinf(value) and not math.isnan(value):
sample = (timestamp, value)
traces = item.data(MeasurementDataModel.tracesRole)
# It's not possible to use 'collections.defaultdict' here, because
# PySide doesn't return the original type that was passed in.
if not (unit in traces):
traces[unit] = Trace()
traces[unit].append(sample)
item.setData(traces, MeasurementDataModel.tracesRole)
def clear_samples(self):
'''Removes all old samples from the model.'''
for row in range(self.rowCount()):
idx = self.index(row, 0)
self.setData(idx, {},
MeasurementDataModel.tracesRole)
class MultimeterDelegate(QtGui.QStyledItemDelegate):
'''Delegate to show the data items from a MeasurementDataModel.'''
def __init__(self, parent, font):
'''Initialize the delegate.
:param font: Font used for the text.
'''
super(self.__class__, self).__init__(parent)
self._nfont = font
fi = QtGui.QFontInfo(self._nfont)
self._nfontheight = fi.pixelSize()
fm = QtGui.QFontMetrics(self._nfont)
r = fm.boundingRect('-XX.XXXXXX X XX')
w = 1.4 * r.width() + 2 * self._nfontheight
h = 2.6 * self._nfontheight
self._size = QtCore.QSize(w, h)
def sizeHint(self, option=None, index=None):
return self._size
def _color_rect(self, outer):
'''Returns the dimensions of the clickable rectangle.'''
x1 = (outer.height() - self._nfontheight) / 2
r = QtCore.QRect(x1, x1, self._nfontheight, self._nfontheight)
r.translate(outer.topLeft())
return r
def paint(self, painter, options, index):
value, unit = index.data(QtCore.Qt.DisplayRole)
desc = index.data(MeasurementDataModel.descRole)
color = index.data(MeasurementDataModel.colorRole)
painter.setFont(self._nfont)
# Draw the clickable rectangle.
painter.fillRect(self._color_rect(options.rect), color)
# Draw the text
h = options.rect.height()
p = options.rect.topLeft()
p += QtCore.QPoint(h, (h + self._nfontheight) / 2 - 2)
painter.drawText(p, desc + ': ' + value + ' ' + unit)
def editorEvent(self, event, model, options, index):
if type(event) is QtGui.QMouseEvent:
if event.type() == QtCore.QEvent.MouseButtonPress:
rect = self._color_rect(options.rect)
if rect.contains(event.x(), event.y()):
c = index.data(MeasurementDataModel.colorRole)
c = QtGui.QColorDialog.getColor(c, None,
'Choose new color for channel')
if c.isValid():
# False
|
klahnakoski/MoDevETL
|
pyLibrary/convert.py
|
Python
|
mpl-2.0
| 17,324 | 0.003002 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import absolute_import
import HTMLParser
import StringIO
import ast
import base64
import cgi
from collections import Mapping
import datetime
from decimal import Decimal
import gzip
import hashlib
from io import BytesIO
import json
import re
from tempfile import TemporaryFile
from pyLibrary import strings
from pyLibrary.dot import wrap, wrap_leaves, unwrap, unwraplist, split_field, join_field, coalesce
from pyLibrary.collections.multiset import Multiset
from pyLibrary.debugs.logs import Log, Except
from pyLibrary.env.big_data import FileString, safe_size
from pyLibrary.jsons import quote
from pyLibrary.jsons.encoder import json_encoder, pypy_json_encode
from pyLibrary.strings import expand_template
from pyLibrary.times.dates import Date
"""
DUE TO MY POOR MEMORY, THIS IS A LIST OF ALL CONVERSION ROUTINES
IN <from_type> "2" <to_type> FORMAT
"""
def value2json(obj, pretty=False):
try:
json = json_encoder(obj, pretty=pretty)
if json == None:
Log.note(str(type(obj)) + " is not valid{{type}}JSON", type= " (pretty) " if pretty else " ")
Log.error("Not valid JSON: " + str(obj) + " of type " + str(type(obj)))
return json
except Exception, e:
e = Except.wrap(e)
try:
json = pypy_json_encode(obj)
return json
except Exception:
pass
Log.error("Can not encode into JSON: {{value}}", value=repr(obj), cause=e)
def remove_line_comment(line):
mode = 0 # 0=code, 1=inside_string, 2=escaping
for i, c in enumerate(line):
if c == '"':
if mode == 0:
mode = 1
elif mode == 1:
mode = 0
else:
mode = 1
elif c == '\\':
if mode == 0:
mode = 0
elif mode == 1:
mode = 2
else:
mode = 1
elif mode == 2:
mode = 1
elif c == "#" and mode == 0:
return line[0:i]
elif c == "/" and mode == 0 and line[i + 1] == "/":
return line[0:i]
return line
def json2value(json_string, params={}, flexible=False, leaves=False):
"""
:param json_string: THE JSON
:param params: STANDARD JSON PARAMS
:param flexible: REMOVE COMMENTS
:param leaves: ASSUME JSON KEYS ARE DOT-DELIMITED
:return: Python value
"""
if isinstance(json_string, str):
Log.error("only unicode json accepted")
try:
if flexible:
# REMOVE """COMMENTS""", # COMMENTS, //COMMENTS, AND \n \r
# DERIVED FROM https://github.com/jeads/datasource/blob/master/datasource/bases/BaseHub.py# L58
json_string = re.sub(r"\"\"\".*?\"\"\"", r"\n", json_string, flags=re.MULTILINE)
json_string = "\n".join(remove_line_comment(l) for l in json_string.split("\n"))
# ALLOW DICTIONARY'S NAME:VALUE LIST TO END WITH COMMA
json_string = re.sub(r",\s*\}", r"}", json_string)
# ALLOW LISTS TO END WITH COMMA
json_string = re.sub(r",\s*\]", r"]", json_string)
if params:
json_string = expand_template(json_string, params)
# LOOKUP REFERENCES
value = wrap(json_decoder(json_string))
if leaves:
value = wrap_leaves(value)
return value
except Exception, e:
e = Except.wrap(e)
if "Expecting '" in e and "' delimiter: line" in e:
line_index = int(strings.between(e.message, " line ", " column ")) - 1
column = int(strings.between(e.message, " column ", " ")) - 1
line = json_string.split("\n")[line_index].replace("\t", " ")
if column > 20:
sample = "..." + line[column - 20:]
pointer = " " + (" " * 20) + "^"
else:
sample = line
pointer = (" " * column) + "^"
if len(sample) > 43:
sample = sample[:43] + "..."
Log.error("Can not decode JSON at:\n\t" + sample + "\n\t" + pointer + "\n")
base_str = unicode2utf8(strings.limit(json_string, 1000))
hexx_str = bytes2hex(base_str, " ")
try:
char_str = " " + (" ".join(c.decode("latin1") if ord(c) >= 32 else ".") for c in base_str)
except Exception:
char_str = " "
Log.error("Can not decode JSON:\n" + char_str + "\n" + hexx_str + "\n", e)
def string2datetime(value, format=None):
return Date(value, format).value
def str2datetime(value, format=None):
return string2datetime(value, format)
def datetime2string(value, format="%Y-%m-%d %H:%M:%S"):
    return Date(value).format(format=format)
def datetime2str(value, format="%Y-%m-%d %H:%M:%S"):
return Date(value).format(format=format)
def datetime2unix(d):
try:
if d == None:
return None
        elif isinstance(d, datetime.datetime):
epoch = datetime.datetime(1970, 1, 1)
elif isinstance(d, datetime.date):
epoch = datetime.date(1970, 1, 1)
else:
Log.error("Can not convert {{value}} of type {{type}}", value= d, type= d.__class__)
diff = d - epoch
return Decimal(long(diff.total_seconds() * 1000000)) / 1000000
except Exception, e:
Log.error("Can not convert {{value}}", value= d, cause=e)
def datetime2milli(d):
return datetime2unix(d) * 1000
def timedelta2milli(v):
return v.total_seconds()
def unix2datetime(u):
try:
if u == None:
return None
if u == 9999999999: # PYPY BUG https://bugs.pypy.org/issue1697
return datetime.datetime(2286, 11, 20, 17, 46, 39)
return datetime.datetime.utcfromtimestamp(u)
except Exception, e:
Log.error("Can not convert {{value}} to datetime", value= u, cause=e)
def milli2datetime(u):
if u == None:
return None
return unix2datetime(u / 1000.0)
def dict2Multiset(dic):
if dic == None:
return None
output = Multiset()
output.dic = unwrap(dic).copy()
return output
def multiset2dict(value):
"""
CONVERT MULTISET TO dict THAT MAPS KEYS TO MAPS KEYS TO KEY-COUNT
"""
if value == None:
return None
return dict(value.dic)
def table2list(
column_names, # tuple of columns names
rows # list of tuples
):
return wrap([dict(zip(column_names, r)) for r in rows])
def table2tab(
column_names, # tuple of columns names
rows # list of tuples
):
def row(r):
return "\t".join(map(value2json, r))
return row(column_names)+"\n"+("\n".join(row(r) for r in rows))
def list2tab(rows):
columns = set()
for r in wrap(rows):
columns |= set(k for k, v in r.leaves())
keys = list(columns)
output = []
for r in wrap(rows):
output.append("\t".join(value2json(r[k]) for k in keys))
return "\t".join(keys) + "\n" + "\n".join(output)
def list2table(rows, column_names=None):
if column_names:
keys = list(set(column_names))
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
output = [[unwraplist(r[k]) for k in keys] for r in rows]
return wrap({
"meta": {"format": "table"},
"header": keys,
"data": output
})
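# Worked example (illustrative): list2table([{"a": 1, "b": 2}], ["a", "b"])
# is expected to produce something equivalent to
#
#     {"meta": {"format": "table"}, "header": ["a", "b"], "data": [[1, 2]]}
#
# although the header order follows list(set(column_names)) and is therefore
# not guaranteed.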
def list2cube(rows, column_names=None):
if column_names:
keys = column_names
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
data = {k: [] for k in keys}
output = wrap({
"meta": {"format": "cube"},
"edges": [
{
"name": "rownum",
"d
|
hoffmaje/layla
|
layla/vocabularymanager/models.py
|
Python
|
agpl-3.0
| 240 | 0.004167 |
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 Jens Hoffman
|
n (hoffmaje)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from django.db import models
from django.contrib.auth.models import User
|
CiscoDevNet/coding-skills-sample-code
|
coding202-parsing-json/call-functions.py
|
Python
|
apache-2.0
| 217 | 0.041475 |
print ("I'm not a function")
def my_function():
    print("Hey I'm a function!")
def brett(val):
for i in range(val):
print("I'm a function with args!")
my_function()
brett(5)
|
nwjlyons/copy-file-name
|
copy_filename.py
|
Python
|
mit
| 435 | 0.004598 |
import sublime, sublime_plugin, os
class CopyFilenameCommand(sublime_plugin.TextCommand):
def run(self, edit):
if len(self.view.file_name()) > 0:
filename = os.path.split(self.view.file_name())[1]
sublime.set_clipboard(filename)
sublime.status_message("Copied file name: %s" % filename)
def is_enabled(self):
        return self.view.file_name() and len(self.view.file_name()) > 0
|
favoretti/accessninja
|
accessninja/device.py
|
Python
|
mit
| 5,511 | 0.000363 |
#!/usr/bin/env python
from os.path import join
from config import Config
from group import HostGroup, PortGroup
from parser import Parser
from renderers.junos import JunosRenderer
from renderers.ios import IOSRenderer
from deployers.junos import JunosDeployer
from deployers.ios import IOSDeployer
from deployers.iosscp import SCPDeployer
class Device(object):
def __init__(self):
self._name = None
self._vendor = None
self._transport = None
self._save_config = None
self._include_list = list()
self._rules = list()
self._hostgroups = list()
self._portgroups = list()
self._config = Config()
self._rendered_groups = list()
self._rendered_rules = dict()
self._rendered_config = ''
@property
def vendor(self):
return self._vendor
@vendor.setter
def vendor(self, value):
if value not in ['junos', 'ios', 'arista', 'asa']:
raise Exception("The only vendors currently supported are junos, arista, ios, asa")
self._vendor = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def transport(self):
return self._transport
@transport.setter
def transport(self, value):
if value not in ['ssh']:
raise Exception("The only transport supported currently is ssh")
self._transport = value
@property
def rendered_config(self):
return self._rendered_config
@rendered_config.setter
def rendered_config(self, value):
self._rendered_config = value
@property
def rendered_rules(self):
return self._rendered_rules
@property
def rendered_groups(self):
return self._rendered_groups
@property
def hostgroups(self):
return self._hostgroups
@property
def portgroups(self):
return self._portgroups
@property
def rules(self):
return self._rules
@property
def save_config(self):
return self._save_config
@save_config.setter
def save_config(self, value):
self._save_config = value
def add_include(self, value):
self._include_list.append(value)
def parse_file(self, name):
self.name = name
config = Config()
try:
f = open('{}/{}'.format(config.devices, name))
except Exception, e:
print('Could not open device file', e)
raise e
lines = f.readlines()
for line in lines:
if line.startswith('#'):
continue
if line.strip().startswith('vendor'):
self.vendor = line.strip().split(' ')[1]
if line.strip().startswith('transport'):
self.transport = line.strip().split(' ')[1]
if line.strip().startswith('save_config'):
self.save_config = line.strip().split(' ')[1]
            if line.strip().startswith('include'):
self.add_include(line.strip().split(' ')[1])
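    # Sketch of the device file format parse_file() expects (the concrete
    # values are assumptions):
    #
    #     # lines starting with '#' are skipped
    #     vendor junos
    #     transport ssh
    #     save_config true
    #     include dmz-policy
    #     include core-policy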
def print_stats(self):
for hg in self._hostgroups:
hg.print_stats()
for rule in self._rules:
rule.print_stats()
def render(self):
print('Rendering {}'.format(self._name))
for include in self._include_list:
parsed_ruleset = Parser()
parsed_ruleset.parse_file(join(self._config.policies, include))
self._rules.append(parsed_ruleset)
for ruleset in self._rules:
self.resolve_hostgroups(ruleset)
self.resolve_portgroups(ruleset)
if self._vendor == 'junos':
renderer = JunosRenderer(self)
renderer.render()
if self._vendor == 'ios':
renderer = IOSRenderer(self)
renderer.render()
def render_to_file_and_deploy(self):
self.render()
if self._vendor == 'junos':
deployer = JunosDeployer(self)
deployer.render_to_file_and_deploy()
if self._vendor == 'ios':
#deployer = IOSDeployer(self)
deployer = SCPDeployer(self)
deployer.render_to_file_and_deploy()
def print_rendered_config(self):
print self._rendered_config
def resolve_hostgroup(self, hgname):
hg = HostGroup(hgname)
hg.parse_file()
if hg.has_inline_groups:
for ihg in hg.inline_groups:
if ihg not in self._hostgroups:
self._hostgroups.append(ihg)
if hg not in self._hostgroups:
self._hostgroups.append(hg)
def resolve_hostgroups(self, ruleset):
for rule in ruleset.tcp_rules:
if type(rule.src) == str and rule.src_is_group:
self.resolve_hostgroup(str(rule.src)[1:])
if type(rule.dst) == str and rule.dst_is_group:
self.resolve_hostgroup(str(rule.dst)[1:])
def resolve_portgroup(self, pgname):
pg = PortGroup(pgname)
pg.parse_file()
if pg not in self._portgroups:
self._portgroups.append(pg)
def resolve_portgroups(self, ruleset):
for rule in ruleset.tcp_rules:
if type(rule.srcport) == str and rule.srcport_is_group:
self.resolve_portgroup(str(rule.srcport)[1:])
if type(rule.dstport) == str and rule.dstport_is_group:
self.resolve_portgroup(str(rule.dstport)[1:])
|
yasserglez/pytiger2c
|
scripts/pytiger2c.py
|
Python
|
mit
| 4,333 | 0.006238 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to run PyTiger2C from the command line.
"""
import os
import sys
import optparse
import subprocess
# Add the directory containing the packages in the source distribution to the path.
# This should be removed when Tiger2C is installed.
PACKAGES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'packages'))
sys.path.insert(0, PACKAGES_DIR)
from pytiger2c import __version__, __authors__, tiger2c, tiger2dot
from pytiger2c.errors import PyTiger2CError
EXIT_SUCCESS, EXIT_FAILURE = 0, 1
def _parse_args(argv):
"""
    Parses the options given as command line arguments.
    @type argv: C{list}
    @param argv: List of the program arguments.
    @rtype: C{tuple}
    @return: Returns a tuple whose first element is a structure holding the
        information about the given options and whose second element is a
        list with the remaining arguments.
"""
usage = '%prog <tiger-file> --output <output-file> [--output-type <output-type>]'
version = '%%prog (PyTiger2C) %s\n' % __version__
authors = '\n'.join(['Copyright (C) 2009, 2010 %s' % a for a in __authors__])
desc = 'Translates a Tiger program received as argument into a C program ' \
'and then compiles the C program into an executable using a C compiler. ' \
'This behavior can be modified using the --output-type option.'
parser = optparse.OptionParser(usage=usage,
version=version + authors,
description=desc,
prog=os.path.basename(argv[0]))
parser.add_option('-o', '--output', action='store', dest='output', metavar='FILE',
help='write the output to FILE')
parser.add_option('-t', '--output-type', action='store', dest='output_type', metavar='TYPE',
type='choice', choices=('ast', 'c', 'binary'),
help="output type: 'ast', 'c' or 'binary' (default '%default')")
    parser.set_default('output_type', 'binary')
options, args = parser.parse_args(args=argv[1:])
optparse.check_choice(parser.get_option('--output-type'), '--output-type', options.output_type)
if not options.output:
parser.error('missing required --output option')
elif len(args) != 1:
parser.error('invalid number of arguments')
else:
return options, args
def main(argv):
"""
    Main function of the script.
    @type argv: C{list}
    @param argv: List of the program arguments.
    @rtype: C{int}
    @return: Returns 0 if no error occurred during the execution of the
        program and 1 otherwise.
"""
options, args = _parse_args(argv)
tiger_filename = os.path.abspath(args[0])
output_filename = os.path.abspath(options.output)
try:
if options.output_type == 'ast':
tiger2dot(tiger_filename, output_filename)
elif options.output_type == 'c':
tiger2c(tiger_filename, output_filename)
# Translation completed. Beautify the code using GNU Indent.
INDENT_CMD = ['indent', '-gnu', '-l100', '-o', output_filename, output_filename]
if subprocess.call(INDENT_CMD) != EXIT_SUCCESS:
# Leave the c file for debugging.
sys.exit(EXIT_FAILURE)
elif options.output_type == 'binary':
basename = os.path.basename(tiger_filename)
index = basename.rfind('.')
c_filename = '%s.c' % (basename[:index] if index > 0 else basename)
c_filename = os.path.join(os.path.dirname(tiger_filename), c_filename)
tiger2c(tiger_filename, c_filename)
# Translation completed. Compile using GCC.
GCC_CMD = ['gcc', c_filename, '-o', output_filename, '-std=c99', '-lgc']
if subprocess.call(GCC_CMD) != EXIT_SUCCESS:
# Leave the temporal c file for debugging.
sys.exit(EXIT_FAILURE)
os.unlink(c_filename)
except PyTiger2CError, error:
print >> sys.stderr, error
sys.exit(EXIT_FAILURE)
else:
sys.exit(EXIT_SUCCESS)
if __name__ == '__main__':
main(sys.argv)
|
bassijtsma/chatbot
|
yowsup/demos/echoclient/stack.py
|
Python
|
gpl-3.0
| 2,467 | 0.008512 |
from yowsup.stacks import YowStack
from .layer import EchoLayer
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowCryptLayer, YowAuthenticationProtocolLayer, AuthError
from yowsup.layers.coder import YowCoderLayer
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.protocol_messages import YowMessagesProtocolLayer
from yowsup.layers.protocol_media import YowMediaProtocolLayer
from yowsup.layers.stanzaregulator import YowStanzaRegulator
from yowsup.layers.protocol_receipts import YowReceiptProtocolLayer
from yowsup.layers.protocol_acks import YowAckProtocolLayer
from yowsup.layers.logger import YowLoggerLayer
from yowsup.layers.protocol_iq import YowIqProtocolLayer
from yowsup.layers.protocol_calls import YowCallsProtocolLayer
from yowsup.layers import YowParallelLayer
class YowsupEchoStack(object):
    def __init__(self, credentials, encryptionEnabled = False):
if encryptionEnabled:
from yowsup.layers.axolotl import YowAxolotlLayer
layers = (
EchoLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowMessagesProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer, YowMediaProtocolLayer, YowIqProtocolLayer, YowCallsProtocolLayer]),
YowAxolotlLayer,
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
else:
layers = (
EchoLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowMessagesProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer, YowMediaProtocolLayer, YowIqProtocolLayer, YowCallsProtocolLayer]),
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
self.stack = YowStack(layers)
self.stack.setCredentials(credentials)
def start(self):
self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
try:
self.stack.loop()
except AuthError as e:
print("Authentication Error: %s" % e.message)
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/network/iosxr/iosxr_user.py
|
Python
|
gpl-3.0
| 29,071 | 0.002752 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_user
version_added: "2.4"
author:
- "Trishna Guha (@trishnaguha)"
- "Sebastiaan van Doesselaar (@sebasdoes)"
- "Kedar Kekan (@kedarX)"
short_description: Manage the aggregate of local users on Cisco IOS XR device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
extends_documentation_fragment: iosxr
notes:
- Tested against IOS XRv 6.1.2
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS XR device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS XR device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
        Please note that this option is not the same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS XR device. The password
needs to be provided in clear text. Password is encrypted on the device
when used with I(cli) and by Ansible when used with I(netconf)
using the same MD5 hash technique with salt size of 3.
        Please note that this option is not the same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
group:
description:
- Configures the group for the username in the
device running configuration. The argument accepts a string value
defining the group name. This argument does not check if the group
has been configured on the device.
aliases: ['role']
groups:
version_added: "2.5"
description:
- Configures the groups for the username in the device running
configuration. The argument accepts a list of group names.
This argument does not check if the group has been configured
        on the device. It is similar to the aggregate command for
usernames, but lets you configure multiple groups for the user(s).
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
        `admin` user and the currently defined set of users.
type: bool
default: false
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
- Applicable only when using network_cli transport
type: bool
default: false
version_added: "2.8"
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
public_key:
version_added: "2.5"
description:
- Configures the contents of the public keyfile to upload to the IOS-XR node.
This enables users to login using the accompanying private key. IOS-XR
only accepts base64 decoded files, so this will be decoded and uploaded
to the node. Do note that this requires an OpenSSL public key file,
PuTTy generated files will not work! Mutually exclusive with
public_key_contents. If used with multiple users in aggregates, then the
same key file is used for all users.
public_key_contents:
version_added: "2.5"
description:
- Configu
|
res the contents of the public keyfile to upload to the IOS-XR node.
This enables users to login using the accompanying private key. IOS-XR
only accepts base64 decoded files, so this will be decoded and uploaded
to the node. Do note that this requires an OpenSSL public key file,
P
|
uTTy generated files will not work! Mutually exclusive with
        public_key. If used with multiple users in aggregates, then the
same key file is used for all users.
requirements:
- base64 when using I(public_key_contents) or I(public_key)
- paramiko when using I(public_key_contents) or I(public_key)
"""
EXAMPLES = """
- name: create a new user
iosxr_user:
name: ansible
configured_password: mypassword
state: present
- name: create a new user in admin configuration mode
iosxr_user:
name: ansible
configured_password: mypassword
admin: True
state: present
- name: remove all users except admin
iosxr_user:
purge: True
- name: set multiple users to group sys-admin
iosxr_user:
aggregate:
- name: netop
- name: netend
group: sysadmin
state: present
- name: set multiple users to multiple groups
iosxr_user:
aggregate:
- name: netop
- name: netend
groups:
- sysadmin
- root-system
state: present
- name: Change Password for User netop
iosxr_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Add private key authentication for user netop
iosxr_user:
name: netop
state: present
    public_key_contents: "{{ lookup('file', '/home/netop/.ssh/id_rsa.pub') }}"
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password group sysadmin
- username admin secret admin
xml:
description: NetConf rpc xml sent to device with transport C(netconf)
returned: always (empty list when no xml rpc to send)
type: list
version_added: 2.5
sample:
- '<config xmlns:xc=\"urn:ietf:params:xml:ns:netconf:base:1.0\">
<aaa xmlns=\"http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-lib-cfg\">
<usernames xmlns=\"http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-locald-cfg\">
<username xc:operation=\"merge\">
<name>test7</name>
<usergroup-under-usernames>
<usergroup-under-username>
<name>sysadmin</name>
</usergroup-under-username>
</usergroup-under-usernames>
<secret>$1$ZsXC$zZ50wqhDC543ZWQkkAHLW0</secret>
</username>
</usernames>
</aaa>
</config>'
"""
import os
from functools import partial
from copy import deepcopy
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.iosxr.iosxr import get_config, load_config, is_netconf, is_cliconf
from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec, build_xml, etree_findall
try:
from base64 import b64decode
HAS_B64 = True
except ImportError:
HAS_B64 = False
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
class PublicKeyManager(object):
def __init__(self, module, result):
self._module = module
self._result =
|
ARMmbed/yotta
|
yotta/lib/pack.py
|
Python
|
apache-2.0
| 23,065 | 0.002948 |
# Copyright 2014-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import json
import os
from collections import OrderedDict
import tarfile
import re
import logging
import errno
import copy
import hashlib
# PyPi/standard library > 3.4
# it has to be PurePath
from pathlib import PurePath
# JSON Schema, pip install jsonschema, Verify JSON Schemas, MIT
import jsonschema
# Ordered JSON, , read & write json, internal
from yotta.lib import ordered_json
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
# Registry Access, , access packages in the registry, internal
from yotta.lib import registry_access
# These patterns are used in addition to any glob expressions defined by the
# .yotta_ignore file
Default_Publish_Ignore = [
'/upload.tar.[gb]z',
'/.git',
'/.hg',
'/.svn',
'/yotta_modules',
'/yotta_targets',
'/build',
'.DS_Store',
'*.sw[ponml]',
'*~',
'._.*',
'.yotta.json'
]
Readme_Regex = re.compile('^readme(?:\.md)', re.IGNORECASE)
Ignore_List_Fname = '.yotta_ignore'
Shrinkwrap_Fname = 'yotta-shrinkwrap.json'
Shrinkwrap_Schema = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema', 'shrinkwrap.json')
Origin_Info_Fname = '.yotta_origin.json'
logger = logging.getLogger('components')
def tryTerminate(process):
try:
process.terminate()
except OSError as e:
# if the error is "no such process" then the process probably exited
# while we were waiting for it, so don't raise an exception
if e.errno != errno.ESRCH:
raise
class InvalidDescription(Exception):
pass
# OptionalFileWrapper provides a scope object that can wrap a none-existent file
class OptionalFileWrapper(object):
def __init__(self, fname=None, mode=None):
self.fname = fname
self.mode = mode
super(OptionalFileWrapper, self).__init__()
def __enter__(self):
if self.fname:
self.file = open(self.fname, self.mode)
else:
self.file = open(os.devnull)
return self
def __exit__(self, type, value, traceback):
self.file.close()
def contents(self):
if self.fname:
return self.file.read()
else:
return ''
def extension(self):
if self.fname:
return os.path.splitext(self.fname)[1]
else:
return ''
def __nonzero__(self):
return bool(self.fname)
# python 3 truthiness
def __bool__(self):
return bool(self.fname)
class DependencySpec(object):
def __init__(self, name, version_req, is_test_dependency=False, shrinkwrap_version_req=None, specifying_module=None):
self.name = name
self.version_req = version_req
self.specifying_module = specifying_module # for diagnostic info only, may not be present
self.is_test_dependency = is_test_dependency
self.shrinkwrap_version_req = shrinkwrap_version_req
def isShrinkwrapped(self):
return self.shrinkwrap_version_req is not None
def nonShrinkwrappedVersionReq(self):
''' return the dependency specification ignoring any shrinkwrap '''
return self.version_req
def versionReq(self):
''' r
|
eturn the dependency specification, which may be from a shrinkwrap file '''
return self.shrinkwrap_version_req or self.version_req
def __unicode__(self):
return u'%s at %s' % (self.name, self.version_req)
def __str__(self):
import sys
# in python 3 __str__ must return a string (i.e. unicode), in
# python 2, it must no
|
t return unicode, so:
if sys.version_info[0] >= 3:
return self.__unicode__()
else:
return self.__unicode__().encode('utf8')
def __repr__(self):
return self.__unicode__()
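# Example (editor's note, names are placeholders): for
# DependencySpec('minar', '^1.0.0', shrinkwrap_version_req='1.2.3'),
# versionReq() returns '1.2.3' (the shrinkwrap pin wins), while
# nonShrinkwrappedVersionReq() still returns '^1.0.0'.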
def tryReadJSON(filename, schemaname):
r = None
try:
with open(filename, 'r') as jsonfile:
r = ordered_json.load(filename)
if schemaname is not None:
with open(schemaname, 'r') as schema_file:
schema = json.load(schema_file)
validator = jsonschema.Draft4Validator(schema)
for error in validator.iter_errors(r):
logger.error(
'%s is not valid under the schema: %s value %s',
filename,
u'.'.join([str(x) for x in error.path]),
error.message
)
except IOError as e:
if e.errno != errno.ENOENT:
raise
return r
# Pack represents the common parts of Target and Component objects (versions,
# VCS, etc.)
class Pack(object):
schema_errors_displayed = set()
def __init__(
self,
path,
description_filename,
installed_linked,
schema_filename = None,
latest_suitable_version = None,
inherit_shrinkwrap = None
):
# version, , represent versions and specifications, internal
from yotta.lib import version
# vcs, , represent version controlled directories, internal
from yotta.lib import vcs
# resolve links at creation time, to minimise path lengths:
self.unresolved_path = path
self.path = fsutils.realpath(path)
self.installed_linked = installed_linked
self.vcs = None
self.error = None
self.latest_suitable_version = latest_suitable_version
self.version = None
self.description_filename = description_filename
self.ignore_list_fname = Ignore_List_Fname
self.ignore_patterns = copy.copy(Default_Publish_Ignore)
self.origin_info = None
description_file = os.path.join(path, description_filename)
if os.path.isfile(description_file):
try:
self.description = ordered_json.load(description_file)
if self.description:
if not 'name' in self.description:
raise Exception('missing "name"')
if 'version' in self.description:
self.version = version.Version(self.description['version'])
else:
raise Exception('missing "version"')
except Exception as e:
self.description = OrderedDict()
self.error = "Description invalid %s: %s" % (description_file, e);
logger.debug(self.error)
raise InvalidDescription(self.error)
else:
self.error = "No %s file." % description_filename
self.description = OrderedDict()
try:
with open(os.path.join(path, self.ignore_list_fname), 'r') as ignorefile:
self.ignore_patterns += self._parseIgnoreFile(ignorefile)
except IOError as e:
if e.errno != errno.ENOENT:
raise
# warn about invalid yotta versions before schema errors (as new yotta
# might introduce new schema)
yotta_version_spec = None
if self.description and self.description.get('yotta', None):
try:
yotta_version_spec = version.Spec(self.description['yotta'])
except ValueError as e:
logger.warning(
"could not parse yotta version spec '%s' from %s: it "+
"might require a newer version of yotta",
self.description['yotta'],
self.description['name']
)
if yotta_version_spec is not None:
import yotta
yotta_version = version.Version(yotta.__version__)
if not yotta_version_spec.match(yotta_version):
self.error = "requires yotta version %s (current version is %s). see http://docs.yottabuild.org for update instructions" % (
str(yotta_version_spec),
str(yotta_version)
)
if self.de
|
Lipen/LipenDev
|
Azeroth/Pandaria/process.py
|
Python
|
gpl-3.0
| 2,341 | 0.032892 |
import os
sys = os.system
CC = 'g++ {} -std=gnu++0x -Wall'
FLAG_clear = ['/c', '-c']
FLAG_window = ['/w', '-w']
FLAG_exit = ['/e', '-e']
def main():
print('List of existing <*.cpp> files:')
files = []
counter = 0
for file in os.listdir():
if file[-4:] == '.cpp':
counter += 1
files.append(file)
print('{:->3d}) {}'.format(counter, file[:-4]))
name = ''
flags = []
command, *ex = input('Enter your <command> [<name>] [<*flags>]: ').split()
if len(ex):
name = ex[0]
flags = list(ex[1:])
try:
name = files[int(name) - 1]
except:
if name[0] == '#':
try:
fileid = int(name[1:])
name = files[fileid - 1]
except:
pass
else:
flags = list(ex)
if command == 'open':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
if len(list(set(FLAG_window).intersection(set(flags)))) > 0:
sys('start {}'.format(name))
else:
sys('{}'.format(name))
elif command == 'compile':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('Compiling...')
err = sys((CC+' -o {}.exe').format(name, name[:-4]))
if err:
print('Error during compiling. <{}>'.format(err))
else:
            print('Compiled successfully.')
elif command == 'run':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
|
sys('cls')
print('Compiling...')
err = sys((CC+' -o {}.exe').format(name, name[:-4]))
if err:
print('Error during compiling. <{}>'.format(err))
else:
            print('Compiled successfully. Starting:\n' + '-' * 31)
if len(list(set(FLAG_window).intersection(set(flags)))) > 0:
err2 = sys('start {}.exe'.format(name[:-4]))
else:
err2 = sys('{}.exe'.format(name[:-4]))
if err2:
print('-' * 30 + '\nError during execution. <{}
|
>'.format(err2))
else:
                print('-' * 17 + '\nDone successfully.')
elif command == 'list':
if name != '':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('List of existing <*.{}> files:'.format(name))
l = len(name)
for file in os.listdir():
if file[-l:] == name:
print('{:>20}'.format(file[:-l - 1]))
else:
print('List of all existing files:')
for file in os.listdir():
print('{:>20}'.format(file))
if len(list(set(FLAG_exit).intersection(set(flags)))) == 0:
input('-' * 25 + '\nEnd. Press enter to exit: ')
main()
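# Example session (editor's sketch): given the listing printed above,
# entering "run #1 -c -w" clears the screen, compiles the first listed .cpp
# with g++ and starts the resulting .exe in a new window; "compile foo.cpp"
# only builds foo.exe; "list py -e" lists *.py files and exits without the
# final "press enter" prompt.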
|
peterhudec/authomatic
|
examples/gae/simple/main.py
|
Python
|
mit
| 6,966 | 0.001005 |
# -*- coding: utf-8 -*-
# main.py
import webapp2
from authomatic import Authomatic
from authomatic.adapters import Webapp2Adapter
from config import CONFIG
# Instantiate Authomatic.
authomatic = Authomatic(config=CONFIG, secret='some random secret string')
# Create a simple request handler for the login procedure.
class Login(webapp2.RequestHandler):
    # The handler must accept GET and POST HTTP methods and
    # catch the "provider_name" URL variable.
def any(self, provider_name):
# It all begins with login.
result = authomatic.login(Webapp2Adapter(self), provider_name)
# Do not write anything to the response if there is no result!
if result:
# If there is result, the login procedure is over and we can write
# to response.
self.response.write('<a href="..">Home</a>')
if result.error:
# Login procedure finished with an error.
self.response.write(
u'<h2>Damn that error: {}</h2>'.format(result.error.message))
elif result.user:
# Hooray, we ha
|
ve the user!
# OAuth 2.0 and OAuth 1.0a provide only limited user data on login,
# We need to update the user to get more info.
if not (result.user.name and result.user.id):
result.user.update()
# Welcome the user.
self.response.write(u'<h1>Hi {}</h1>'.format(result.user.name))
self.response.write(
u'<h2>Your id is: {}</h2>'.format(result.user.id))
s
|
elf.response.write(
u'<h2>Your email is: {}</h2>'.format(result.user.email))
# Seems like we're done, but there's more we can do...
# If there are credentials (only by AuthorizationProvider),
# we can _access user's protected resources.
if result.user.credentials:
# Each provider has it's specific API.
if result.provider.name == 'fb':
self.response.write(
'Your are logged in with Facebook.<br />')
# We will access the user's 5 most recent statuses.
url = 'https://graph.facebook.com/{}?fields=feed.limit(5)'
url = url.format(result.user.id)
# Access user's protected resource.
response = result.provider.access(url)
if response.status == 200:
# Parse response.
statuses = response.data.get('feed').get('data')
error = response.data.get('error')
if error:
self.response.write(
u'Damn that error: {}!'.format(error))
elif statuses:
self.response.write(
'Your 5 most recent statuses:<br />')
for message in statuses:
text = message.get('message')
date = message.get('created_time')
self.response.write(
u'<h3>{}</h3>'.format(text))
self.response.write(
u'Posted on: {}'.format(date))
else:
self.response.write(
'Damn that unknown error!<br />')
self.response.write(
u'Status: {}'.format(response.status))
if result.provider.name == 'tw':
self.response.write(
'Your are logged in with Twitter.<br />')
# We will get the user's 5 most recent tweets.
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
# You can pass a dictionary of querystring parameters.
response = result.provider.access(url, {'count': 5})
# Parse response.
if response.status == 200:
if isinstance(response.data, list):
# Twitter returns the tweets as a JSON list.
self.response.write(
'Your 5 most recent tweets:')
for tweet in response.data:
text = tweet.get('text')
date = tweet.get('created_at')
self.response.write(
u'<h3>{}</h3>'.format(text.replace(u'\u2013', '[???]')))
self.response.write(
u'Tweeted on: {}'.format(date))
elif response.data.get('errors'):
self.response.write(u'Damn that error: {}!'.
format(response.data.get('errors')))
else:
self.response.write(
'Damn that unknown error!<br />')
self.response.write(
u'Status: {}'.format(response.status))
# Create a home request handler just so that you don't have to enter the urls
# manually.
class Home(webapp2.RequestHandler):
def get(self):
# Create links to the Login handler.
self.response.write(
'Login with <a href="login/fb">Facebook</a>.<br />')
self.response.write('Login with <a href="login/tw">Twitter</a>.<br />')
# Create OpenID form where the user can specify their claimed identifier.
# The library by default extracts the identifier from the "id"
# parameter.
self.response.write('''
<form action="login/oi">
<input type="text" name="id" value="me.yahoo.com" />
<input type="submit" value="Authenticate With OpenID">
</form>
''')
# Create GAEOpenID form
self.response.write('''
<form action="login/gae_oi">
<input type="text" name="id" value="me.yahoo.com" />
<input type="submit" value="Authenticate With GAEOpenID">
</form>
''')
# Create routes.
ROUTES = [webapp2.Route(r'/login/<:.*>', Login, handler_method='any'),
webapp2.Route(r'/', Home)]
# Instantiate the webapp2 WSGI application.
app = webapp2.WSGIApplication(ROUTES, debug=True)
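# --- editor's sketch of the imported config module (config.py is not part of
# this listing; the provider classes are real authomatic providers, the keys
# and secrets are placeholders) ---
#
# from authomatic.providers import oauth1, oauth2, openid, gaeopenid
#
# CONFIG = {
#     'fb': {
#         'class_': oauth2.Facebook,
#         'consumer_key': '#####',
#         'consumer_secret': '#####',
#         'scope': ['user_about_me', 'email'],
#     },
#     'tw': {
#         'class_': oauth1.Twitter,
#         'consumer_key': '#####',
#         'consumer_secret': '#####',
#     },
#     'oi': {'class_': openid.OpenID},
#     'gae_oi': {'class_': gaeopenid.GAEOpenID},
# }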
|
sorenh/cc
|
vendor/Twisted-10.0.0/twisted/conch/test/test_mixin.py
|
Python
|
apache-2.0
| 1,110 | 0.001802 |
# -*- twisted.conch.test.test_mixin -*-
# Copyright (c) 2001-2004 Twisted Matrix Labora
|
tories.
# See LICENSE for details.
import time
from twisted.internet import reactor, protocol
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch import mixin
class TestBufferingProto(mixin.BufferingMixin):
scheduled = False
rescheduled = 0
def sched
|
ule(self):
self.scheduled = True
return object()
def reschedule(self, token):
self.rescheduled += 1
class BufferingTest(unittest.TestCase):
def testBuffering(self):
p = TestBufferingProto()
t = p.transport = StringTransport()
self.failIf(p.scheduled)
L = ['foo', 'bar', 'baz', 'quux']
p.write('foo')
self.failUnless(p.scheduled)
self.failIf(p.rescheduled)
for s in L:
n = p.rescheduled
p.write(s)
self.assertEquals(p.rescheduled, n + 1)
self.assertEquals(t.value(), '')
p.flush()
self.assertEquals(t.value(), 'foo' + ''.join(L))
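# Editor's note: this exercises twisted.conch.mixin.BufferingMixin, which
# coalesces successive write() calls and only hands the concatenated data to
# the transport when the (re)scheduled flush fires -- hence t.value() stays
# empty above until flush() is called.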
|
hgijeon/NetworkLayer
|
morse.py
|
Python
|
gpl-2.0
| 5,184 | 0.021991 |
# Quick lookup tables used by str2morse() below; only A-D are covered here,
# the full alphanumeric mapping lives in AN2Morse further down.
AZ09 = ["A","B","C","D"]
MorseAZ09 = [".-","-...","-.-.","-.."]
def str2morse(string):
string = string.upper()
ret = ""
for c in string:
ret += MorseAZ09[AZ09.index(c)] +" "
return ret
# alphanumeric to morse code dictionary
AN2Morse = {"A":".-",
"B":"-...",
"C":"-.-.",
"D":"-..",
"E":".",
"F":"..-.",
"G":"--.",
"H":"....",
"I":"..",
"J":".---",
"K":"-.-",
"L":".-..",
"M":"--",
"N":"-.",
"O":"---",
"P":".--.",
"Q":"--.-",
"R":".-.",
"S":"...",
"T":"-",
"U":"..-",
"V":"...-",
"W":".--",
"X":"-..-",
"Y":"-.--",
"Z":"--..",
"1":".----",
"2":"..---",
"3":"...--",
"4":"....-",
"5":".....",
"6":"-....",
"7":"--...",
"8":"---..",
"9":"----.",
"0":"-----",
" ":" ",
#"\\":"-.-.-",
"!":"-.-.--",
"@":".--.-.",
#"#":"--.-.",
"$":"...-..-",
#"%":"---.-",
#"^":"",
"&":".-...",
#"*":"...-.",
"(":"-.--.",
")":"-.--.-",
"-":"-....-",
":":"---...",
'"':".-..-.",
"'":".----.",
"+":".-.-.",
#"_":"",
"?":"..--..",
".":".......",#beacuse the morse code for '.' is the same as the stop!
"/":"-..-.",
#">":"-.---",
#"<":"-.--.",
#";":"",
",":"--..--",
#"~":".---..",
#"`":"-..-.-",
"=":"-...-",
#"|":"--.-.-",
"{":"-.--.",
"}":"-.--.-",
"[":"-.--.",
"]":"-.--.-", #all brackets and parentheses have the same code
#commented out keys with values are from here:
#http://www.tandemmaster.org/morse_code.html
}
Morse2AN = {v:k for (k,v) in AN2Morse.items()}
splitLetter = " "
def an2morse(string):
return [AN2Morse[c] for c in string.upper()]
def morse2bit(morseList):
bitList = []
for ch in morseList:
for elem in ch:
if elem == ".":
bitList.append("1")
elif elem == "-":
bitList += ["1", "1", "1"]
elif elem == " ":
bitList.append("0")
bitList.append("0") # end of dot or dash
bitList += ["0", "0"] # end of character
return bitList
def seq2tuple(onOffSeq):
tupleList = []
start0 = start1 = 0
while True:
try:
start1 = onOffSeq.index('1', start0)
tupleList.append(('0', start1-start0))
start0 = onOffSeq.index('0', start1)
tupleList.append(('1', start0-start1))
except:
if len(tupleList) > 0 and tupleList[0][0] == '0':
tupleList = tupleList[1:]
return tupleList
def tuple2bitData(tupleList):
bitDataList = [] # ex: [('1',1), ('0',3), ('1',3), ...]
lenOfDot = findLenOfDot(tupleList)
newList = removeNoise(tupleList,lenOfDot)
for e in newList:
ref = e[1] / lenOfDot
l = 7 if ref > 5 else 3 if ref > 2 else 1
bitDataList.append((e[0], l))
return bitDataList
def removeNoise(tupleList, lenOfDot):
tmp = []
for e in tupleList:
if e[1] / lenOfDot > 0.5:
tmp.append(e)
if len(tmp) < 2:
return tmp
ret = [tmp[0]]
for i in range(1, len(tmp)):
if ret[-1][0] == tmp[i][0]:
ret[-1] = (ret[-1][0], ret[-1][1] + tmp[i][1])
else:
ret.append(tmp[i])
return ret
def findLenOfDot(tupleList):
listOfOne = [e[1] for e in tupleList if e[0] == '1']
avg = sum(listOfOne) / len(listOfOne)
listOfDot = [e for e in listOfOne if e < avg]
return sum(listOfDot) / len(listOfDot)
def bitData2morse(bitDataList):
morseList = []
ch = ''
for e in bitDataList:
if e[0] == '0' or e[0] == False:
if e[1] != 1 and ch != '':
|
morseList.append(ch)
ch = ''
if e[1] >= 6:
morseList.append(" ")
elif e[0] == '1' or e[0] == True:
if e[1] == 1:
ch += '.'
elif e[1] == 3:
ch += '-'
if ch != '':
morseList.append(ch)
return morseList
def morse2an(morseList):
ret
|
urn "".join([Morse2AN[m] for m in morseList])
def an2bit(string):
return morse2bit(an2morse(string))
def seq2an(onOffSeq):
return morse2an(bitData2morse(tuple2bitData(seq2tuple(onOffSeq))))
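# --- usage sketch (editor's addition) ---
# Round-trip "SOS" through the encoder and decoder above. Under Python 3
# division semantics findLenOfDot() sees a dot length of 1 for this clean
# sequence, so seq2an() recovers the original text.
if __name__ == '__main__':
    bits = an2bit("SOS")             # ['1', '0', '1', '0', '1', '0', '0', '0', ...]
    print(''.join(bits))
    print(seq2an(''.join(bits)))     # expected: SOS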
|
frederica07/Dragon_Programming_Process
|
PyOpenGL-3.0.2/src/missingglut.py
|
Python
|
bsd-2-clause
| 1,579 | 0.031032 |
#! /usr/bin/env python
"""Script to find missing GLUT entry points"""
from OpenGL import GLUT
import subprocess, re
func
|
_finder = re.compile( 'FGAPIENTRY (\w+)\(' )
constant_finder = re.compile( '#define\W+([0-9a-zA-Z_]+)\W+((0x)?\d+)' )
INCLUDE_DIR = '/usr/include/GL'
def defined( ):
"""Grep FGAPIENTRY headers fr
|
om /usr/include/GL"""
pipe = subprocess.Popen( 'grep -r FGAPIENTRY %(INCLUDE_DIR)s/*'%globals(), shell=True, stdout=subprocess.PIPE )
stdout,stderr = pipe.communicate()
return stdout
def constants():
pipe = subprocess.Popen( 'grep -r "#define" %(INCLUDE_DIR)s/*glut*'%globals(), shell=True, stdout=subprocess.PIPE )
stdout,stderr = pipe.communicate()
return stdout
def main():
headers = {}
for line in defined().splitlines():
match = func_finder.search( line )
if match:
headers[match.group(1)] = line.split(':',1)[0]
for key in headers.keys():
if hasattr( GLUT, key ):
del headers[key]
import pprint
pprint.pprint( headers )
missing = {}
for line in constants().splitlines():
match = constant_finder.search( line )
if match:
key,value=(match.group(1),match.group(2))
if not hasattr( GLUT, key ):
file = line.split(':',1)[0]
missing.setdefault(file,[]).append( (key,value))
for file,variables in missing.items():
print file
variables.sort()
for key,value in variables:
print '%s=%s'%(key,value)
if __name__ == "__main__":
main()
|
sinsai/Sahana_eden
|
controllers/asset.py
|
Python
|
mit
| 2,306 | 0.006071 |
# -*- coding: utf-8 -*-
""" Asset
@author: Michael Howden (michael@sahanafoundation.org)
@date-created: 2011-03-18
Asset Management Functi
|
onality
"""
prefix = request.controller
resourcename = request.function
#=========================================================================
|
=====
response.menu_options = [
#[T("Home"), False, URL(r=request, c="asset", f="index")],
[T("Assets"), False, URL(r=request, c="asset", f="asset"),
[
[T("List"), False, URL(r=request, c="asset", f="asset")],
[T("Add"), False, URL(r=request, c="asset", f="asset", args="create")],
]],
[T("Catalog Items"), False, URL(r=request, c="supply", f="item"),
[
[T("List"), False, URL(r=request, c="supply", f="item")],
[T("Add"), False, URL(r=request, c="supply", f="item", args="create")],
]],
]
def index():
"""
"""
module_name = deployment_settings.modules[prefix].name_nice
response.title = module_name
return dict(module_name=module_name)
#==============================================================================
def shn_asset_rheader(r):
""" Resource Header for Items """
if r.representation == "html":
asset = r.record
if asset:
tabs = [
(T("Edit Details"), None),
(T("Assignments"), "assign"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
item = db.asset_asset.item_id.represent(asset.item_id)
rheader = DIV(TABLE(TR( TH("%s: " % T("Asset Number")),
asset.number,
TH("%s: " % T("Item")), item,
),
),
rheader_tabs
)
return rheader
return None
#==============================================================================
def asset():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
return s3_rest_controller(prefix, resourcename, rheader=shn_asset_rheader)
# END =========================================================================
|
DailyActie/Surrogate-Model
|
01-codes/tensorflow-master/tensorflow/models/image/mnist/convolutional.py
|
Python
|
mit
| 13,852 | 0.000361 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
import numpy
import tensorflow as tf
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS
def maybe_download(filename):
"""Download the data from Yann's web, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.Size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = numpy.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
dtype=numpy.float32)
labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
for image in xrange(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 *
numpy.sum(numpy.argmax(predictions, 1) == labels) /
predictions.shape[0])
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
tf.float32,
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
tf.float32,
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.initialize_all_variables().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64],
stddev=0.1,
seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(
tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
|
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn
|
.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
|
jesseklein406/data-structures
|
tests/test_simple_graph.py
|
Python
|
mit
| 3,955 | 0.000759 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
i
|
mport pytest
import simple_graph
@pytest.fixture(scope="function")
def create_graph():
new_graph = simple_graph.G()
return new_graph
@pytest.fixture(scope="function")
def build_graph(create_graph):
jerry = simple_graph.Node('Jerry', 5)
allen = simple_graph.Node
|
('Allen', 8)
six = simple_graph.Node('6', 6)
# jerry2allen = simple_graph.Edge(jerry, allen)
# allen2six = simple_graph.Edge(allen, six)
create_graph.add_node(jerry)
create_graph.add_node(allen)
create_graph.add_node(six)
create_graph.add_edge(jerry, allen)
create_graph.add_edge(allen, six)
return create_graph
# g.nodes(): return a list of all nodes in the graph
def test_nodes(build_graph):
build_graph_node_names = [i.name for i in build_graph.nodes()]
assert set(build_graph_node_names) == set(['Jerry', 'Allen', '6'])
# g.edges(): return a list of all edges in the graph
def test_edges(build_graph):
build_graph_edge_names = [(i[0].name, i[1].name) for i in build_graph.edges()]
assert set(build_graph_edge_names) == set([('Jerry', 'Allen'), ('Allen', '6')])
# g.add_node(n): adds a new node 'n' to the graph
def test_add_node(build_graph):
new_node = simple_graph.Node('Jimmy', 0)
build_graph.add_node(new_node)
assert new_node in build_graph.nodes()
# g.add_edge(n1, n2): adds a new edge to the graph connecting 'n1' and 'n2', if
# either n1 or n2 are not already present in the graph, they should be added.
def test_add_edge(build_graph):
new_node1 = simple_graph.Node('new1', 1)
new_node2 = simple_graph.Node('new2', 2)
build_graph.add_node(new_node1)
build_graph.add_node(new_node2)
build_graph.add_edge(new_node1, new_node2)
    assert (new_node1, new_node2) in build_graph.edges()
def test_add_edge_from_new_nodes(build_graph):
    new_node1 = simple_graph.Node('new1', 1)
    new_node2 = simple_graph.Node('new2', 2)
    build_graph.add_edge(new_node1, new_node2)
    assert (new_node1, new_node2) in build_graph.edges()
# g.del_node(n): deletes the node 'n' from the graph, raises an error if no
# such node exists
def test_del_node(build_graph):
current_node = build_graph.nodes()[0]
build_graph.del_node(current_node)
assert current_node not in build_graph.nodes()
# we expect edges to be consistent and updated with nodes
    assert all(current_node not in edge for edge in build_graph.edges())
def test_del_nonexistent_node(build_graph):
new_node = simple_graph.Node('new', 1)
# not in build_graph
with pytest.raises(ValueError):
assert build_graph.del_node(new_node)
# g.del_edge(n1, n2): deletes the edge connecting 'n1' and 'n2' from the graph,
# raises an error if no such edge exists
def test_del_edge(build_graph):
current_edge = build_graph.edges()[0]
    build_graph.del_edge(*current_edge)
assert current_edge not in build_graph.edges()
def test_del_nonexistent_edge(build_graph):
new_node1 = simple_graph.Node('new1', 1)
new_node2 = simple_graph.Node('new2', 2)
    new_edge = (new_node1, new_node2)
    with pytest.raises(ValueError):
        build_graph.del_edge(*new_edge)
# g.has_node(n): True if node 'n' is contained in the graph, False if not.
def test_has_node(build_graph):
contained_node = build_graph.nodes()[0]
    assert build_graph.has_node(contained_node)
def test_node_not_contained(build_graph):
new_node = simple_graph.Node('new', 1)
    assert not build_graph.has_node(new_node)
# g.neighbors(n): returns the list of all nodes connected to 'n' by edges,
# raises an error if n is not in g
def test_neighbors(build_graph):
pass
# g.adjacent(n1, n2): returns True if there is an edge connecting n1 and n2,
# False if not, raises an error if either of the supplied nodes are not in g
def test_adjacent(build_graph):
pass
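# --- editor's sketch (not the repository's simple_graph module) ---
# A minimal Node/G implementation consistent with what the tests above
# exercise; names and behaviour are inferred from the tests, so the real
# module may differ.
#
# class Node(object):
#     def __init__(self, name, value):
#         self.name = name
#         self.value = value
#
# class G(object):
#     def __init__(self):
#         self._nodes, self._edges = [], []
#     def nodes(self):
#         return list(self._nodes)
#     def edges(self):
#         return list(self._edges)
#     def add_node(self, n):
#         self._nodes.append(n)
#     def add_edge(self, n1, n2):
#         for n in (n1, n2):
#             if n not in self._nodes:
#                 self.add_node(n)
#         self._edges.append((n1, n2))
#     def del_node(self, n):
#         if n not in self._nodes:
#             raise ValueError('node not in graph')
#         self._nodes.remove(n)
#         self._edges = [e for e in self._edges if n not in e]
#     def del_edge(self, n1, n2):
#         if (n1, n2) not in self._edges:
#             raise ValueError('edge not in graph')
#         self._edges.remove((n1, n2))
#     def has_node(self, n):
#         return n in self._nodes
#     def neighbors(self, n):
#         if n not in self._nodes:
#             raise ValueError('node not in graph')
#         return [m for e in self._edges if n in e for m in e if m is not n]
#     def adjacent(self, n1, n2):
#         if n1 not in self._nodes or n2 not in self._nodes:
#             raise ValueError('node not in graph')
#         return (n1, n2) in self._edges or (n2, n1) in self._edges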
|
aschampion/CATMAID
|
django/applications/catmaid/control/data_view.py
|
Python
|
gpl-3.0
| 5,209 | 0.011327 |
import json
import re
from collections import defaultdict
from django.conf import settings
from django.db.models import Count
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template import Context, loader
from django.contrib.contenttypes.models import ContentType
from taggit.models import TaggedItem
from catmaid.control.common import makeJSON_legacy_list
from catmaid.control.project import get_project_qs_for_user, extend_projects
from catmaid.models import DataView, DataViewType, Project, Stack, ProjectStack
def get_data_view_type_comment( request ):
""" Return the comment of a specific data view type.
"""
requested_id = request.REQUEST["data_view_type_id"]
if requested_id == "":
|
text = "Please select a valid data view type."
else:
try:
data_view_type_id = int(requested_id)
text = DataViewType.objects.get(pk=data_view_type_id).comment
except:
text = "Sorry, the configuration help text couldn't be retrieved."
result = { 'comment':text }
return HttpResponse(
|
json.dumps(result), content_type="application/json")
def dataview_to_dict( dataview ):
""" Creates a dicitonary of the dataviews' properties.
"""
return {
'id': dataview.id,
'title': dataview.title,
'code_type': dataview.data_view_type.code_type,
'config': dataview.config,
'note': dataview.comment
}
def get_data_view_type( request, data_view_id ):
""" Returns the type of a particular data view.
"""
dv = get_object_or_404(DataView, pk=data_view_id)
code_type = dv.data_view_type.code_type
return HttpResponse(json.dumps({ 'type': code_type }))
def get_available_data_views( request ):
""" Returns a list of all available data views.
"""
all_views = DataView.objects.order_by("position")
dataviews = map(dataview_to_dict, all_views)
return HttpResponse(json.dumps(makeJSON_legacy_list(dataviews)), content_type="application/json")
def get_default_properties( request ):
""" Return the properies of the default data view.
"""
default = DataView.objects.filter(is_default=True)[0]
default = dataview_to_dict( default )
return HttpResponse(json.dumps(default), content_type="application/json")
def get_default_data_view( request ):
""" Return the data view that is marked as the default. If there
is more than one view marked as default, the first one is returned.
"""
default = DataView.objects.filter(is_default=True)[0]
return get_data_view( request, default.id )
def natural_sort(l, field):
""" Natural sorting of a list wrt. to its 'title' attribute.
Based on: http://stackoverflow.com/questions/4836710
"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', getattr(key, field)) ]
return sorted(l, key = alphanum_key)
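# Example (editor's note): for projects titled "stack10", "stack2" and
# "stack1", natural_sort(projects, "title") yields the order
# "stack1", "stack2", "stack10" rather than the plain lexicographic
# "stack1", "stack10", "stack2".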
def get_data_view( request, data_view_id ):
""" Returns a rendered template for the given view.
"""
# Load the template
dv = get_object_or_404(DataView, pk=data_view_id)
code_type = dv.data_view_type.code_type
template = loader.get_template( "catmaid/" + code_type + ".html" )
# Get project information and pass all to the template context
config = json.loads( dv.config )
# Get all the projects that are visible for the current user
projects = get_project_qs_for_user(request.user).prefetch_related('stacks')
# If requested, filter projects by tags. Otherwise, get all.
if "filter_tags" in config:
filter_tags = config["filter_tags"]
# Only get projects that have all the filter tags set
projects = projects.filter( tags__name__in=filter_tags ).annotate(
repeat_count=Count("id") ).filter( repeat_count=len(filter_tags) )
# Build a stack index
stack_index = defaultdict(list)
stacks_of = defaultdict(list)
for p in projects:
for s in p.stacks.all():
stack_index[s.id] = s
stacks_of[p.id].append(s)
# Extend the project list with additional information like editabilty
projects = extend_projects( request.user, projects )
# Sort by default
if "sort" not in config or config["sort"] == True:
projects = natural_sort( projects, "title" )
# Build project index
project_index = dict([(p.id, p) for p in projects])
project_ids = set(project_index.keys())
# Build tag index
ct = ContentType.objects.get_for_model(Project)
tag_links = TaggedItem.objects.filter(content_type=ct) \
.values_list('object_id', 'tag__name')
tag_index = defaultdict(set)
for pid, t in tag_links:
if pid in project_ids:
tag_index[t].add(pid)
context = Context({
'data_view': dv,
'projects': projects,
'config': config,
'settings': settings,
'tag_index': tag_index,
'project_index': project_index,
'stack_index': stack_index,
'stacks_of': stacks_of,
'STATIC_URL': settings.STATIC_URL,
})
return HttpResponse( template.render( context ) )
|
joeywen/zarkov
|
zarkov/config.py
|
Python
|
apache-2.0
| 10,451 | 0.003158 |
'''Handle configuration for zarkov.
We support full configuration on the command line with defaults supplied by
either an .ini-style config file or a yaml (and thus json) config file.
'''
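# A minimal YAML configuration consistent with ConfigSchema below might look
# like this (editor's sketch; every key is optional and falls back to the
# schema's "missing" default, and command-line options still take precedence):
#
#   mongo_uri: mongodb://localhost:27017
#   mongo_database: zarkov
#   bson_bind_address: tcp://0.0.0.0:6543
#   json_bind_address: tcp://0.0.0.0:6544
#   verbose: true
#   backdoor:
#     - command: zarkov
#       port: 6999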
import sys
import logging.config
from optparse import OptionParser
from ConfigParser import ConfigParser
import yaml
import colander
import ming
log = logging.getLogger(__name__)
re_zmq = colander.Regex(
r'(tcp|inproc)://(.+?)(:\d+)?',
'Invalid zeromq URI')
re_ip_port = colander.Regex(
r'(.+?)(:\d+)?',
'Invalid address')
re_mongodb = colander.Regex(
r'(mongodb|mim)://(.+?)(:\d+)?',
'Invalid mongodb URI')
class BackdoorSchema(colander.MappingSchema):
command = colander.SchemaNode(colander.String())
port=colander.SchemaNode(colander.Int())
class BackdoorsSchema(colander.SequenceSchema):
backdoor = BackdoorSchema()
class LogStreamPluginSchema(colander.MappingSchema):
entry_point = colander.SchemaNode(colander.String())
port = colander.SchemaNode(colander.Int())
class LogStreamSchema(colander.SequenceSchema):
plugin = LogStreamPluginSchema()
class ZeroMQURIs(colander.SequenceSchema):
uri = colander.SchemaNode(
colander.String(), validator=re_zmq)
class LoadBalanceSchema(colander.MappingSchema):
incoming_bind = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6543')
outgoing_uris = ZeroMQURIs()
class WebEventSchema(colander.MappingSchema):
bind = colander.SchemaNode(
colander.String(), validator=re_ip_port)
class DBSchema(colander.MappingSchema):
name = colander.SchemaNode(colander.String())
master = colander.SchemaNode(
colander.String(), validator=re_mongodb,
missing='mongodb://localhost:27017')
database = colander.SchemaNode(colander.String())
use_gevent = colander.SchemaNode(colander.Bool(), missing=True)
class ExtraDBSchema(colander.SequenceSchema):
dbs = DBSchema()
class ZMRConfigSchema(colander.MappingSchema):
req_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://127.0.0.1:5555')
req_bind = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:5555')
worker_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0')
reduce_count = colander.SchemaNode(
colander.Int(), missing=256)
event_limit = colander.SchemaNode(
colander.Int(), missing=100000)
job_root = colander.SchemaNode(
colander.String(), missing='/tmp/zmr')
map_chunk_size = colander.SchemaNode(
colander.Int(), missing=5e6)
map_chunks_per_page = colander.SchemaNode(
colander.Int(), missing=20)
outstanding_chunks = colander.SchemaNode(
colander.Int(), missing=4)
max_chunk_timeout = colander.SchemaNode(
colander.Int(), missing=600)
request_greenlets = colander.SchemaNode(
colander.Int(), missing=16)
compress = colander.SchemaNode(
colander.Int(), missing=0)
src_port = colander.SchemaNode(
colander.Int(), missing=0)
sink_port = colander.SchemaNode(
colander.Int(), missing=0)
processes_per_worker = colander.SchemaNode(
colander.Int(), missing=0)
requests_per_worker_process = colander.SchemaNode(
colander.Int(), missing=256)
suicide_level = colander.SchemaNode(
colander.Int(), missing=3 * 2**20)
class ConfigSchema(colander.MappingSchema):
bson_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://127.0.0.1:6543')
json_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
        missing='tcp://127.0.0.1:6544')
bson_bind_address = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6543')
json_bind_address = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6544')
publish_bind_address = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6545')
web_port = colander.SchemaNode(colander.Int(), missing=8081)
backdoor = BackdoorsSchema(missing=[])
mongo_uri = colander.SchemaNode(
colander.String(), validator=re_mongodb,
missing='mongodb://localhost:27017')
mongo_database = colander.SchemaNode(
colander.String(), missing='zarkov')
mongo_username = colander.SchemaNode(
colander.String(), missing=None)
mongo_password = colander.SchemaNode(
colander.String(), missing=None)
verbose = colander.SchemaNode(
colander.Bool(), missing=False)
incremental = colander.SchemaNode(
colander.Bool(), missing=True)
num_event_servers = colander.SchemaNode(
colander.Int(), missing=0)
num_event_logs = colander.SchemaNode(
colander.Int(), missing=4)
journal = colander.SchemaNode(
colander.String(), missing='journal')
journal_file_size = colander.SchemaNode(
colander.Int(), missing=2**18)
journal_min_files = colander.SchemaNode(
colander.Int(), missing=4)
zmr = ZMRConfigSchema(missing=None)
logstream = LogStreamSchema(missing=None)
loadbalance = LoadBalanceSchema(missing=None)
webevent = WebEventSchema(missing=None)
extra_dbs = ExtraDBSchema(missing=[])
extra = colander.SchemaNode(colander.Mapping(), missing={})
flush_mmap = colander.SchemaNode(
colander.Bool(), missing=False)
def configure(args=None):
'''Load the options and configure the system'''
if args is None: args = sys.argv
options, args = get_options(args)
if options.verbose:
log.info('Settings:')
for k,v in sorted(options.__dict__.items()):
log.info(' %s: %r', k, v)
ming_config = {
'ming.zarkov.master':options.mongo_uri,
'ming.zarkov.database':options.mongo_database,
'ming.zarkov.use_gevent':True}
for dbinfo in options.extra_dbs:
dbinfo = dict(dbinfo)
prefix = 'ming.%s.' % dbinfo.pop('name')
for k,v in dbinfo.items():
ming_config[prefix + k] = v
if options.mongo_username:
ming_config['ming.zarkov.authenticate.name'] = options.mongo_username
if options.mongo_username:
ming_config['ming.zarkov.authenticate.password'] = options.mongo_password
ming.configure(**ming_config)
if optio
|
ns.pdb:
sys.excepthook = post
|
mortem_hook
return options, args
def get_options(argv):
'''Load the options from argv and any config files specified'''
defaults=dict(
bind_address='tcp://0.0.0.0:6543',
backdoor=None,
password=None,
mongo_uri='mongodb://127.0.0.1:27017',
mongo_database='zarkov',
journal='journal',
verbose=False,
incremental=10)
optparser = get_parser(defaults)
options, args = optparser.parse_args(argv)
config_schema = ConfigSchema()
defaults.update(config_schema.deserialize({}))
if options.ini_file:
config = ConfigParser()
config.read(options.ini_file)
log.info('About to configure logging')
logging.config.fileConfig(options.ini_file, disable_existing_loggers=False)
log.info('Configured logging')
if config.has_section('zarkov'):
defaults.update(
(k, eval(v)) for k,v in config.items('zarkov'))
if options.yaml_file:
with open(options.yaml_file) as fp:
yaml_obj = yaml.load(fp.read())
yaml_obj = config_schema.deserialize(yaml_obj)
if yaml_obj:
defaults.update(yaml_obj)
else:
log.warning('No configuration found -- empty yaml file %r?',
options.yaml_file)
optparser = get_parser(defaults)
options, args = optparser.parse_args(argv)
return options, args
def get_parser(defaults):
'''Build a command line OptionParser based on the given defaults'''
optparser = OptionParser(
usage=('%prog [--options]'))
optparser.set_defaults(**defau
|
opendata/lmgtdfy
|
lmgtfy/views.py
|
Python
|
mit
| 4,151 | 0.003854 |
import csv
from django.contrib import messages
from django.shortcuts import HttpResponseRedirect, resolve_url, HttpResponse
from django.views.generic import FormView, ListView
from lmgtfy.forms import MainForm
from lmgtfy.helpers import search_bing, check_valid_tld
from lmgtfy.models import Domain, DomainSearch, DomainSearchResult
class MainView(FormView):
template_name = 'main.html'
form_class = MainForm
success_url = '.'
def get_context_data(self, **kwargs):
context = super(MainView, self).get_context_data(**kwargs)
domains_and_latest_counts = []
for domain in Domain.objects.order_by("-id")[:50]:
domain_search_latest = domain.domainsearch_set.all().last()
if not domain_search_latest:
continue
count = domain_search_latest.domainsearchresult_set.count()
domains_and_latest_counts.append((domain.name, count))
context['table_data'] = domains_and_latest_counts
return context
def form_valid(self, form):
data = form.cleaned_data
domain = data['domain_base']
domain_is_whitelisted = check_valid_tld(domain)
if not domain_is_whitelisted:
messages.info(
self.request,
"Sorry, but to limit the cost of running this service, we have not enabled searching this domain name (%s)." % domain
)
return HttpResponseRedirect(resolve_url('home'))
search_done = search_bing(domain)
if not search_done:
messages.info(
self.request,
"This domain has already been requested today! Here is what we've gathered."
)
else:
messages.info(
self.request,
"Gathering results now. They will be displayed shortly."
)
return HttpResponseRedirect(
resolve_url('do
|
main_result', domain)
)
main_vie
|
w = MainView.as_view()
class SearchResultView(ListView):
template_name = 'result.html'
model = DomainSearchResult
success_url = '.'
def get_queryset(self):
qs = super(SearchResultView, self).get_queryset()
try:
domain = self.kwargs['domain']
fmt = self.kwargs.get('fmt')
except:
raise Exception('Invalid URL parameter has been passed.')
qs = qs.filter(
search_instance__domain__name=domain
).order_by('result')
if fmt:
qs = qs.filter(fmt=fmt)
return qs
def get_context_data(self, **kwargs):
context = super(SearchResultView, self).get_context_data(**kwargs)
domain_name = self.kwargs['domain']
context['domain_name'] = domain_name
context['format'] = self.kwargs.get('fmt')
self.kwargs['fmt'] = None # clear the format
# so that we get a list of all of the formats for the domain
qs = set(self.get_queryset().values_list('fmt', flat=True))
context['file_formats'] = list(qs)
domain = Domain.objects.filter(name=domain_name)
search_being_performed = len(DomainSearch.objects.filter(domain=domain, completed_at=None)) > 0
if search_being_performed:
messages.info(
self.request,
"We're gathering more results right now. This page will refresh in 10 seconds."
)
context['refresh_counter'] = 10
return context
search_result_view = SearchResultView.as_view()
def get_csv(request, domain):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % domain
writer = csv.writer(response)
qs = DomainSearchResult.objects.filter(
search_instance__domain__name=domain
).order_by('result').distinct()
writer.writerow(["title", "format", "kilobyte_size", "url"])
for result in qs:
writer.writerow([result.title.encode("utf-8"), result.fmt.encode("utf-8"),
result.size, result.result.encode("utf-8")])
return response
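# Editor's sketch of the URL configuration these views imply (the view names
# 'home' and 'domain_result' come from the resolve_url() calls above; the
# actual urls.py is not part of this listing, so the patterns are assumptions):
#
# urlpatterns = [
#     url(r'^$', main_view, name='home'),
#     url(r'^search/(?P<domain>[^/]+)/$', search_result_view, name='domain_result'),
#     url(r'^search/(?P<domain>[^/]+)/(?P<fmt>\w+)/$', search_result_view, name='domain_result_fmt'),
#     url(r'^csv/(?P<domain>[^/]+)/$', get_csv, name='domain_csv'),
# ]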
|
JohnUrban/fast5tools
|
bin/filterFast5DerivedFastx.py
|
Python
|
mit
| 11,145 | 0.007447 |
#!/usr/bin/env python2.7
import os, sys
from Bio import SeqIO
import argparse
from fast5tools.fxclass import *
#################################################
## Argument Parser
#################################################
parser = argparse.ArgumentParser(description = """
Given path(s) to fasta/fastq file(s) and/or directories containing them,
return sequences filtered by any of the following:
1. read type (2d, template, complement)
2. length
3. mean quality score
4. channel number
5. read number
6. ASIC ID
7. Run ID
8. Device ID
9. Base-calling model
Return in same format as input or choose:
fasta, fastq, qual, intqual, oldfalcon, newfalcon
Note: newfalcon output is fasta with falcon-compatible headers.
For newer Falcon:
>asic_run_device_basecallingmodel/i/0_readlen OtherInfo
Where
i is order it was encountered in
OtherInfo will include readtype,mean quality score, read number, channel number
For oldfalcon output:
>000_000/i/0_readlen OriginalFastaHeader
Where i is number read is encountered in.
TODO: fastaqual, fastaintqual
NOTE:
Fasta can be converted to fastq or quals, BUT the quals will not be correct per se.
First, they will be related to the mean q-score (Q).
Second, they will be rounded to the nearest int.
Thus, the mean q-score in the header/seqname will not be consistent with the mean of the quality scores.
It is related by int(round(Q)).
For now, input files are fasta, fastq, or dirs with them.
TODO: Allow tar/tarlite approach. Allow gzipped. Allow FOFN.
TODO: falconizeFast5DerivedFastx.py for more options and more description/info.
John Urban (2015, 2016)
""", formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('fastx', metavar='fastx', nargs='+',
type= str,
help='''Paths to as many fasta and/or fastq files and/or directories filled with them as you want.
Assumes all fasta files have '.fasta', '.fa', and/or '.fna' extensions (only accepts these).
Assumes all fastq files have '.fastq' or '.fq' extensions (only accepts these).
Assumes given that given one of the above extensions, the internal formatting is consistent with either fasta or fastq.
If inside dir of dirs with desired files, then can just do "*" to get all files from all dirs.''')
parser.add_argument('-r', '--readtype', default="all",
type= str,
help='''Choose type of fasta to get.
Choices: 'template', 'complement', '2d', 'molecule', 'all', 'MoleQual'.
Default: all.
There is no need to write full word for options - can do: t, c, 2, m, a, M.
CAUTION: for now, if the word given begins with "m" it is "molecule"; if "M", it is MoleQual. This means that 'molequal' will return 'molecule' results. etc.
Molecule returns single fastx for when there is more than 1 record for a given Channel#/Read#/Run_ID/ASIC_ID:
if 2d present, return 2d.
elif complement present with no 2d, return longer of template or complement.
elif only template present, return template.
'MoleQual' is similar to molecule.
It differs only in choosing between template and complement when a 2D is not present.
Instead of choosing the longer one, it chooses the one with a higher quality mean quality score.
NOTE: it is assumed that reads from the same molecule (shared Channel#/Read#/Run_ID/ASIC_ID)
are clustered together (i.e. occur consecutively) in given input.
If not, then molecule and MoleQual protocols will not work as expected.''')
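# Illustrative sketch (not part of fast5tools): the 'molecule' / 'MoleQual'
# selection rule described in the help text above, assuming each record in a
# per-molecule group exposes hypothetical readtype, length and meanq attributes.
def _pick_one_per_molecule(records, by_quality=False):
    """Prefer the 2d read; otherwise take the longer (or higher-quality) of template/complement."""
    for rec in records:
        if rec.readtype == "2d":
            return rec
    key = (lambda r: r.meanq) if by_quality else (lambda r: r.length)
    return max(records, key=key)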
parser.add_argument('-i', '--intype', type=str, default='fasta',
help=''' Choices: fasta, fastq, input.
Default: fasta.
Note: input (one or both formats found in input).
Declare which input types are to be explored for filtering.
Since one can direct this script at directories that may contain both fasta and fastq,
this gives an extra level of awareness to explore only a given file type (or both).
One may also want to look at both fasta and fastq, but output only fasta (see -o).
In rare cases, one may want to read in both types, and return the same type (-i input, -o input).
For now all output is directed to stdout, so the latter case is not recommended.
In the future, if output from each given input file can automatically be directed to a similarly named
output file (with .filtered. added in), then it might make more sense.''')
parser.add_argument('-o', '--outtype', type=str, default='fasta',
help = '''Choices: input, fasta, fastq, qual, intqual, falcon.
Default: fasta.
Note: input (whatever format the file comes in as).
See -i for discussion on use cases.
falcon: returns fasta but with fasta headers compatible with FALCON assembler.
TODO:
fastaqual, fastaintqual''')
parser.add_argument('--minlen', type=int, default=0, help='''Only report reads >= minlen. Default: 0 bp.''')
parser.add_argument('--maxlen', type=int, default=int(3e9), help='''Only report reads <= maxlen. Default: 3 billion bp.''')
parser.add_argument('--minq', type=float, default=0, help='''Only report reads with mean quality scores >= Q. Default: 0.''')
parser.add_argument('--maxq', type=float, default=int(10e3), help='''Only report reads with mean quality scores <= Q.
Default: 10000 (this is orders of magnitude higher than normal max which are always < 20)''')
parser.add_argument('--channel', type=str, default=None, help='''Only report reads from given channel number. Default: reports from any/all channels present.''')
parser.add_argument('--readnum', type=str, default=None, help='''Only report reads with given read number. Default: reports from any/all read numbers.''')
parser.add_argument('--asic', type=str, default=None, help='''Only report reads with given asic ID. Default: reports from any/all ASIC IDs present.''')
parser.add_argument('--run', type=str, default=None, help='''Only report reads with given run ID. Default: reports from any/all Run IDs present.''')
parser.add_argument('--device', type=str, default=None, help='''Only report reads with given device ID. Default: reports from any/all Device IDs present.''')
parser.add_argument('--model', type=str, default=None, help='''Only report reads with given base-calling model ID. Default: reports from any/all base-calling model IDs present.''')
parser.add_argument('--rule', type=str, default='and', help='''Require each sequence to pass ALL the filters (use 'and') or pass at least N filters (use 'or')''')
parser.add_argument('--minscore', type=int, default=1, help='''If requiring sequences only pass at least N filters (--rule 'or'), then specify minimum number of filters to pass. Default: 1.''')
##parser.add_argument('--tarlite', action='store_true', default=False, help=''' This method extracts 1 file from a given tarchive at a time, processes, and deletes it.
##The older still-default routine extracts the entirety of all given tarchives at once, then processes files.
##The default method will therefore require >2*tarchive amount of disk space (i.e. the tar.gz and its extracted contents).
##The tarlite method only requires the disk space already taken by the tarchive and enough for 1 additional file at a time.
##Tarlite may become the default method after some testing if it performs at similar speeds.''')
args = parser.parse_args()
#################################################
## deal with some of the arguments
#################################################
assert args.outtype in ("fasta", "fastq", "qual", "intqual", "falcon", "oldfalcon", "newfalcon")
assert args.intype in ("input", "fasta", "fastq", "both")
assert args.readtype[0] in "tc2maM"
if args.readtype[0] == "t":
args.readtype = "template"
elif args.readtype[0] == "c":
args.readtype = "complement"
elif args.readtype[0] == "2":
|
args.readtype = "2d"
elif args.readtype[0] == "m":
args.readtype = "molecule"
elif args.readtype[0] == "a":
args.readtype = "all"
|
elif args.readtype[0] == "M":
args.readtype = "MoleQual"
if args.intype == 'input' or args.intype == "both":
intypes = ['fasta', 'fastq']
elif args.intype == 'fasta':
intypes = ['fasta']
elif args.intype == 'fastq':
intypes = ['fastq']
def filter_by_entry(readtype, minlen, maxlen, minq, maxq, channel, readnum, asi
|
Distrotech/scons
|
test/Fortran/F95COMSTR.py
|
Python
|
mit
| 2,501 | 0.001599 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myfc.py', r"""
import sys
fline = '#'+sys.argv[1]+'\n'
outfile = open(sys.argv[2], 'wb')
infile = open(sys.argv[3], 'rb')
for l in [l for l in infile.readlines() if l != fline]:
outfile.write(l)
sys.exit(0)
""")
if not TestSCons.case_sensitive_suffixes('.f','.F'):
f95pp = 'f95'
else:
f95pp = 'f95pp'
test.write('SCons
|
truct', """
env = Environment(F95COM = r'%(_python_)s myfc.py f95 $TARGET $SOURCES',
F95COMSTR = 'Building f95 $TARGET from $SOURCES',
F95PPCOM = r'%(_python_)s myfc.py f95pp $TARGET $SOURCES',
|
F95PPCOMSTR = 'Building f95pp $TARGET from $SOURCES',
OBJSUFFIX='.obj')
env.Object(source = 'test01.f95')
env.Object(source = 'test02.F95')
""" % locals())
test.write('test01.f95', "A .f95 file.\n#f95\n")
test.write('test02.F95', "A .F95 file.\n#%s\n" % f95pp)
test.run(stdout = test.wrap_stdout("""\
Building f95 test01.obj from test01.f95
Building %(f95pp)s test02.obj from test02.F95
""" % locals()))
test.must_match('test01.obj', "A .f95 file.\n")
test.must_match('test02.obj', "A .F95 file.\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
nkgilley/home-assistant
|
homeassistant/components/vesync/switch.py
|
Python
|
apache-2.0
| 3,095 | 0.000323 |
"""Support for Etekcity VeSync switches."""
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .common import VeSyncDevice
from .const import DOMAIN, VS_DISCOVERY, VS_DISPATCHERS, VS_SWITCHES
_LOGGER = logging.getLogger(__name__)
DEV_TYPE_TO_HA = {
"wifi-switch-1.3": "outlet",
"ESW03-USA": "outlet",
"ESW01-EU": "outlet",
"ESW15-USA": "outlet",
"ESWL01": "switch",
"ESWL03": "switch",
"ESO15-TB": "outlet",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches."""
async def async_discover(devices):
"""Add new devices to platform."""
_async_setup_entities(devices, async_add_entities)
disp = async_dispatcher_connect(
hass, VS_DISCOVERY.format(VS_SWITCHES), async_discover
)
hass.data[DOMAIN][VS_DISPATCHERS].append(disp)
_async_setup_entities(hass.data[DOMAIN][VS_SWITCHES], async_add_entities)
return True
@callback
def _async_setup_entities(devices, async_add_entities):
"""Check if device is online and add entity."""
dev_list = []
for dev in devices:
if DEV_TYPE_TO_HA.get(dev.device_type) == "outlet":
dev_list.append(VeSyncSwitchHA(dev))
elif DEV_TYPE_TO_HA.get(dev.device_type) == "switch":
dev_list.append(VeSyncLightSwitch(dev))
else:
_LOGGER.warning(
"%s - Unknown device type - %s", dev.device_name, dev.device_type
)
continue
async_add_entities(dev_list, update_before_add=True)
class VeSyncSwitchHA(VeSyncDevice, SwitchEntity):
"""Representation of a VeSync switch."""
def __in
|
it__(self, plug):
"""Initialize the VeSync switch device."""
super().__init__(plug)
self.smartplug = plug
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if hasattr(self.smartplug, "weekly_energy_total"):
attr["voltage"]
|
= self.smartplug.voltage
attr["weekly_energy_total"] = self.smartplug.weekly_energy_total
attr["monthly_energy_total"] = self.smartplug.monthly_energy_total
attr["yearly_energy_total"] = self.smartplug.yearly_energy_total
return attr
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.smartplug.power
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return self.smartplug.energy_today
def update(self):
"""Update outlet details and energy usage."""
self.smartplug.update()
self.smartplug.update_energy()
class VeSyncLightSwitch(VeSyncDevice, SwitchEntity):
"""Handle representation of VeSync Light Switch."""
def __init__(self, switch):
"""Initialize Light Switch device class."""
super().__init__(switch)
self.switch = switch
|
WarrenWeckesser/scipy
|
scipy/stats/_rvs_sampling.py
|
Python
|
bsd-3-clause
| 7,177 | 0 |
import numpy as np
from scipy._lib._util import check_random_state
def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
"""
Generate random samples from a probability density function using the
ratio-of-uniforms method.
Parameters
----------
pdf : callable
A function with signature `pdf(x)` that is proportional to the
probability density function of the distribution.
umax : float
The upper bound of the bounding rectangle in the u-direction.
vmin : float
The lower bound of the bounding rectangle in the v-direction.
vmax : float
The upper bound of the bounding rectangle in the v-direction.
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
c : float, optional.
Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
rvs : ndarray
The random variates distributed according to the probability
distribution defined by the pdf.
Notes
-----
Given a univariate probability density function `pdf` and a constant `c`,
define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
If `(U, V)` is a random vector uniformly distributed over `A`,
then `V/U + c` follows a distribution according to `pdf`.
The above result (see [1]_, [2]_) can be used to sample random variables
using only the pdf, i.e. no inversion of the cdf is required. Typical
choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of
the rectangle ``R = [0, umax] x [vmin, vmax]`` where
- ``umax = sup sqrt(pdf(x))``
- ``vmin = inf (x - c) sqrt(pdf(x))``
- ``vmax = sup (x - c) sqrt(pdf(x))``
In particular, these values are finite if `pdf` is bounded and
``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
One can generate `(U, V)` uniformly on `R` and return
`V/U + c` if `(U, V)` are also in `A` which can be directly
verified.
The algorithm is not changed if one replaces `pdf` by k * `pdf` for any
constant k > 0. Thus, it is often convenient to work with a function
that is proportional to the probability density function by dropping
    unnecessary normalization factors.
    Intuitively, the method works well if `A` fills up most of the
    enclosing rectangle, so that the probability is high that `(U, V)`
    lies in `A` whenever it lies in `R`; otherwise the number of required
    iterations becomes too large. To be more precise, note that
the expected number of iterations to draw `(U, V)` uniformly
distributed on `R` such that `(U, V)` is also in `A` is given by
the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``,
where `area(pdf)` is the integral of `pdf` (which is equal to one if the
probability density function is used but can take on other values if a
function proportional to the density is used). The equality holds since
the area of `A` is equal to 0.5 * area(pdf) (Theorem 7.1 in [1]_).
If the sampling fails to generate a single random variate after 50000
iterations (i.e. not a single draw is in `A`), an exception is raised.
If the bounding rectangle is not correctly specified (i.e. if it does not
contain `A`), the algorithm samples from a distribution different from
the one given by `pdf`. It is therefore recommended to perform a
test such as `~scipy.stats.kstest` as a check.
References
----------
.. [1] L. Devroye, "Non-Uniform Random Variate Generation",
Springer-Verlag, 1986.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
.. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
Variables Using the Ratio of Uniform Deviates",
ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
Simulate normally distributed random variables. It is easy to compute the
bounding rectangle explicitly in that case. For simplicity, we drop the
normalization factor of the density.
>>> f = lambda x: np.exp(-x**2 / 2)
>>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
>>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
>>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500,
... random_state=rng)
The K-S test confirms that the random variates are indeed normally
distributed (normality is not rejected at 5% significance level):
>>> stats.kstest(rvs, 'norm')[1]
0.250634764150542
The exponential distribution provides another example where the bounding
rectangle can be determined explicitly.
>>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
... vmin=0, vmax=2*np.exp(-1), size=1000,
... random_state=rng)
>>> stats.kstest(rvs, 'expon')[1]
0.21121052054580314
"""
if vmin >= vmax:
raise ValueError("vmin must be smaller than
|
vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
size1d = tuple(np.atleast_1d(size))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# start sampling using ratio of uniforms method
rng = check_random_state(random_state)
x = np.zeros(N)
simulated, i = 0, 1
# loop until N rvs have been generated: expected runtime is finite.
# to avoi
|
d infinite loop, raise exception if not a single rv has been
    # generated after 50000 tries. Even if the expected number of iterations
# is 1000, the probability of this event is (1-1/1000)**50000
# which is of order 10e-22
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u1 = umax * rng.uniform(size=k)
v1 = rng.uniform(vmin, vmax, size=k)
# apply rejection method
rvs = v1 / u1 + c
accept = (u1**2 <= pdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated in {} "
"attempts. The ratio of uniforms method does not appear "
"to work for the provided parameters. Please check the "
"pdf and the bounds.".format(i*N))
raise RuntimeError(msg)
i += 1
return np.reshape(x, size1d)
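# Illustrative sketch (not part of scipy): numerically estimating the bounding
# rectangle described in the Notes above for a bounded pdf whose support lies
# roughly within [lo, hi]; assumes scipy.optimize is available.
def _estimate_ru_bounds(pdf, lo, hi, c=0.0):
    """Estimate (umax, vmin, vmax) for use with rvs_ratio_uniforms."""
    from scipy import optimize
    u = lambda x: np.sqrt(pdf(x))            # umax = sup sqrt(pdf(x))
    v = lambda x: (x - c) * np.sqrt(pdf(x))  # vmin/vmax = inf/sup of (x - c) * sqrt(pdf(x))
    umax = u(optimize.minimize_scalar(lambda x: -u(x), bounds=(lo, hi),
                                      method='bounded').x)
    vmin = optimize.minimize_scalar(v, bounds=(lo, hi), method='bounded').fun
    vmax = -optimize.minimize_scalar(lambda x: -v(x), bounds=(lo, hi),
                                     method='bounded').fun
    return umax, vmin, vmax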
|
LTKills/languages
|
python/data_structures/strings.py
|
Python
|
gpl-3.0
| 36 | 0.027778 |
string = input()
string[
|
0] = "a"
| |
DISMGryphons/GryphonCTF2017-Challenges
|
challenges/misc/Spirals/solution/solution.py
|
Python
|
gpl-3.0
| 812 | 0.032338 |
#!/usr/bin/env python3
import re
a = [[0 for x in range(25)] for y in range(13)]
f=open("../distrib/spiral.txt","r")
s=f.readline().strip()
dx, dy = [0, 1, 0, -1], [1, 0, -1, 0]  # direction deltas: right, down, left, up
x, y, c = 0, -1, 1  # start just left of the top-left cell; c counts placed characters
l = 0  # index into the flattened spiral string
for i in range(13+13-1):
if i%2==0:
for j in range((25+25-i)//2):
x += dx[i % 4]
y += dy[i % 4]
#print(x,y,l)
a[x][y] = s[l]
l=l+1
|
c += 1
else:
for j in range((13+13-i)//2):
x +=
|
dx[i % 4]
y += dy[i % 4]
#print(x,y,l)
a[x][y] = s[l]
l=l+1
c += 1
for i in a:
for k in i:
k=re.sub(r"¦","█",k)
k=re.sub(r"¯","▀",k)
k=re.sub(r"_","▄",k)
print(k,end="")
print()
|
ge0rgi/cinder
|
cinder/volume/manager.py
|
Python
|
apache-2.0
| 211,218 | 0.000199 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2017 Georgi Georgiev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
#ge0rgi:added is_volume_trusted
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.scheduler.filters.asset_tag_filter import TrustAssertionFilter
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
VA_LIST = objects.VolumeAttachmentList
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
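# Illustrative sketch (not part of cinder): the 'extra_capabilities' option above
# is a JSON string of operator-defined key/value pairs consumed by the
# CapabilitiesFilter; the keys below are made-up examples.
_EXAMPLE_EXTRA_CAPABILITIES = jsonutils.loads(
    '{"service_level": "gold", "region": "eu-west-1"}')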
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.ibm.xiv_ds8k':
'cinder.volume.drivers.ibm.ibm_storage',
'cinder.volume.drivers.emc.scaleio':
'cinder.volume.drivers.dell_emc.scaleio.driver',
'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver':
'cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver',
'cinder.volume.drivers.datera.DateraDriver':
'cinder.volume.drivers.datera.datera_iscsi.DateraDriver',
'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver':
'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver',
'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver':
'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver',
'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver':
'cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver',
}
class VolumeManager(manager.CleanableManager,
manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwa
|
rgs
|
):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_na
|
openstack/ironic
|
ironic/drivers/modules/storage/cinder.py
|
Python
|
apache-2.0
| 20,389 | 0 |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
# Copyright 2016 IBM Corp
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import strutils
import tenacity
from ironic.common import cinder
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.drivers import base
from ironic.drivers import utils
from ironic import objects
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# NOTE(TheJulia): Sets containing known valid types that align with
# _generate_connector() and the volume connection information spec.
VALID_ISCSI_TYPES = ('iqn',)
# TODO(TheJulia): FCoE?
VALID_FC_TYPES = ('wwpn', 'wwnn')
class CinderStorage(base.StorageInterface):
"""A storage_interface driver supporting Cinder."""
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return {}
def _fail_validation(self, task, reason,
exception=exception.InvalidParameterValue):
msg = (_("Failed to validate cinder storage interface for node "
"%(node)s. %(reason)s") %
{'node': task.node.uuid, 'reason': reason})
LOG.error(msg)
raise exception(msg)
def _validate_connectors(self, task):
"""Validate connector information helper.
Enumerates through all connector objects, and identifies if
iSCSI or Fibre Channel connectors are present.
:param task: The task object.
:raises InvalidParameterValue: If iSCSI is identified and
iPXE is disabled.
:raises StorageError: If the number of wwpns is not equal to
the number of wwnns
:returns: Dictionary containing iscsi_found and fc_found
keys with boolean values representing if the
helper found that connector type configured
for the node.
"""
node = task.node
iscsi_uuids_found = []
wwpn_found = 0
wwnn_found = 0
ipxe_enabled = False
if 'ipxe_boot' in task.driver.boot.capabilities:
ipxe_enabled = True
for connector in task.volume_connectors:
if (connector.type in VALID_ISCSI_TYPES
and connector.connector_id is not None):
iscsi_uuids_found.append(connector.uuid)
if not ipxe_enabled:
msg = _("The [pxe]/ipxe_enabled option must "
"be set to True or the boot interface "
"must be set to ``ipxe`` to support network "
"booting to an iSCSI volume.")
self._fail_validation(task, msg)
if (connector.type in VALID_FC_TYPES
and connector.connector_id is not None):
# NOTE(TheJulia): Unlike iSCSI with cinder, we have no need
# to warn about multiple IQN entries, since we are able to
# submit multiple fibre channel WWPN entries.
if connector.type == 'wwpn':
wwpn_found += 1
if connector.type == 'wwnn':
wwnn_found += 1
if len(iscsi_uuids_found) > 1:
LOG.warning("Multiple possible iSCSI connectors, "
"%(iscsi_uuids_found)s found, for node %(node)s. "
"Only the first iSCSI connector, %(iscsi_uuid)s, "
"will be utilized.",
{'node': node.uuid,
'iscsi_uuids_found': iscsi_uuids_found,
'iscsi_uuid': iscsi_uuids_found[0]})
if wwpn_found != wwnn_found:
msg = _("Cinder requires both wwnn and wwpn entries for FCoE "
"connections. There must be a wwpn entry for every wwnn "
"entry. There are %(wwpn)d wwpn entries and %(wwnn)s wwnn "
"entries.") % {'wwpn': wwpn_found, 'wwnn': wwnn_found}
self._fail_validation(task, msg, exception.StorageError)
return {'fc_found': wwpn_found >= 1,
'iscsi_found': len(iscsi_uuids_found) >= 1}
def _validate_targets(self, task, found_types, iscsi_boot, fc_boot):
"""Validate target information helper.
Enumerates through all target objects and identifies if
iSCSI or Fibre Channel targets are present, and matches the
connector capability of the node.
:param task: The task object.
:param found_types: Dictionary containing boolean values returned
from the _validate_connectors helper method.
:param iscsi_boot: Boolean value indicating if iSCSI boot operations
are available.
:param fc_boot: Boolean value indicating if Fibre Channel boot
operations are available.
:raises: InvalidParameterValue
"""
for volume in task.volume_targets:
if volume.volume_id is None:
msg = (_("volume_id missing from target %(id)s.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
# NOTE(TheJulia): We should likely consider incorporation
# of the volume boot_index field, however it may not be
# relevant to the checks we perform here as in the end a
# FC volume "attached" to a node is a valid configuration
# as well.
# TODO(TheJulia): When we create support in nova to record
# that a volume attachment is going to take place, we will
# likely need to match the driver_volume_type field to
# our generic volume_type field. NB The LVM driver appears
# to not use that convention in cinder, as it is freeform.
if volume.volume_type == 'fibre_channel':
if not fc_boot and volume.boot_index == 0:
msg = (_("Volume target %(id)s is configured for "
"'fibre_channel', however the capability "
"'fibre_channel_boot' is not set on node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
if not found_types['fc_found'
|
]:
msg = (_("Volume target %(id)s is configured for "
"'fibre_channel', however no Fibre Channel "
"WWPNs are configured for the node volume "
"connectors.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
elif volume.volum
|
e_type == 'iscsi':
if not iscsi_boot and volume.boot_index == 0:
msg = (_("Volume target %(id)s is configured for "
"'iscsi', however the capability 'iscsi_boot' "
"is not set for the node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
if not found_types['iscsi_found']:
msg = (_("Volume target %(id)s is configured for "
"'iscsi', however no iSCSI connectors are "
"configured for the node.") %
{'id': volume.uuid})
self._fail
|
Cyberjusticelab/JusticeAI
|
src/ml_service/feature_extraction/pre_processing/pre_processing_driver.py
|
Python
|
mit
| 168 | 0.005952 |
from f
|
eature_extraction.pre_processing.filter_precedent import precendent_directory_cleaner
def run(command_list):
precendent_directory_cleaner.run(command_
|
list)
|
jozefg/solar-system
|
src/attrdict.py
|
Python
|
mit
| 148 | 0 |
class
|
AttrDict(dict):
def __init__
|
(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
|
devops2014/djangosite
|
django/db/backends/base/features.py
|
Python
|
bsd-3-clause
| 9,722 | 0.001234 |
from django.db.models.aggregates import StdDev
from django.db.models.expressions import Value
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures(object):
gis_enabled = False
allows_group_by_pk = False
# True if django.db.backends.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_savepoints = False
can_release_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver support timedeltas as arguments?
# This is only relevant when there is a native duration field.
# Specifically, there is a bug with cx_Oracle:
# https://bitbucket.org/anthony_tuininga/cx_oracle/issue/7/
driver_supports_timedelta_args = False
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have an autoincrement primary key of 0? MySQL says No.
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend determine reliably the length of a CharField?
can_introspect_max_length = True
# Can the backend determine reliably if a field is nullable?
# Note that this is separate from interprets_empty_strings_as_nulls,
# although the latter feature, when true, interferes with correct
# setting (and introspection) of CharFields' nullability.
# This is True for all core backends.
can_introspect_null = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Can the backend introspect a BigIntegerField, instead of an IntegerField?
can_introspect_big_integer_field = True
# Can the backend introspect an BinaryField, instead of an TextField?
can_introspect_binary_field = True
# Can the backend introspect an DecimalField, instead of an FloatField?
can_introspect_decimal_field = True
# Can the backend introspect an IPAddressField, instead of an CharField?
can_introspect_ip_address_field = False
# Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
can_introspect_positive_integer_field = False
# Can the backend introspect a SmallIntegerField, instead of an IntegerField?
can_introspect_small_integer_field = False
# Can the backend introspect a TimeField, instead of a DateTimeField?
can_introspect_time_field = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
#
|
supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend
|
require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = True
# Does the backend require the sqlparse library for splitting multi-line
# statements before executing them?
requires_sqlparse_for_splitting = True
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ''
# If NULL is implied on columns without needing to be explicitly specified
implied_column_null = False
uppercases_column_names = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connectio
|
wujuguang/sentry
|
tests/sentry/rules/conditions/test_level_event.py
|
Python
|
bsd-3-clause
| 1,452 | 0 |
from sentry.testutils.cases import RuleTestCase
from sentry.rules.conditions.level import LevelCondition, LevelMatchType
class LevelConditionTest(RuleTestCase):
rule_cls = LevelCondition
def get_event(self):
event = self.event
event.group.level = 20
return event
def test_equals(self):
event = self.get_event()
rule = self.get_rule({
'match': LevelMatchType.EQUAL,
'level': '20',
})
self.assertPasses(rule, event)
rule = self.get_rule({
'match': LevelMatchType.EQUAL,
'level': '30',
})
self.assertDoesNotPass(rule, event)
def test_greater_than(self):
event = self.get_event()
rule = self.get_rule({
'match': LevelMatchType.GREATER_OR_EQUAL,
'level': '40',
})
|
self.assertDoesNotPass(rule, event)
rule = self.get_rule({
'match': LevelMatchType.GREATER_OR_EQUAL,
'level': '20',
})
self.assertPasses(rule, event)
def test_l
|
ess_than(self):
event = self.get_event()
rule = self.get_rule({
'match': LevelMatchType.LESS_OR_EQUAL,
'level': '10',
})
self.assertDoesNotPass(rule, event)
rule = self.get_rule({
'match': LevelMatchType.LESS_OR_EQUAL,
'level': '30',
})
self.assertPasses(rule, event)
|
funbaker/astropy
|
astropy/coordinates/tests/test_transformations.py
|
Python
|
bsd-3-clause
| 14,498 | 0.00069 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from ... import units as u
from .. import transformations as t
from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, AltAz
from .. import representation as r
from ..baseframe import frame_transform_graph
from ...tests.helper import (assert_quantity_allclose as assert_allclose,
quantity_allclose, catch_warnings)
from ...time import Time
# Coordinates just for these tests.
class TCoo1(ICRS):
pass
class TCoo2(ICRS):
pass
class TCoo3(ICRS):
pass
def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2,
register_graph=frame_transform_graph)
c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0],
[0, coo.ra.degree, 0],
[0, 0, 1]]
trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.ra.degree, 1)
assert_allclose(c4.ra.degree, 1)
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph)
def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)
@frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(r
|
a=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
@frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0],
[0, 1, 0],
[0, 0, 1]]
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.cartesian.x, 2*u.pc)
|
assert_allclose(c4.cartesian.y, 1*u.pc)
assert_allclose(c4.cartesian.z, 2*u.pc)
def test_shortest_path():
class FakeTransform:
def __init__(self, pri):
self.priority = pri
g = t.TransformGraph()
# cheating by adding graph elements directly that are not classes - the
# graphing algorithm still works fine with integers - it just isn't a valid
# TransformGraph
    # the graph is a down-going diamond graph with the lower-right slightly
# heavier and a cycle from the bottom to the top
# also, a pair of nodes isolated from 1
g._graph[1][2] = FakeTransform(1)
g._graph[1][3] = FakeTransform(1)
g._graph[2][4] = FakeTransform(1)
g._graph[3][4] = FakeTransform(2)
g._graph[4][1] = FakeTransform(5)
g._graph[5][6] = FakeTransform(1)
path, d = g.find_shortest_path(1, 2)
assert path == [1, 2]
assert d == 1
path, d = g.find_shortest_path(1, 3)
assert path == [1, 3]
assert d == 1
path, d = g.find_shortest_path(1, 4)
print('Cached paths:', g._shortestpaths)
assert path == [1, 2, 4]
assert d == 2
# unreachable
path, d = g.find_shortest_path(1, 5)
assert path is None
assert d == float('inf')
path, d = g.find_shortest_path(5, 6)
assert path == [5, 6]
assert d == 1
def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from ...utils import NumpyRNGContext
from .. import spherical_to_cartesian, cartesian_to_spherical
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
r, lat, lon = cartesian_to_spherical(x, y, z)
x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2)
def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
# but direct from FK4 to Galactic should still be possible
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2
def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time('B1950', scale='utc')
j1975 = Time('J1975', scale='utc')
fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950)
fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS)
icrs_75 = fk4_75.transform_to(ICRS)
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree
# ------------------------------------------------------------------------------
# Affine transform tests and helpers:
# just acting as a namespace
class transfunc:
rep = r.CartesianRepresentation(np.arange(3)*u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3)*u.pc)
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
@pytest.mark.parametrize('transfunc', [transfunc.both, transfunc.no_matrix,
transfunc.no_pos, transfunc.no_vel,
transfunc.just_matrix])
@pytest.mark.parametrize('rep', [
r.CartesianRepresentation(5, 6, 7, unit=u.pc),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr)),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr))
.represent_as(r.CylindricalRepresentation, r.CylindricalDifferential)
])
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
_rep = rep.to_cartesian()
diffs = dict([(k, diff.represent_as(r.CartesianDifferential, rep))
for k, diff in rep.differentials.items()])
expected_rep = _rep.with_diff
|
phamelin/ardupilot
|
Tools/autotest/param_metadata/param_parse.py
|
Python
|
gpl-3.0
| 17,332 | 0.002192 |
#!/usr/bin/env python
from __future__ import print_function
import glob
import os
import re
import sys
from argparse import ArgumentParser
from param import (Library, Parameter, Vehicle, known_group_fields,
known_param_fields, required_param_fields, known_units)
from htmlemit import HtmlEmit
from rstemit import RSTEmit
from xmlemit import XmlEmit
from mdemit import MDEmit
from jsonemit import JSONEmit
from xmlemit_mp import XmlEmitMP
parser = ArgumentParser(description="Parse ArduPilot parameters.")
parser.add_argument("-v", "--verbose", dest='verbose', action='store_true', default=False, help="show debugging output")
parser.add_argument("--vehicle", required=True, help="Vehicle type to generate for")
parser.add_argument("--no-emit",
dest='emit_params',
action='store_false',
default=True,
                    help="don't emit parameter documentation, just validate")
parser.add_argument("--format",
dest='output_format',
action='store',
default='all',
choices=['all', 'html', 'rst', 'wiki', 'xml', 'json', 'edn', 'md', 'xml_mp'],
help="what output format to use")
parser.add_argument("--sitl",
dest='emit_sitl',
action='store_true',
default=False,
help="true to only emit sitl parameters, false to not emit sitl parameters")
args = parser.parse_args()
# Regular expressions for parsing the parameter metadata
prog_param = re.compile(r"@Param(?:{([^}]+)})?: (\w+).*((?:\n[ \t]*// @(\w+)(?:{([^}]+)})?: ?(.*))+)(?:\n[ \t\r]*\n|\n[ \t]+[A-Z])", re.MULTILINE)
# match e.g @Value: 0=Unity, 1=Koala, 17=Liability
prog_param_fields = re.compile(r"[ \t]*// @(\w+): ?([^\r\n]*)")
# match e.g @Value{Copter}: 0=Volcano, 1=Peppermint
prog_param_tagged_fields = re.compile(r"[ \t]*// @(\w+){([^}]+)}: ([^\r\n]*)")
prog_groups = re.compile(r"@Group: *(\w+).*((?:\n[ \t]*// @(Path): (\S+))+)", re.MULTILINE)
apm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../')
vehicle_paths = glob.glob(apm_path + "%s/Parameters.cpp" % args.vehicle)
apm_tools_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../Tools/')
vehicle_paths += glob.glob(apm_tools_path + "%s/Parameters.cpp" % args.vehicle)
vehicle_paths.sort(reverse=True)
vehicles = []
libraries = []
# AP_Vehicle also has parameters rooted at "", but isn't referenced
# from the vehicle in any way:
ap_vehicle_lib = Library("") # the "" is tacked onto the front of param name
setattr(ap_vehicle_lib, "Path", os.path.join('..', 'libraries', 'AP_Vehicle', 'AP_Vehicle.cpp'))
libraries.append(ap_vehicle_lib)
error_count = 0
current_param = None
current_file = None
def debug(str_to_print):
"""Debug output if verbose is set."""
if args.verbose:
print(str_to_print)
def error(str_to_print):
"""Show errors."""
global error_count
error_count += 1
if current_file is not None:
print("Error in %s" % current_file)
if current_param is not None:
print("At param %s" % current_param)
print(str_to_print)
truename_map = {
"Rover": "Rover",
"ArduSub": "Sub",
"ArduCopter": "Copter",
"ArduPlane": "Plane",
"AntennaTracker": "Tracker",
"AP_Periph": "AP_Periph",
}
valid_truenames = frozenset(truename_map.values())
for vehicle_path in vehicle_paths:
name = os.path.basename(os.path.dirname(vehicle_path))
path = os.path.normpath(os.path.dirname(vehicle_path))
vehicles.append(Vehicle(name, path, truename_map[name]))
debug('Found vehicle type %s' % name)
if len(vehicles) > 1 or len(vehicles) == 0:
print("Single vehicle only, please")
sys.exit(1)
for vehicle in vehicles:
debug("===\n\n\nProcessing %s" % vehicle.name)
current_file = vehicle.path+'/Parameters.cpp'
f = open(current_file)
p_text = f.read()
f.close()
group_matches = prog_groups.findall(p_text)
debug(group_matches)
for group_match in group_matches:
lib = Library(group_match[0])
fields = prog_param_fields.findall(group_match[1])
for field in fields:
if field[0] in known_group_fields:
setattr(lib, field[0], field[1])
else:
error("group: unknown parameter metadata field '%s'" % field[0])
if not any(lib.name == parsed_l.name for parsed_l in libraries):
libraries.append(lib)
param_matches = []
if not args.emit_sitl:
param_matches = prog_param.findall(p_text)
for param_match in param_matches:
(only_vehicles, param_name, field_text) = (param_match[0],
param_match[1],
param_match[2])
if len(only_vehicles):
only_vehicles_list = [x.strip() for x in only_vehicles.split(",")]
for only_vehicle in only_vehicles_list:
if only_vehicle not in valid_truenames:
raise ValueError("Invalid only_vehicle %s" % only_vehicle)
if vehicle.truename not in only_vehicles_list:
continue
p = Parameter(vehicle.name+":"+param_name, current_file)
debug(p.name + ' ')
current_param = p.name
fields = prog_param_fields.findall(field_text)
field_list = []
for field in fields:
field_list.append(field[0])
if field[0] in known_param_fields:
value = re.sub('@PREFIX@', "", field[1]).rstrip()
setattr(p, field[0], value)
else:
error("param: unknown parameter metadata field '%s'" % field[0])
for req_field in required_param_fields:
if req_field not in field_list:
error("missing parameter metadata field '%s' in %s" % (req_field, field_text))
vehicle.params.append(p)
current_file = None
debug("Processed %u params" % len(vehicle.params))
debug("Found %u documented libraries" % len(libraries))
if args.emit_sitl:
libraries = filter(lambda x : x.name == 'SIM_', libraries)
else:
libraries = filter(lambda x : x.name != 'SIM_', libraries)
libraries = list(libraries)
alllibs = libraries[:]
vehicle = vehicles[0]
def process_library(vehicle, library, pathprefix=None):
'''process one library'''
paths = library.Path.split(',')
for path in paths:
path = path.strip()
global current_file
current_file = path
debug("\n Processing file '%s'" % path)
if pathprefix is not None:
libraryfname = os.path.join(pathprefix, path)
elif path.find('/') == -1:
if len(vehicles) != 1:
print("Unable to handle multiple vehicles with .pde library")
continue
libraryfname = os.path.join(vehicles[0].path, path)
else:
libraryfname = os.path.normpath(os.path.join(apm_path + '/libraries/' + path))
if path and os.path.exists(libraryfname):
f = open(libraryfname)
p_text = f.read()
f.close()
else:
error("Path %s not found for library %s (fname=%s)" % (path, library.name, libraryfname))
continue
param_matches = prog_param.findall(p_text)
debug("Found %u documented parameters" % len(param_matches))
for param_match in param_matches:
(only_vehicles, param_name, field_text) = (param_match[0],
param_match[1],
|
param_match[2])
if len(only_vehicles):
only_vehicles_list = [x.strip() for x in only_vehicles.split(",")]
for only_vehicle in only_vehicles_list:
if only_vehicle not in valid_truenames:
raise ValueError("Invalid only_vehicle %s" % only_vehicle)
if vehi
|
cle.truename not in only_vehicles_list:
continue
p = Parameter(li
|
thehub/hubspace
|
hubspace/microSite.py
|
Python
|
gpl-2.0
| 70,144 | 0.011533 |
import os
import os.path
import glob
import re
from turbogears import controllers, expose, redirect, identity, validators as v, validate, config
from turbogears.identity.exceptions import IdentityFailure
from hubspace.validators import *
from formencode import ForEach
from hubspace.utilities.templates import try_render
from hubspace.utilities.login import login_args, requestPassword, resetPassword
from hubspace.utilities.dicts import AttrDict
from hubspace.utilities.permissions import is_host, addUser2Group
from hubspace.utilities.object import modify_attribute, obj_of_type
from hubspace.model import Location, LocationMetaData, User, RUsage, Group, MicroSiteSpace, ObjectReference, ListItem, Page, MetaWrapper, PublicPlace, List
from sqlobject import AND, SQLObjectNotFound, IN, LIKE, func
from sqlobject.events import listen, RowUpdateSignal, RowCreatedSignal, RowDestroySignal
import os, re, unicodedata, md5, random, sys, datetime, traceback, hmac as create_hmac
from hashlib import sha1
import cherrypy
from kid import XML
from hubspace.feeds import get_local_profiles, get_local_future_events, get_local_past_events
from BeautifulSoup import BeautifulSoup
import sendmail
import hubspace.model
model = hubspace.model
from hubspace.utilities.cache import strongly_expire
from hubspace.utilities.uiutils import now
import hubspace.sync.core as sync
from turbogears import database
import urlparse
from urllib import quote, urlencode
from urllib2 import urlopen, Request, build_opener, install_opener, HTTPCookieProcessor, HTTPRedirectHandler
import cookielib
from hubspace import configuration
import vobject
import patches
import logging
applogger = logging.getLogger("hubspace")
gr_cache = {}
def place(obj):
if isinstance(obj, Location):
return obj
elif hasattr(obj, 'location'):
return obj.location
elif hasattr(obj, 'place'):
return obj.place
else:
        raise AttributeError("object has no location")
def bs_preprocess(html):
"""remove distracting whitespaces and newline characters"""
html = re.sub('\n', ' ', html) # convert newlines to spaces
return html
def html2xhtml(value):
value = value.strip()
value = BeautifulSoup(value).prettify()
value = bs_preprocess(value)
try:
XML(value).expand()
except:
cherrypy.response.headers['X-JSON'] = 'error'
print "not good XML"
return value
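# Illustrative note (an editorial sketch, not part of the original module): for a
# fragment like '<p>hi', html2xhtml() is expected to return BeautifulSoup's
# prettified, newline-stripped form (roughly '<p> hi </p>') and to set the
# 'X-JSON' response header to 'error' only when the result is not well-formed XML.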
def get_profiles(*args, **kwargs):
    location = kwargs.get('location')
no_of_images = 9
only_with_images = True
profiles = get_local_profiles(location, only_with_images, no_of_images)
if len(args) >=1:
profiles.update(get_user(*args))
return profiles
def get_user(*args, **kwargs):
if len(args) >= 1:
user = User.by_user_name(args[0])
if user.public_field and user.active:
return {'user': user}
return {}
def get_public_place(*args, **kwargs):
if len(args) >= 1:
        place = PublicPlace.select(AND(PublicPlace.q.name==args[0]))
if place.count():
return {'place': place[0]}
return {'place': None}
def get_events(*args, **kwargs):
no_of_events = 10
location = kwargs.get('location')
events = get_local_future_events(location, no_of_events)
events.update(get_local_past_events(location, no_of_events))
if len(args) >=1:
events.update(get_event(*args))
return events
def parseSubpageId(list_name):
if list_name.startswith('subpage'):
list_name,pageid=list_name.split('_')
else:
pageid = None
return (list_name,pageid)
standard_kw = ['microsite', 'page', 'location']
class RedirectToClient(Exception):
def __init__(self, url):
self.url = url
class HTTPRedirectClient(HTTPRedirectHandler):
def redirect_request(self, req, fp, code, msg, headers, newurl):
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
raise RedirectToClient(newurl)
#return Request(newurl,
# headers=req.headers,
# origin_req_host=req.get_origin_req_host(),
# unverifiable=True)
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
forwarded_request_headers = ['If-None-Match']
forwarded_response_headers = ['Etag', 'Last-Modified', 'X-Pingback', 'Cache-Control', 'Pragma', 'Expires']
class MediaContent(Exception):
def __init__(self, response):
self.response = response
class AjaxContent(Exception):
def __init__(self, html):
self.html = html
def get_blog(*args, **kwargs):
blog_url = kwargs['page'].blog_url.strip()
args = list(args)
args.insert(0, blog_url)
url = '/'.join(args)
url += '/'
kw_args = dict((key.replace('+', '-'), val) for key, val in kwargs.iteritems() if key not in standard_kw)
post_data = None
if kw_args:
if cherrypy.request.method == 'GET':
url += '?' + urlencode(kw_args)
if cherrypy.request.method == 'POST':
post_data = urlencode(kw_args)
if cherrypy.session.has_key('cj'):
cj = cherrypy.session['cj']
else:
cj = cherrypy.session['cj'] = cookielib.CookieJar()
opener = build_opener(HTTPCookieProcessor(cj), HTTPRedirectClient)
install_opener(opener)
headers = {}
for header in forwarded_request_headers:
if cherrypy.request.headers.get(header, 0):
headers[header] = cherrypy.request.headers[header]
try:
if post_data:
blog = Request(url, post_data, headers)
else:
blog = Request(url, headers=headers)
blog_handle = urlopen(blog)
except RedirectToClient, e:
redirect(e.url.replace(blog_url, cherrypy.request.base + '/public/' + kwargs['page'].path_name))
except IOError, e:
if hasattr(e, 'reason'):
blog_body = "Could not get blog from: " + url + " because " + e.reason
blog_head = ""
elif hasattr(e, 'code'):
cherrypy.response.headers['status'] = e.code
blog_body = "Could not get blog from: " + url + " because " + str(e.code)
blog_head = ""
except ValueError:
blog_body = ""
blog_head = ""
return {'blog': blog_body, 'blog_head': blog_head}
else:
content_type = blog_handle.headers.type
if content_type not in ['text/html', 'text/xhtml']:
raise redirect(url)
blog = blog_handle.read()
#replace any links to the blog_url current address
our_url = cherrypy.request.base + '/public/' + kwargs['page'].path_name
blog = blog.replace(blog_url, our_url)
blog = BeautifulSoup(blog)
#blog = bs_preprocess(blog)
for input in blog.body.findAll('input', attrs={'name':re.compile('.*\-.*')}):
input['name'] = input['name'].replace('-', '+') #hack around the awkwardness of submitting names with '-' from FormEncode
#change back anything ending in .js .css .png .gif, .jpg .swf
for link in blog.findAll('link', attrs={'href':re.compile('.*' + re.escape(our_url) + '.*')}):
link['href'] = link['href'].replace(our_url, blog_url)
for link in blog.findAll('img', attrs={'src':re.compile('.*' + re.escape(our_url) + '.*')}):
link['src'] = link['src'].replace(our_url, blog_url)
for link in blog.findAll('script', attrs={'src':re.compile('.*' + re.escape(our_url) + '.*')}):
link['src'] = link['src'].replace(our_url, blog_url)
for header in blog.body.findAll('div', attrs={'id':'header'}):
header.extract()
|
Semanticle/Semanticle
|
sm-mt-devel/src/metabulate/tests/test26case-009d.py
|
Python
|
gpl-2.0
| 18,357 | 0.011113 |
'''
Copyright 2009, 2010 Anthony John Machin. All rights reserved.
Supplied subject to The GNU General Public License v3.0
Created on 28 Jan 2009
Last Updated on 10 July 2010
As test20 with tests of:
rules instantiation and query inference
Related:
single dict TS recursion rule plus generic rule + minimal data:
test20simple-001d - unmerged recursive rule EQ order correct QL order correct
test20simple-002d - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003d - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004d - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005d - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006d - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007d - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008d - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009d - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010d - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011d - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012d - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
single rbtree TS recursion rule plus generic rule + minimal data:
test20simple-001r - unmerged recursive rule EQ order correct QL order correct
test20simple-002r - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003r - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004r - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005r - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006r - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007r - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008r - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009r - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010r - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011r - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012r - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
@author: Administrator
'''
import metabulate.stores.stores as mtstores
import metabulate.facades.facade as mtfacade
import metabulate.utils.utils as mtutils
import metabulate.utils.debug as mtdebug
import metabulate.renderers.render as mtrender
import metabulate.rules.rules as mtrules
import metabulate.singletons.singleton as mtsingleton
if __name__ == "__main__":
# get default file paths and types
mtconfig = mtsingleton.Settings()._getItem('config')
debug_path = mtconfig._getItem('debugfile_path','%configfilesbase%Debug\\',mtconfig)
debug_type = mtconfig._getItem('debugfile_type','txt',mtconfig)
result_path = mtconfig._getItem('resultsfile_path','%configfilesbase%Results\\',mtconfig)
result_type = mtconfig._getItem('resultsfile_type','txt',mtconfig)
unload_path = mtconfig._getItem('stores_unloadfile_path','%configfilesbase%Unloads\\',mtconfig)
unload_type = mtconfig._getItem('stores_unloadfile_type','pyo',mtconfig)
# set debug criteria
dc22f = mtdebug.Criteria(methods=['_actionPredicate','_actionTriple','_processTriple','_addTriple'],
targets=[mtutils.Flatfile(path=debug_path,
name='DebugOutput_dc22',
type=debug_type)])
dc28 = mtdebug.Criteria(classes=['Query'],methods=['_solve'],notes=['trace'])
# set debug
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc8f,dc12f,dc7f,dc13f,dc10f,dc14f,dc15f])
# d._update(criteria=[dc6,dc20f_dup,dc20f_ok])
# d._update(criteria=[dc11f])
# d._update(criteria=[dc21f])
# d._update(criteria=[dc6,dc20f])
# files
fu = mtutils.Flatfile(path=unload_path,
name='test20r-30_unload_s1',
type=unload_type)
f1 = mtutils.Flatfile(path=result_path,
name='genealogy_test1',
type=result_type)
f3 = mtutils.Flatfile(path=result_path,
name='test20r-30_triples',
type=result_type)
f4 = mtutils.Flatfile(path=result_path,
name='test20r-30_rules',
type=result_type)
f5 = mtutils.Flatfile(path=result_path,
name='test20r-30_queries',
type=result_type)
f6 = mtutils.Flatfile(path=result_path,
name='test20r-30_results',
type=result_type)
# stores
sa = mtstores.TripleStore(structure='dict') # TS sa dict
sr = mtstores.TripleStore() # TS sr
s2 = mtstores.TripleStore()
s3 = mtstores.TripleStore()
s4 = mtstores.TripleStore()
|
# add namespaces in source stores
sa._addNamespace('mytriples', 'http://www.semanticle.org/triples/')
    sa._addNamespace('comtriples', 'http://www.semanticle.com/triples/')
# triples for recursion test
sa._actionTriple("add [('mytriples#bob', 'child_of', 'alice'),('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')]")
sa._actionTriple("add", [('cev', 'child_of', 'http://www.semanticle.org/triples/#bob'),"('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')"])
sa._actionTriple("add", 'eve', 'child_of', 'comtriples#dan')
# sa._actionTriple("add",{('?desc', 'desc_of', '?ancs'):
# [
# [[('?child', 'child_of', '?ancs')],[('?desc', 'desc_of', '?child')]]
# [[('?desc', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]
# ,[[('?desc', 'child_of', '?ancs')]]
# ]})
sa._actionTriple("add","{('?desc', 'desc_of', '?ancs'):[[[('?child', 'child_of', '?ancs')],[('?desc', 'desc_of', '?child')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
# sa._actionTriple("add","{('?desc1', 'desc_of', '?ancs'):[[[('?desc1', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add",{('?obj', '?inv', '?sub'):
[
[[('?inv', 'rev_of', '?forw'),('?forw', 'rev_of', '?inv')]
,[('?sub', "?forw", '?obj')]]
,[[('?inv', 'syn_of', '?inv1'),('?inv1', 'syn_of', '?inv')]
,[('?obj', "?inv1", '?sub')]]
]}) # add rule to DTS._queryStore a (or change to DTS s1)
sa._actionTriple("add ('?desc', 'desc_of', '?ancs') :- [[[('?desc', 'child_of', '?ancs')]]]") # add rule clause 1 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add", 'ancs_of', 'rev_of', 'desc_of') # ant
# s1._action
|
lopopolo/hyperbola
|
hyperbola/__init__.py
|
Python
|
mit
| 63 | 0 |
__all__ = ("settings", "urls", "wsgi")
__version__ = "0.159.0"
|
pescobar/easybuild-framework
|
easybuild/toolchains/clanggcc.py
|
Python
|
gpl-2.0
| 1,795 | 0.001671 |
##
# Copyright 2013-2020 Ghent University
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Clang + GCC compiler toolchain. Clang uses libstdc++. GFortran is used for Fortran code.
:author: Dmitri Gribenko (National Technical University of Ukraine "KPI")
"""
from easybuild.toolchains.compiler.clang import Clang
from easybuild.toolchains.compiler.gcc import Gcc
from easybuild.tools.toolchain.toolchain import SYSTEM_TOOLCHAIN_NAME
TC_CONSTANT_CLANGGCC = "ClangGCC"
class ClangGcc(Clang, Gcc):
"""Compiler toolchain with Clang and GFortran compilers."""
NAME = 'ClangGCC'
COMPILER_MODULE_NAME = ['Clang', 'GCC']
COMPILER_FAMILY = TC_CONSTANT_CLANGGCC
SUBTOOLCHAIN = SYSTEM_TOOLCHAIN_NAME
|
GuessWhoSamFoo/pandas
|
pandas/core/indexes/multi.py
|
Python
|
bsd-3-clause
| 113,443 | 0.000079 |
# pylint: disable=E1101,E1103,W0232
from collections import OrderedDict
import datetime
from sys import getsizeof
import warnings
import numpy as np
from pandas._libs import (
Timestamp, algos as libalgos, index as libindex, lib, tslibs)
import pandas.compat as compat
from pandas.compat import lrange, lzip, map, range, zip
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_categorical_dtype, is_hashable,
is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar,
pandas_dtype)
from pandas.core.dtypes.dtypes import ExtensionDtype, PandasExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.config import get_option
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs, ensure_index)
from pandas.core.indexes.frozen import FrozenList, _ensure_frozen
import pandas.core.missing as missing
from pandas.io.formats.printing import pprint_thing
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='MultiIndex',
target_klass='MultiIndex or list of tuples'))
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine,
libindex.UInt64Engine):
"""
    This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
|
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each)
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
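    # Editorial sketch (not from the pandas sources): with hypothetical offsets
    # of [4, 0], a row of codes [2, 3] becomes (2 << 4) | (3 << 0) == 35, so the
    # lexicographic order of code tuples is preserved by the resulting integer keys.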
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine,
libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
------
int_keys : int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each)
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype('object') << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
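    # Editorial sketch (not from the pandas sources): with a hypothetical offset
    # of 64 for the first level, codes [1, 5] become (1 << 64) | 5, which no longer
    # fits in uint64 -- hence the cast to Python ints ('object' dtype) above.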
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
labels : sequence of arrays
Integers for each level designating which label at each location.
.. deprecated:: 0.24.0
Use ``codes`` instead
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Examples
---------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html>`_ for more.
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ['names']
rename = Index.set_names
# --------------------------------------------------------------------
# Constructors
@deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
def __new__(cls, levels=None, codes=None, sortorder=None, names=None,
dtype=None, copy=False, name=None,
verify_integrity=True, _set_identity=True):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError('Length of levels and codes must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/codes')
result = object.__new__(MultiIndex)
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
if names is not None:
# handles name validation
result._s
|
carlvlewis/bokeh
|
bokeh/charts/builder/timeseries_builder.py
|
Python
|
bsd-3-clause
| 6,252 | 0.002879 |
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries class which lets you build your TimeSeries charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
""" Create a timeseries chart using
:class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
to render the lines from values and index.
Args:
values (iterable): a 2d iterable containing the values. Can be anything that
            can be converted to a 2d array, in which the x (time) axis is determined
by ``index``, while the others are interpreted as y values.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
import datetime
from bokeh.charts import TimeSeries, output_file, show
            # (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
            xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
ylabel='Languages')
output_file('timeseries.html')
show(ts)
"""
return create_and_build(
TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
)
class TimeSeriesBuilder(Builder):
"""This is the TimeSeries class and it is in charge of plotting
TimeSeries charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Take the x/y data from the timeseries values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
# necessary to make all formats and encoder happy with array, blaze, ...
xs = list([x for x in self._values_index])
for col, values in self._values.items():
if isinstance(self.index, string_types) \
and col == self.index:
continue
            # save all the groups available in the incoming input
self._groups.append(col)
self.set_and_get("x_", col, xs)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""Push the TimeSeries data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1::2]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the time series.
Takes reference points from the data loaded at the ColumnDataSource.
"""
self._duplet = list(chunk(self._attr, 2))
colors = cycle_colors(self._duplet, self.palette)
for i, (x, y) in enumerate(self._duplet, start=1):
glyph = Line(x=x, y=y, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
|
masschallenge/impact-api
|
web/impact/impact/tests/test_judging_round.py
|
Python
|
mit
| 445 | 0 |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from impact.tests.api_test_case import APITestCase
from impact.tests.factories import JudgingRoundFactory
class TestJudgingRound(APITestCase):
def test_str(self):
judging_round = JudgingRoundFactory()
judging_round_string = str(judging_round)
assert judging_round.name in judging_round_string
|
assert str(judging_round.program) in judging_round_string
|
vesln/robber.py
|
robber/matchers/respond_to.py
|
Python
|
mit
| 485 | 0.004124 |
from robber import expect
from robber.explanation import Explanation
from robber.matchers.base import Base
class RespondTo(Base):
"""
expect(obj).to.respond_to('method')
"""
    def matches(self):
return hasattr(self.actual, self.expected) and callable(getattr(self.actual, self.expected))
@property
def explanation(self):
return Explanation(self.actual, self.is_negative, 'respond to', self.expected)
expect.register('respond_to', RespondTo)
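# Illustrative usage (editorial example based on the docstring above, not part of
# the module):
#     from robber import expect
#     expect([1, 2, 3]).to.respond_to('append')   # passes: list.append is callable
#     expect(42).to.respond_to('append')          # fails: int has no 'append'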
|
rbarlow/pulp_openstack
|
plugins/pulp_openstack/plugins/distributors/glance_publish_steps.py
|
Python
|
gpl-2.0
| 4,901 | 0.003265 |
from gettext import gettext as _
import logging
from pulp.plugins.util.publish_step import PublishStep, UnitPublishStep
from pulp_openstack.common import constants
from pulp_openstack.common import openstack_utils
_logger = logging.getLogger(__name__)
class GlancePublisher(PublishStep):
"""
Openstack Image Web publisher class that pushes images into Glance.
"""
def __init__(self, repo, publish_conduit, config):
"""
:param repo: Pulp managed Yum repository
:type repo: pulp.plugins.model.Repository
:param publish_conduit: Conduit providing access to relative Pulp functionality
:type publish_conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
        :param config: Pulp configuration for the distributor
:type config: pulp.plugins.config.PluginCallConfiguration
"""
super(GlancePublisher, self).__init__(constants.PUBLISH_STEP_GLANCE_PUBLISHER,
repo, publish_conduit, config)
publish_step = PublishStep(constants.PUBLISH_STEP_OVER_GLANCE_REST)
publish_step.description = _('Pushing files to Glance.')
self.add_child(PublishImagesStep())
class PublishImagesStep(UnitPublishStep):
"""
Publish Images
"""
def __init__(self):
"""
Initialize publisher.
"""
super(PublishImagesStep, self).__init__(constants.PUBLISH_STEP_IMAGES,
constants.IMAGE_TYPE_ID)
self.context = None
self.description = _('Publishing Image Files.')
def initialize(self):
"""
Initialize publisher (second phase).
"""
_logger.info("initizialing, setting up connection to OpenStack")
keystone_conf = {'username': self.get_config().get('keystone-username'),
'password': self.get_config().get('keystone-password'),
'tenant_name': self.get_config().get('keystone-tenant'),
'auth_url': self.get_config().get('keystone-url')}
self.ou = openstack_utils.OpenstackUtils(**keystone_conf)
# this is to keep track of images we touched during process_unit(). At
# the end, anything untouched in glance that has the correct repo
# metadata is deleted.
self.images_processed = []
def process_unit(self, unit):
"""
Link the unit to the image content directory
:param unit: The unit to process
:type unit: pulp_openstack.common.models.OpenstackImage
"""
# we need to add the image checksum to our processed list ASAP, otherwise they will
# be deleted via finalize()
self.images_processed.append(unit.unit_key['image_checksum'])
_logger.debug("pushing unit %s from repo %s to glance" % (unit, self.get_repo().id))
images = list(self.ou.find_image(self.get_repo().id, unit.unit_key['image_checksum']))
_logger.debug("found existing image in glance: %s" % images)
if len(images) > 1:
raise RuntimeError("more than one image found with same checksum for repo %s!" %
self.get_repo().id)
if not images:
self.ou.create_image(unit.storage_path, self.get_repo().id,
name=unit.metadata['image_name'],
checksum=unit.unit_key['image_checksum'],
size=unit.metadata['image_size'])
else:
_logger.debug("image already exists, skipping publish")
def finalize(self):
"""
Finalize publish.
This examines self.images_processed and performs any deletions.
"""
# this could be more elegant
glance_image_by_checksum = {}
glance_images = self.ou.find_repo_images(self.get_repo().id)
for glance_image in glance_images:
glance_image_by_checksum[glance_image.checksum] = glance_image
_logger.debug("images in glance associated with repo: %s" % glance_image_by_checksum.keys())
pulp_image_checksums = self.images_processed
_logger.debug("images in pulp associated with repo: %s" % pulp_image_checksums)
for pulp_image_checksum in pulp_image_checksums:
if pulp_image_checksum not in glance_image_by_checksum.keys():
raise RuntimeError("Images found in pulp repo that were not published to glance. "
"Please consult error log for more details.")
for glance_image_checksum in glance_image_by_checksum:
if glance_image_checksum not in pulp_image_checksums:
_logger.info("deleting image with checksum %s from glance" % glance_image_checksum)
self.ou.delete_image(glance_image_by_checksum[glance_image_checksum])
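    # Editorial summary of the reconciliation above (assumption, not plugin docs):
    # a checksum present in Pulp but absent from Glance raises RuntimeError, while
    # a Glance image whose checksum is no longer in Pulp is deleted, so the Glance
    # repo ends up holding exactly the units processed during this publish.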
|
insomnia-lab/calibre
|
src/calibre/gui2/preferences/template_functions.py
|
Python
|
gpl-3.0
| 10,331 | 0.002807 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import json, traceback
from PyQt4.Qt import QDialogButtonBox
from calibre.gui2 import error_dialog, warning_dialog
from calibre.gui2.preferences import ConfigWidgetBase, test_widget
from calibre.gui2.preferences.template_functions_ui import Ui_Form
from calibre.gui2.widgets import PythonHighlighter
from calibre.utils.formatter_functions import (formatter_functions,
compile_user_function, load_user_template_functions)
class ConfigWidget(ConfigWidgetBase, Ui_Form):
def genesis(self, gui):
self.gui = gui
self.db = gui.library_view.model().db
help_text = _('''
<p>Here you can add and remove functions used in template processing. A
template function is written in python. It takes information from the
book, processes it in some way, then returns a string result. Functions
defined here are usable in templates in the same way that builtin
functions are usable. The function must be named <b>evaluate</b>, and
must have the signature shown below.</p>
<p><code>evaluate(self, formatter, kwargs, mi, locals, your parameters)
→ returning a unicode string</code></p>
<p>The parameters of the evaluate function are:
<ul>
<li><b>formatter</b>: the instance of the formatter being used to
evaluate the current template. You can use this to do recursive
template evaluation.</li>
<li><b>kwargs</b>: a dictionary of metadata. Field values are in this
dictionary.
<li><b>mi</b>: a Metadata instance. Used to get field information.
This parameter can be None in some cases, such as when evaluating
non-book templates.</li>
<li><b>locals</b>: the local variables assigned to by the current
template program.</li>
<li><b>your parameters</b>: You must supply one or more formal
parameters. The number must match the arg count box, unless arg count is
    -1 (variable number of arguments), in which case the last argument must
be *args. At least one argument is required, and is usually the value of
    the field being operated upon. Note that when writing in basic template
mode, the user does not provide this first argument. Instead it is
supplied by the formatter.</li>
</ul></p>
<p>
The following example function checks the value of the field. If the
field is not empty, the field's value is returned, otherwise the value
EMPTY is returned.
<pre>
name: my_ifempty
arg count: 1
doc: my_ifempty(val) -- return val if it is not empty, otherwise the string 'EMPTY'
program code:
def evaluate(self, formatter, kwargs, mi, locals, val):
if val:
return val
else:
return 'EMPTY'</pre>
This function can be called in any of the three template program modes:
<ul>
<li>single-function mode: {tags:my_ifempty()}</li>
<li>template program mode: {tags:'my_ifempty($)'}</li>
<li>general program mode: program: my_ifempty(field('tags'))</li>
</p>
''')
self.textBrowser.setHtml(help_text)
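        # Editorial note (assumption based on create_button_clicked() below, not
        # calibre documentation): a function entered through this dialog ends up
        # compiled via something like
        #     cls = compile_user_function('my_ifempty', doc_text, 1, program_text)
        # and is then stored in self.funcs under its name for use in templates.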
def initialize(self):
try:
self.builtin_source_dict = json.loads(P('template-functions.json', data=True,
allow_user_override=False).decode('utf-8'))
except:
traceback.print_exc()
self.builtin_source_dict = {}
self.funcs = formatter_functions().get_functions()
self.builtins = formatter_functions().get_builtins_and_aliases()
self.build_function_names_box()
self.function_name.currentIndexChanged[str].connect(self.function_index_changed)
self.function_name.editTextChanged.connect(self.function_name_edited)
self.argument_count.valueChanged.connect(self.enable_replace_button)
self.documentation.textChanged.connect(self.enable_replace_button)
self.program.textChanged.connect(self.enable_replace_button)
self.create_button.clicked.connect(self.create_button_clicked)
self.delete_button.clicked.connect(self.delete_button_clicked)
self.create_button.setEnabled(False)
self.delete_button.setEnabled(False)
self.replace_button.setEnabled(False)
self.clear_button.clicked.connect(self.clear_button_clicked)
self.replace_button.clicked.connect(self.replace_button_clicked)
self.program.setTabStopWidth(20)
self.highlighter = PythonHighlighter(self.program.document())
def enable_replace_button(self):
self.replace_button.setEnabled(self.delete_button.isEnabled())
def clear_button_clicked(self):
self.build_function_names_box()
self.program.clear()
self.documentation.clear()
self.argument_count.clear()
self.create_button.setEnabled(False)
self.delete_button.setEnabled(False)
def build_function_names_box(self, scroll_to='', set_to=''):
self.function_name.blockSignals(True)
func_names = sorted(self.funcs)
self.function_name.clear()
self.function_name.addItem('')
self.function_name.addItems(func_names)
self.function_name.setCurrentIndex(0)
if set_to:
self.function_name.setEditText(set_to)
self.create_button.setEnabled(True)
self.function_name.blockSignals(False)
if scroll_to:
idx = self.function_name.findText(scroll_to)
if idx >= 0:
self.function_name.setCurrentIndex(idx)
if scroll_to not in self.builtins:
self.delete_button.setEnabled(True)
def delete_button_clicked(self):
name = unicode(self.function_name.currentText())
if name in self.builtins:
error_dialog(self.gui, _('Template functions'),
_('You cannot delete a built-in function'), show=True)
if name in self.funcs:
del self.funcs[name]
self.changed_signal.emit()
self.create_button.setEnabled(True)
self.delete_button.setEnabled(False)
self.build_function_names_box(set_to=name)
self.program.setReadOnly(False)
else:
error_dialog(self.gui, _('Template functions'),
_('Function not defined'), show=True)
def create_button_clicked(self):
self.changed_signal.emit()
name = unicode(self.function_name.currentText())
if name in self.funcs:
error_dialog(self.gui, _('Template functions'),
_('Name %s already used')%(name,), show=True)
return
if self.argument_count.value() == 0:
box = warning_dialog(self.gui, _('Template functions'),
_('Argument count should be -1 or greater than zero. '
'Setting it to zero means that this function cannot '
'be used in single function mode.'), det_msg = '',
show=False)
box.bb.setStandardButtons(box.bb.standardButtons() | QDialogButtonBox.Cancel)
box.det_msg_toggle.setVisible(False)
if not box.exec_():
return
try:
prog = unicode(self.program.toPlainText())
cls = compile_user_function(name, unicode(self.documentation.toPlainText()),
self.argument_count.value(), prog)
self.funcs[name] = cls
self.build_function_names_box(scroll_to=name)
except:
error_dialog(self.gui, _('Template functions'),
_('Exception while compiling function'), show=True,
det_msg=traceback.format_exc())
def function_name_edited(self, txt):
self.documentation.setReadOnly(False)
sel
|
tamasgal/km3pipe
|
pipeinspector/gui.py
|
Python
|
mit
| 1,923 | 0.00052 |
import urwid
from pipeinspector.widgets import BlobWidget, BlobBrowser
from pipeinspector.settings import UI
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
class MainFrame(urwid.Frame):
"""
Represents the main GUI
"""
def __init__(self, pump):
self.header = urwid.AttrWrap(
urwid.Text("PipeInspector", align="center"), "header"
)
self.blob_browser = BlobBrowser()
self.info_area = urwid.Text("")
self.blobs = BlobWidget()
self.footer = urwid.Columns([self.info_area, self.blobs])
self.frame = urwid.AttrWrap(
|
urwid.Frame(self.blob_browser, header=self.header, footer=self.footer),
"default",
)
urwid.Frame.__init__(self, self.frame)
self.overlay = None
self.pump = pump
urwid.connect_signal(self.blobs, "blob_selected", self.blob_selected)
self.blobs.goto_blob(0)
def blob_selected(self, index):
|
self.info_area.set_text("Blob: {0}".format(index))
blob = self.pump.get_blob(index)
self.blob_browser.load(blob)
def keypress(self, size, key):
input = urwid.Frame.keypress(self, size, key)
if input is None:
return
if input in UI.keys["left"]:
self.blobs.previous_blob()
elif input in UI.keys["right"]:
self.blobs.next_blob()
elif input in [key.upper() for key in UI.keys["left"]]:
self.blobs.previous_blob(step=10)
elif input in [key.upper() for key in UI.keys["right"]]:
self.blobs.next_blob(step=10)
elif input in UI.keys["home"]:
self.blobs.goto_blob(0)
else:
return self.body.keypress(size, input)
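    # Editorial note (inferred from the branches above, not from project docs):
    # a lower-case "left"/"right" key moves one blob, the upper-case variant
    # jumps ten blobs, and the "home" key returns to blob 0 via goto_blob(0).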
|
piqoni/onadata
|
onadata/libs/utils/image_tools.py
|
Python
|
bsd-2-clause
| 3,265 | 0 |
import requests
from cStringIO import StringIO
from PIL import Image
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.files.base import ContentFile
from tempfile import NamedTemporaryFile
from onadata.libs.utils.viewer_tools import get_path
def flat(*nums):
'''Build a tuple of ints from float or integer arguments.
Useful because PIL crop and resize require integer points.
source: https://gist.github.com/16a01455
'''
return tuple(int(round(n)) for n in nums)
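# Editorial example (not part of the original module): flat(600.0, 399.6) == (600, 400),
# which is why the thumbnail code below can pass float arithmetic results straight to PIL.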
def get_dimensions((width, height), longest_side):
if width > height:
width = longest_side
height = (height / width) * longest_side
    elif height > width:
height = longest_side
width = (width / height) * longest_side
else:
        height = longest_side
width = longest_side
return flat(width, height)
def _save_thumbnails(image, path, size, suffix):
nm = NamedTemporaryFile(suffix='.%s' % settings.IMG_FILE_TYPE)
default_storage = get_storage_class()()
try:
# Ensure conversion to float in operations
image.thumbnail(
get_dimensions(image.size, float(size)), Image.ANTIALIAS)
except ZeroDivisionError:
pass
image.save(nm.name)
default_storage.save(
get_path(path, suffix), ContentFile(nm.read()))
nm.close()
def resize(filename):
default_storage = get_storage_class()()
path = default_storage.url(filename)
req = requests.get(path)
if req.status_code == 200:
im = StringIO(req.content)
image = Image.open(im)
conf = settings.THUMB_CONF
[_save_thumbnails(
image, filename,
conf[key]['size'],
conf[key]['suffix']) for key in settings.THUMB_ORDER]
def resize_local_env(filename):
default_storage = get_storage_class()()
path = default_storage.path(filename)
image = Image.open(path)
conf = settings.THUMB_CONF
[_save_thumbnails(
image, path, conf[key]['size'],
conf[key]['suffix']) for key in settings.THUMB_ORDER]
def image_url(attachment, suffix):
'''Return url of an image given size(@param suffix)
e.g large, medium, small, or generate required thumbnail
'''
url = attachment.media_file.url
if suffix == 'original':
return url
else:
default_storage = get_storage_class()()
fs = get_storage_class('django.core.files.storage.FileSystemStorage')()
if suffix in settings.THUMB_CONF:
size = settings.THUMB_CONF[suffix]['suffix']
filename = attachment.media_file.name
if default_storage.exists(filename):
if default_storage.exists(get_path(filename, size)) and\
default_storage.size(get_path(filename, size)) > 0:
url = default_storage.url(
get_path(filename, size))
else:
if default_storage.__class__ != fs.__class__:
resize(filename)
else:
resize_local_env(filename)
return image_url(attachment, suffix)
else:
return None
return url
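# Illustrative usage (editorial sketch with a hypothetical attachment object):
#     image_url(attachment, 'original')  # returns the untouched media URL
#     image_url(attachment, 'small')     # returns the thumbnail URL, resizing
#                                        # on first request if it does not yet exist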
|
TieWei/nova
|
nova/tests/db/test_db_api.py
|
Python
|
apache-2.0
| 311,863 | 0.000895 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding=UTF8
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Unit tests for the DB API."""
import copy
import datetime
import iso8601
import types
import uuid as stdlib_uuid
import mox
import netaddr
from oslo.config import cfg
from sqlalchemy.dialects import sqlite
from sqlalchemy import exc
from sqlalchemy.exc import IntegrityError
from sqlalchemy import MetaData
from sqlalchemy.orm import exc as sqlalchemy_orm_exc
from sqlalchemy.orm import query
from sqlalchemy.sql.expression import select
from nova import block_device
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
from nova import test
from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
get_engine = db_session.get_engine
get_session = db_session.get_session
def _quota_reserve(context, project_id, user_id):
"""Create sample Quota, QuotaUsage and Reservation objects.
There is no method db.quota_usage_create(), so we have to use
db.quota_reserve() for creating QuotaUsage objects.
Returns reservations uuids.
"""
def get_sync(resource, usage):
def sync(elevated, project_id, user_id, session):
return {resource: usage}
return sync
quotas = {}
user_quotas = {}
resources = {}
deltas = {}
for i in range(3):
resource = 'resource%d' % i
if i == 2:
# test for project level resources
resource = 'fixed_ips'
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = quotas[resource]
else:
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = db.quota_create(context, project_id,
resource, i,
user_id=user_id)
sync_name = '_sync_%s' % resource
resources[resource] = quota.ReservableResource(
resource, sync_name, 'quota_res_%d' % i)
deltas[resource] = i
setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
sqlalchemy_api, sync_name)
return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
timeutils.utcnow(), CONF.until_refresh,
datetime.timedelta(days=1), project_id, user_id)
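# Editorial usage sketch (assumption, mirroring the docstring above):
#     reservations = _quota_reserve(context, 'fake_project', 'fake_user')
# returns the list of reservation UUIDs created for the three sample resources.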
class DbTestCase(test.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def create_instance_with_args(self, **kwargs):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
'node': 'node1', 'project_id': self.project_id,
'vm_state': 'fake'}
if 'context' in kwargs:
ctxt = kwargs.pop('context')
args['project_id'] = ctxt.project_id
else:
ctxt = self.context
args.update(kwargs)
return db.instance_create(ctxt, args)
def fake_metadata(self, content):
meta = {}
for i in range(0, 10):
meta["foo%i" % i] = "this is %s item %i" % (content, i)
return meta
def create_metadata_for_instance(self, instance_uuid):
meta = self.fake_metadata('metadata')
db.instance_metadata_update(self.context, instance_uuid, meta, False)
sys_meta = self.fake_metadata('system_metadata')
db.instance_system_metadata_update(self.context, instance_uuid,
sys_meta, False)
return meta, sys_meta
class DecoratorTestCase(test.TestCase):
def _test_decorator_wraps_helper(self, decorator):
def test_func():
"""Test docstring."""
decorated_func = decorator(test_func)
self.assertEquals(test_func.func_name, decorated_func.func_name)
self.assertEquals(test_func.__doc__, decorated_func.__doc__)
self.assertEquals(test_func.__module__, decorated_func.__module__)
def test_require_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
def test_require_admin_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
def _get_fake_aggr_values():
return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2',
'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
def _create_aggregate(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata()):
return db.aggregate_create(context, values, metadata)
def _create_aggregate_with_hosts(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata(),
hosts=_get_fake_aggr_hosts()):
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
db.aggregate_host_add(context, result['id'], host)
return result
class NotDbApiTestCase(DbTestCase):
def setUp(self):
super(NotDbApiTestCase, self).setUp()
self.flags(connection='notdb://', group='database')
def test_instance_get_all_by_filters_regex_unsupported_db(self):
# Ensure that the 'LIKE' operator is used for unsupported dbs.
self.create_instance_with_args(display_name='test1')
self.create_instance_with_args(display_name='test.*')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.context,
{'display_name': 'test.*'})
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'})
self.assertEqual(2, len(result))
def test_instance_get_all_by_filters_paginate(self):
test1 = self.create_instance_with_args(display_name='test1')
test2 = self.create_instance_with_args(display_name='test2')
test3 = self.create_instance_with_args(display_name='test3')
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
marker=None)
self.assertEqual(3, len(result))
result = db.in
|
skosukhin/spack
|
var/spack/repos/builtin/packages/libtermkey/package.py
|
Python
|
lgpl-2.1
| 1,935 | 0.001034 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libtermkey(Package):
"""Easy keyboard entry processing for terminal programs"""
homepage = "http://www.leonerd.org.uk/code/libtermkey/"
url = "http://www.leonerd.org.uk/code/libtermkey/libtermkey-0.18.tar.gz"
version('0.18', '3be2e3e5a851a49cc5e8567ac108b520')
version('0.17', '20edb99e0d95ec1690fe90e6a555ae6d')
version('0.16', '7a24b675aaeb142d30db28e7554987d4')
version('0.15b', '27689756e6c86c56ae454f2ac259bc3d')
version('0.14', 'e08ce30f440f9715c459060e0e048978')
depends_on('libtool', type='build')
depends_on('ncurses')
def install(self, spec, prefix):
make()
make("install", "PREFIX=" + prefix)
|
petezybrick/iote2e
|
iote2e-pyclient/src/iote2epyclient/processsim/processsimhumiditytomister.py
|
Python
|
apache-2.0
| 3,105 | 0.009662 |
# Copyright 2016, 2017 Peter Zybrick and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ProcessSimHumidityToMister
:author: Pete Zybrick
:contact: pzybrick@gmail.com
:version: 1.0.0
"""
import logging
import time
import uuid
import sys
from iote2epyclient.launch.clientutils import ClientUtils
from iote2epyclient.schema.iote2erequest import Iote2eRequest
logger = logging.getLogger(__name__)
class ProcessSimHumidityToMister(object):
'''
Simulate Humidity Sensor and Mister
'''
def __init__(self, loginVo, sensorName):
self.loginVo = loginVo
self.sensorName = sensorName
self.humidityDirectionIncrease = True
self.HUMIDITY_MIN = 82.0
self.HUMIDITY_MAX = 93.0
self.HUMIDITY_INCR = .5
self.humidityNow = 90.0
def createIote2eRequest(self ):
time.sleep(2)
logger.info('ProcessSimHumidityToMister createIote2eRequest:')
if self.humidityDirectionIncrease and self.humidityNow < self.HUMIDITY_MAX:
self.humidityNow += self.HUMIDITY_INCR
elif (not self.humidityDirectionIncrease) and self.humidityNow > self.HUMIDITY_MIN:
self.humidityNow -= self.HUMIDITY_INCR;
logger.info( "humidityNow: {}".format(self.humidityNow))
if self.humidityNow <= self.HUMIDITY_MIN or self.humidityNow >= self.HUMIDITY_MAX:
logger.error("Humidity exceeded: {}".format(self.humidityNow))
# TODO: need to throw an exception or something so the calling thread exits
sys.exit(8)
# TODO: read humidity from sensor here
pairs = { self.sensorName: str(self.humidityNow)}
iote2eRequest = Iote2eRequest( login_name=self.loginVo.loginName,source_name=self.loginVo.sourceName, source_type='humidity',
request_uuid=str(uuid.uuid4()),
request_timestamp=ClientUtils.nowIso8601(),
pairs=pairs, operation='SENSORS_VALUES')
return iote2eRequest
def handleIote2eResult(self, iote2eResult ):
# TODO: turn on/off actuator (fan) here
logger.info('ProcessSimHumidityToMister handleIote2eResult: ' + str(iote2eResult))
actuatorValue = iote2eResult.pairs['actuatorValue'];
logger.info('actuatorValue {}'.format(actuatorValue))
if 'off' == actuatorValue:
self.humidityDirectionIncrease = False;
elif 'on' == actuatorValue:
self.humidityDirectionIncrease = True;
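    # Editorial summary of the simulated feedback loop (inferred from the code
    # above, not from project documentation): each createIote2eRequest() call
    # moves humidityNow by HUMIDITY_INCR in the current direction, and
    # handleIote2eResult() flips that direction when the server's actuatorValue
    # is 'on' (humidity allowed to rise) or 'off' (humidity allowed to fall).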
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/idlelib/PyShell.py
|
Python
|
gpl-2.0
| 48,715 | 0.00115 |
#! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import exceptions
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from EditorWindow import EditorWindow, fixwordbreaks
from FileList import FileList
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from OutputWindow import OutputWindow
from configHandler import idleConf
import idlever
import rpc
import Debugger
import RemoteDebugger
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno):
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename, lineno))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
line = linecache.getline(filename, lineno).strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(which destroys them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, l
|
ineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(
|
self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
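    # Illustrative note (not in the original source): store_file_breaks() writes one line
    # per file to breakpoints.lst, pairing the path with the list literal written above,
    # e.g.  /home/user/script.py=[12, 40]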
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
|
dscho/hg
|
mercurial/copies.py
|
Python
|
gpl-2.0
| 19,523 | 0.001639 |
# copies.py - copy detection for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import heapq
from . import (
node,
pathutil,
scmutil,
util,
)
def _findlimit(repo, a, b):
"""
Find the last revision that needs to be checked to ensure that a full
transitive closure for file copies can be properly calculated.
Generally, this means finding the earliest revision number that's an
ancestor of a or b but not both, except when a or b is a direct descendent
of the other, in which case we can return the minimum revnum of a and b.
None if no such revision exists.
"""
# basic idea:
# - mark a and b with different sides
# - if a parent's children are all on the same side, the parent is
# on that side, otherwise it is on no side
# - walk the graph in topological order with the help of a heap;
# - add unseen parents to side map
# - clear side of any parent that has children on different sides
# - track number of interesting revs that might still be on a side
# - track the lowest interesting rev seen
# - quit when interesting revs is zero
cl = repo.changelog
working = len(cl) # pseudo rev for the working directory
if a is None:
a = working
if b is None:
b = working
side = {a: -1, b: 1}
visit = [-a, -b]
heapq.heapify(visit)
interesting = len(visit)
hascommonancestor = False
limit = working
while interesting:
r = -heapq.heappop(visit)
if r == working:
parents = [cl.rev(p) for p in repo.dirstate.parents()]
else:
parents = cl.parentrevs(r)
for p in parents:
if p < 0:
continue
if p not in side:
# first time we see p; add it to visit
side[p] = side[r]
if side[p]:
interesting += 1
heapq.heappush(visit, -p)
elif side[p] and side[p] != side[r]:
# p was interesting but now we know better
side[p] = 0
interesting -= 1
hascommonancestor = True
if side[r]:
limit = r # lowest rev visited
interesting -= 1
if not hascommonancestor:
return None
# Consider the following flow (see test-commit-amend.t under issue4405):
# 1/ File 'a0' committed
# 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
# 3/ Move back to first commit
# 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
# 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
#
# During the amend in step five, we will be in this state:
#
# @ 3 temporary amend commit for a1-amend
# |
# o 2 a1-amend
# |
# | o 1 a1
# |/
# o 0 a0
#
# When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
# yet the filelog has the copy information in rev 1 and we will not look
# back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendant of b or vice-versa.
return min(limit, a, b)
def _chain(src, dst, a, b):
'''chain two sets of copies a->b'''
t = a.copy()
for k, v in b.iteritems():
if v in t:
# found a chain
if t[v] != k:
# file wasn't renamed back to itself
t[k] = t[v]
if v not in dst:
# chain was a rename, not a copy
del t[v]
if v in src:
# file is a copy of an existing file
t[k] = v
# remove criss-crossed copies
for k, v in t.items():
if k in src and v in dst:
del t[k]
return t
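# Hedged illustration (not part of Mercurial): a minimal check of how _chain collapses an
# intermediate rename. The dicts below stand in for the src/dst contexts, which are only
# used for membership tests here; all names are hypothetical.
def _example_chain():
    src = {'a': None}        # 'a' exists in the source context
    dst = {'c': None}        # 'c' exists in the destination context
    a_to_m = {'b': 'a'}      # copies recorded from src to an intermediate revision
    m_to_b = {'c': 'b'}      # copies recorded from the intermediate revision to dst
    return _chain(src, dst, a_to_m, m_to_b)   # expected result: {'c': 'a'}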
def _tracefile(fctx, am, limit=-1):
'''return file context that is the ancestor of fctx present in ancestor
manifest am, stopping after the first ancestor lower than limit'''
for f in fctx.ancestors():
if am.get(f.path(), None) == f.filenode():
return f
if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
return None
def _dirstatecopies(d):
ds = d._repo.dirstate
c = ds.copies().copy()
for k in c.keys():
if ds[k] not in 'anm':
del c[k]
return c
def _computeforwardmissing(a, b, match=None):
"""Computes which files are in b but not a.
This is its own function so extensions can easily wrap this call to see what
files _forwardcopies is about to process.
"""
ma = a.manifest()
mb = b.manifest()
if match:
ma = ma.matches(match)
mb = mb.matches(match)
return mb.filesnotin(ma)
def _forwardcopies(a, b, match=None):
'''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
# check for working copy
w = None
if b.rev() is None:
w = b
b = w.p1()
if a == b:
# short-circuit to avoid issues with merge states
return _dirstatecopies(w)
# files might have to be traced back to the fctx parent of the last
# one-side-only changeset, but not further back than that
limit = _findlimit(a._repo, a.rev(), b.rev())
if limit is None:
limit = -1
am = a.manifest()
# find where new files came from
# we currently d
|
on't try to find where old files went, too expensive
# this means we can miss a case like 'hg rm b; hg cp a b'
cm = {}
# Computing the forward missing is quite expensive on large manifests, since
# it compares the entire manifests. We can optimize it in the common use
# case of computing what copies are in a commit versus i
|
ts parent (like
# during a rebase or histedit). Note, we exclude merge commits from this
# optimization, since the ctx.files() for a merge commit is not correct for
# this comparison.
forwardmissingmatch = match
if not match and b.p1() == a and b.p2().node() == node.nullid:
forwardmissingmatch = scmutil.matchfiles(a._repo, b.files())
missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
for f in missing:
fctx = b[f]
fctx._ancestrycontext = ancestrycontext
ofctx = _tracefile(fctx, am, limit)
if ofctx:
cm[f] = ofctx.path()
# combine copies from dirstate if necessary
if w is not None:
cm = _chain(a, w, cm, _dirstatecopies(w))
return cm
def _backwardrenames(a, b):
if a._repo.ui.configbool('experimental', 'disablecopytrace'):
return {}
# Even though we're not taking copies into account, 1:n rename situations
# can still exist (e.g. hg cp a b; hg mv a c). In those cases we
# arbitrarily pick one of the renames.
f = _forwardcopies(b, a)
r = {}
for k, v in sorted(f.iteritems()):
# remove copies
if v in a:
continue
r[v] = k
return r
def pathcopies(x, y, match=None):
'''find {dst@y: src@x} copy mapping for directed compare'''
if x == y or not x or not y:
return {}
a = y.ancestor(x)
if a == x:
return _forwardcopies(x, y, match=match)
if a == y:
return _backwardrenames(x, y)
return _chain(x, y, _backwardrenames(x, a),
_forwardcopies(a, y, match=match))
def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2):
"""Computes, based on addedinm1 and addedinm2, the files exclusive to c1
and c2. This is its own function so extensions can easily wrap this call
to see what files mergecopies is about to process.
Even though c1 and c2 are not used in this function, they are useful in
other extensions for being able to read the file nodes of the changed files.
"""
u1 = sorted(addedinm1 - addedinm2)
u2 = sorted(addedinm2 - addedinm1)
if u1:
repo.ui.debug(" unmatched files in local:\n %s\n"
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_core/logging/handlers.py
|
Python
|
mit
| 210 | 0 |
fro
|
m django.db import transaction
from waldur_core.logging import tasks
def process_hook(sender, instance, created=False, **kwargs):
transaction.on_commit(lambda: tasks.process_event.delay(instance.pk))
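# Hedged sketch (not in the original module): process_hook is typically attached to a
# model's post_save signal; the model class passed in is an assumption of the caller.
def connect_process_hook(model_cls):
    """Illustrative only: fire process_hook after saves of ``model_cls``."""
    from django.db.models.signals import post_save
    post_save.connect(process_hook, sender=model_cls)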
| |
sivaprakashniet/push_pull
|
push_and_pull/wsgi.py
|
Python
|
bsd-3-clause
| 2,240 | 0.005357 |
"""
WSGI config for {{ project_name }} project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
#import sys
#import site
#import subprocess
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../")
# Add the virtualenv packages to the site directory. This uses the technique
# described at http://code.goo
|
gle.com/p/modwsgi/wiki/VirtualEnvironments
# Remember original sys.path.
#prev_sys_path = list(sys.path)
# Get
|
the path to the env's site-packages directory
#site_packages = subprocess.check_output([
# os.path.join(PROJECT_ROOT, '.virtualenv/bin/python'),
# '-c',
# 'from distutils.sysconfig import get_python_lib;'
# 'print get_python_lib(),'
#]).strip()
# Add the virtualenv site-packages to the site packages
#site.addsitedir(site_packages)
# Reorder sys.path so the new directories are at the front.
#new_sys_path = []
#for item in list(sys.path):
# if item not in prev_sys_path:
# new_sys_path.append(item)
# sys.path.remove(item)
#sys.path[:0] = new_sys_path
# Add the app code to the path
#sys.path.append(PROJECT_ROOT)
os.environ['CELERY_LOADER'] = 'django'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "push_and_pull.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
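# Hedged illustration (not part of this project): a minimal WSGI middleware that adds a
# response header, showing how the Django application above could be wrapped. The class
# name and the header it adds are purely illustrative.
class ExampleHeaderMiddleware(object):
    def __init__(self, app):
        self.app = app
    def __call__(self, environ, start_response):
        def _start_response(status, headers, exc_info=None):
            return start_response(status, list(headers) + [("X-Example", "1")], exc_info)
        return self.app(environ, _start_response)
# application = ExampleHeaderMiddleware(application)  # opt-in; left commented out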
|
TomWerner/BlunderPuzzler
|
gettingstarted/settings.py
|
Python
|
mit
| 4,580 | 0.001092 |
"""
Django settings for gettingstarted project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: change this before deploying to production!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'puzzler'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'gettingstarted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.
|
context_proc
|
essors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gettingstarted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
# ... your other backends
'django.contrib.auth.backends.ModelBackend',
)
try:
from .local_settings import *
except ImportError:
PASSWORD = os.environ.get('CHESS_PASSWORD', None)
USERNAME = os.environ.get('CHESS_USERNAME', None)
ALLOWED_HOSTS = [os.environ.get('HOST_URL', None), 'chess.com']
CURRENT_HOST = os.environ.get('HOST_URL', None)
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY', None)
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = True
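# Hedged example (not part of the repository): a developer-only local_settings.py, whose
# import is attempted above, might simply override a few values, e.g.
#   DEBUG = True
#   SECRET_KEY = "dev-only-secret"
#   ALLOWED_HOSTS = ["localhost", "127.0.0.1"]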
|
goshippo/shippo-python-client
|
setup.py
|
Python
|
mit
| 1,408 | 0 |
import os
import sys
import warnings
from setuptools import setup
version_contents = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "shippo", "version.py"), encoding="utf-8") as f:
exec(f.read(), version_contents)
setup(
name='shippo',
version=version_contents['VERSION'],
description='Shipping API Python library (USPS, FedEx, UPS and more)',
author='Shippo',
author_email='support@goshippo.com',
url='https://goshippo.com/',
packages=['shippo', 'shippo.test', 'shippo.test.integration'],
package_data={'shippo': ['../VERSION']},
install_requires=[
'requests >= 2.21.0, <= 2.27.1',
'simplejson >= 3.16.0, <= 3.17.2',
],
test_suite='shippo.test.all',
tests_require=['unittest2', 'mock', 'vcrpy'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :
|
: OSI Approved :: MIT License",
"Operating System
|
:: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
|
Shinichi-Nakagawa/country-house-server
|
server/house/urls.py
|
Python
|
mit
| 349 | 0.002865 |
#!/usr/bin/env python
# -*- coding: utf-8
|
-*-
__author__ = 'Shinichi Nakagawa'
from house import views
from django.conf.urls import patterns, url, include
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'metrics', views.MetricsViewSet)
urlpattern
|
s = patterns('',
url(r'^', include(router.urls)),
)
|
rakeshvar/theanet
|
train.py
|
Python
|
apache-2.0
| 7,597 | 0.001843 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import ast
import pickle
import numpy as np
import os
import socket
import sys
import importlib
from datetime import datetime
import theano as th
import theanet.neuralnet as nn
################################ HELPER FUNCTIONS ############################
def share(data, dtype=th.config.floatX, borrow=True):
return th.shared(np.asarray(data, dtype), borrow=borrow)
def fixdim(arr):
if arr.ndim == 2:
side = int(arr.shape[-1] ** .5)
assert side**2 == arr.shape[-1], "Need a perfect square"
return arr.reshape((arr.shape[0], 1, side, side))
if arr.ndim == 3:
return np.expand_dims(arr, axis=1)
if arr.ndim == 4:
return arr
raise ValueError("Image data arrays must have 2,3 or 4 dimensions only")
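# Hedged illustration (not in the original script): fixdim() turns flat MNIST-style rows
# into NCHW arrays; the batch size of 5 below is arbitrary.
def _fixdim_example():
    flat = np.zeros((5, 784))                      # 5 flattened 28x28 images
    assert fixdim(flat).shape == (5, 1, 28, 28)    # channel axis inserted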
class WrapOut:
def __init__(self, use_file, name=''):
self.name = name
self.use_file = use_file
if use_file:
self.stream = open(name, 'w', 1)
else:
self.stream = sys.stdout
def write(self, data):
self.stream.write(data)
def forceflush(self):
if self.use_file:
self.stream.close()
self.stream = open(self.name, 'a', 1)
def __getattr__(self, attr):
return getattr(self.stream, attr)
################################### MAIN CODE ################################
if len(sys.argv) < 3:
print('Usage:', sys.argv[0],
''' <dataset> <params_file(s)> [redirect=0]
dataset:
Should be the name of a module in the data folder.
Like "mnist", "telugu_ocr", "numbers" etc.
params_file(s) :
Parameters for the NeuralNet
- name.prms : contains the initialization code
- name.pkl : pickled file from a previous run (has wts too).
redirect:
1 - redirect stdout to a params_<SEED>.txt file
''')
sys.exit()
dataset_name = sys.argv[1]
prms_file_name = sys.argv[2]
########################################## Import Parameters
if prms_file_name.endswith('.pkl'):
with open(prms_file_name, 'rb') as f:
params = pickle.load(f)
else:
with open(prms_file_name, 'r') as f:
params = ast.literal_eval(f.read())
layers = params['layers']
tr_prms = params['training_params']
try:
allwts = params['allwts']
except KeyError:
allwts = None
## Init SEED
if (not 'SEED' in tr_prms) or (tr_prms['SEED'] is None):
tr_prms['SEED'] = np.random.randint(0, 1e6)
out_file_head = os.path.basename(prms_file_name,).replace(
os.path.splitext(prms_file_name)[1], "_{:06d}".format(tr_prms['SEED']))
if sys.argv[-1] == '1':
print("Printing output to {}.txt".format(out_file_head), file=sys.stderr)
sys.stdout = WrapOut(True, out_file_head + '.txt')
else:
sys.stdout = WrapOut(False)
########################################## Print Parameters
print(' '.join(sys.argv), file=sys.stderr)
print(' '.join(sys.argv))
print('Time :' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print('Device : {} ({})'.format(th.config.device, th.config.floatX))
print('Host :', socket.gethostname())
print(nn.get_layers_info(layers))
print(nn.get_training_params_info(tr_prms))
########################################## Load Data
data = importlib.import_module("data." + dataset_name)
tr_corpus_sz, n_maps, _, layers[0][1]['img_sz'] = data.training_x.shape
te_corpus_sz = data.testing_x.shape[0]
data.training_x = fixdim(data.training_x)
data.testing_x = fixdim(data.testing_x)
trin_x = share(data.training_x)
test_x = share(data.testing_x)
trin_y = share(data.training_y, 'int32')
test_y = share(data.testing_y, 'int32')
try:
trin_aux = share(data.training_aux)
test_aux = share(data.testing_aux)
except AttributeError:
trin_aux, test_aux = None, None
print("\nInitializing the net ... ")
net = nn.NeuralNet(layers, tr_prms, allwts)
print(net)
print(net.get_wts_info(detailed=True).replace("\n\t", ""))
print("\nCompiling ... ")
training_fn = net.get_trin_model(trin_x, trin_y, trin_
|
aux)
test_fn_tr = net.get_test_model(trin_x, trin_y, trin_aux)
test_fn_te = net.get_test_model(test_x, test_y, test_aux)
batch_sz = tr_prms['BATCH_SZ']
nEpochs = tr_prms['NUM_EPOCHS']
nTrBatches = tr_corpus_sz // batch_sz
nTeBatches = te_corpus_sz // batch_sz
############################################## MORE HELPERS
def test_wrapper(nylist):
sym_err, bit_err,
|
n = 0., 0., 0
for symdiff, bitdiff in nylist:
sym_err += symdiff
bit_err += bitdiff
n += 1
return 100 * sym_err / n, 100 * bit_err / n
if net.tr_layers[-1].kind == 'LOGIT':
aux_err_name = 'BitErr'
else:
aux_err_name = 'P(MLE)'
def get_test_indices(tot_samps, bth_samps=tr_prms['TEST_SAMP_SZ']):
n_bths_each = int(bth_samps / batch_sz)
n_bths_all = int(tot_samps / batch_sz)
cur = 0
while True:
yield [i % n_bths_all for i in range(cur, cur + n_bths_each)]
cur = (cur + n_bths_each) % n_bths_all
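# Hedged illustration: with hypothetical values batch_sz = 50 and TEST_SAMP_SZ = 100, each
# next(...) on the generator above yields two consecutive batch indices and wraps around
# the corpus, e.g. [0, 1], then [2, 3], ..., so evaluation cycles over all test batches.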
test_indices = get_test_indices(te_corpus_sz)
trin_indices = get_test_indices(tr_corpus_sz)
pickle_file_name = out_file_head + '_{:02.0f}.pkl'
saved_file_name = None
def do_test():
global saved_file_name
test_err, aux_test_err = test_wrapper(test_fn_te(i)
for i in next(test_indices))
trin_err, aux_trin_err = test_wrapper(test_fn_tr(i)
for i in next(trin_indices))
print("{:5.2f}% ({:5.2f}%) {:5.2f}% ({:5.2f}%)".format(
trin_err, aux_trin_err, test_err, aux_test_err))
sys.stdout.forceflush()
if saved_file_name:
os.remove(saved_file_name)
saved_file_name = pickle_file_name.format(test_err)
with open(saved_file_name, 'wb') as pkl_file:
pickle.dump(net.get_init_params(), pkl_file, -1)
############################################ Training Loop
np.set_printoptions(precision=2)
print("Training ...")
print("Epoch Cost Tr_Error Tr_{0} Te_Error Te_{0}".format(aux_err_name))
for epoch in range(nEpochs):
total_cost = 0
for ibatch in range(nTrBatches):
cost, features, logprobs = training_fn(ibatch)
total_cost += cost
labels = data.training_y[ibatch*batch_sz:(ibatch+1)*batch_sz]
true_features = features[np.arange(batch_sz), labels]
if np.min(true_features) < -6 and layers[-1][0][:3] == "Exp":
print("Epoch:{} Iteration:{}".format(epoch, ibatch))
print(labels)
print(true_features)
print(net.get_wts_info(detailed=True))
if np.isnan(total_cost):
print("Epoch:{} Iteration:{}".format(epoch, ibatch))
print(net.get_wts_info(detailed=True))
raise ZeroDivisionError("Nan cost at Epoch:{} Iteration:{}"
"".format(epoch, ibatch))
if epoch % tr_prms['EPOCHS_TO_TEST'] == 0:
print("{:3d} {:>8.2f}".format(net.get_epoch(), total_cost), end=' ')
do_test()
if total_cost > 1e6:
print(net.get_wts_info(detailed=True))
net.inc_epoch_set_rate()
########################################## Final Error Rates
test_err, aux_test_err = test_wrapper(test_fn_te(i)
for i in range(te_corpus_sz//batch_sz))
trin_err, aux_trin_err = test_wrapper(test_fn_tr(i)
for i in range(tr_corpus_sz//batch_sz))
print("{:3d} {:>8.2f}".format(net.get_epoch(), 0), end=' ')
print("{:5.2f}% ({:5.2f}%) {:5.2f}% ({:5.2f}%)".format(
trin_err, aux_trin_err, test_err, aux_test_err))
|
kvaps/vdsm
|
tests/volumeTests.py
|
Python
|
gpl-2.0
| 2,197 | 0 |
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY o
|
r FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation
|
, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import uuid
from testlib import VdsmTestCase as TestCaseBase
from storage import blockSD
SDBLKSZ = 512
class FakeBlockStorageDomain(blockSD.BlockStorageDomain):
DOMAIN_VERSION = 3
def __init__(self, sdUUID, occupiedMetadataSlots=None):
self._sdUUID = sdUUID
self._logBlkSize = SDBLKSZ
self.occupiedMetadataSlots = occupiedMetadataSlots
@property
def sdUUID(self):
return self._sdUUID
@property
def logBlkSize(self):
return self._logBlkSize
@property
def stat(self):
return None
def getVersion(self):
return self.DOMAIN_VERSION
def _getOccupiedMetadataSlots(self):
return self.occupiedMetadataSlots
class BlockDomainMetadataSlotTests(TestCaseBase):
OCCUPIED_METADATA_SLOTS = [(4, 1), (7, 1)]
EXPECTED_METADATA_SLOT = 5
def setUp(self):
self.blksd = FakeBlockStorageDomain(str(uuid.uuid4()),
self.OCCUPIED_METADATA_SLOTS)
def testMetaSlotSelection(self):
with self.blksd.acquireVolumeMetadataSlot(None, 1) as mdSlot:
self.assertEqual(mdSlot, self.EXPECTED_METADATA_SLOT)
def testMetaSlotLock(self):
with self.blksd.acquireVolumeMetadataSlot(None, 1):
acquired = self.blksd._lvTagMetaSlotLock.acquire(False)
self.assertEqual(acquired, False)
|
BlancaCC/cultutrilla
|
python_aprendizaje/ejemplos_básicos/puertas.py
|
Python
|
gpl-3.0
| 1,203 | 0.030075 |
#!/bin/python3.5
# Program taken from HackerRank: it is given a list of 0s and 1s representing doors, 0 meaning the door is open and 1 meaning it is closed.
# The goal is to open every door;
# when a door is opened, the adjacent doors open as well if they are not already open.
# For a list of 0s and 1s the program returns the minimum and the maximum number of doors to open following this pattern.
import sys
def puertas( doors ):
min = 0
max = 0
i = 1
while i < len( doors) -2 :
        # Cases where a reduction applies
if(doors[i]) == 1:
if doors[ i-1 : i+2] == [1,1,1]:
min += 1
max += 2
i += 2
elif doors[ i] == 1:
min += 1
max += 1
|
i += 1
else:
min += 1
max += 1
i += 1
return [ min , max]
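# Hedged example (not in the original): three closed doors in a row need a single opening
# at the middle door at minimum, or two openings at maximum, so:
def _ejemplo_puertas():
    assert puertas([1, 1, 1, 0, 1]) == [1, 2]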
def prueba ( ):
for i in range (10):
|
print (i )
i += i
if __name__ == "__main__":
doors = list ( map( int, input().strip().split(' ')))
print ("La puerta creada: " , doors)
result = puertas (doors)
print( " ".join( map(str , result )))
prueba();
|
gadsbyfly/PyBioMed
|
PyBioMed/PyProtein/PyProteinAAIndex.py
|
Python
|
bsd-3-clause
| 9,731 | 0.002158 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
This module is used for obtaining the properties of amino acids or their pairs
from the aaindex database. You can freely use and distribute it. If you hava
any problem, you could contact with us timely!
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com
"""
# Core Library modules
import os
import string
import sys
AALetter = [
"A",
"R",
"N",
"D",
"C",
"E",
"Q",
"G",
"H",
"I",
"L",
"K",
"M",
"F",
"P",
"S",
"T",
"W",
"Y",
"V",
]
_aaindex = dict()
#####################################################################################################
class Record:
"""
Amino acid index (AAindex) Record
"""
aakeys = "ARNDCQEGHILKMFPSTWYV"
def __init__(self):
self.key = None
self.desc = ""
self.ref = ""
self.authors = ""
self.title = ""
self.journal = ""
self.correlated = dict()
self.index = dict()
self.comment = ""
def extend(self, row):
i = len(self.index)
for x in row:
self.index[self.aakeys[i]] = x
i += 1
def get(self, aai, aaj=None, d=None):
assert aaj is None
return self.index.get(aai, d)
def __getitem__(self, aai):
return self.get(aai)
def median(self):
x = sorted(filter(None, self.index.values()))
        half = len(x) // 2  # integer division so the value can index the list
if len(x) % 2 == 1:
return x[half]
return (x[half - 1] + x[half]) / 2.0
def __str__(self):
desc = self.desc.replace("\n", " ").strip()
return "%s(%s: %s)" % (self.__class__.__name__, self.key, desc)
#####################################################################################################
class MatrixRecord(Record):
"""
Matrix record for mutation matrices or pair-wise contact potentials
"""
def __init__(self):
Record.__init__(self)
self.index = []
self.rows = dict()
self.cols = dict()
def extend(self, row):
self.index.append(row)
def _get(self, aai, aaj):
i = self.rows[aai]
j = self.cols[aaj]
return self.index[i][j]
def get(self, aai, aaj, d=None):
try:
return self._get(aai, aaj)
except:
pass
try:
return self._get(aaj, aai)
except:
return d
def __getitem__(self, aaij):
return self.get(aaij[0], aaij[1])
def median(self):
x = []
for y in self.index:
x.extend(filter(None, y))
x.sort()
if len(x) % 2 == 1:
            return x[len(x) // 2]
        return sum(x[len(x) // 2 - 1 : len(x) // 2 + 1]) / 2.0
#####################################################################################################
def search(pattern, searchtitle=True, casesensitive=False):
"""
Search for pattern in description and title (optional) of all records and
return matched records as list. By default search case insensitive.
"""
whatcase = lambda i: i
if not casesensitive:
pattern = pattern.lower()
whatcase = lambda i: i.lower()
matches = []
    for record in _aaindex.values():
if (
pattern in whatcase(record.desc)
or searchtitle
and pattern in whatcase(record.title)
):
matches.append(record)
return matches
#####################################################################################################
def grep(pattern):
"""
Search for pattern in title and description of all records (case
insensitive) and print results on standard output.
"""
for record in search(pattern):
print(record)
#####################################################################################################
def get(key):
"""
Get record for key
"""
if len(_aaindex) == 0:
init()
return _aaindex[key]
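# Hedged usage sketch (not part of the module): load the bundled aaindex files and read a
# single residue value. "KYTJ820101" is an assumed example accession; any key present in
# aaindex1 works the same way.
def _example_lookup():
    init()                    # parse aaindex1/2/3 found next to this module
    rec = get("KYTJ820101")   # assumed key; raises KeyError if it is not in the database
    return rec["A"]           # the indexed property value for alanine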
#####################################################################################################
def _float_or_None(x):
if x == "NA" or x == "-":
return None
return float(x)
#####################################################################################################
def init(path=None, index="123"):
"""
Read in the aaindex files. You need to run this (once) before you can
access any records. If the files are not within the current directory,
you need to specify the correct directory path. By default all three
aaindex files are read in.
"""
index = str(index)
if path is None:
for path in [os.path.split(__file__)[0], "."]:
if os.path.exists(os.path.join(path, "aaindex" + index[0])):
break
print("path =", path, file=sys.stderr)
if "1" in index:
_parse(path + "/aaindex1", Record)
if "2" in index:
_parse(path + "/aaindex2", MatrixRecord)
if "3" in index:
_parse(path + "/aaindex3", MatrixRecord)
##############################################################################################
|
#######
def init_from_file(filename, type=Record):
_parse(filename, type)
#####################################################################################################
def _parse(filename, rec, quiet=True):
"""
Parse aaindex input file. `rec` must be `Record` for aaindex1 and
    `MatrixRecord` for aaindex
|
2 and aaindex3.
"""
if not os.path.exists(filename):
import urllib
url = (
"ftp://ftp.genome.jp/pub/db/community/aaindex/" + os.path.split(filename)[1]
)
# print 'Downloading "%s"' % (url)
filename = urllib.urlretrieve(url, filename)[0]
# print 'Saved to "%s"' % (filename)
f = open(filename)
current = rec()
lastkey = None
for line in f:
key = line[0:2]
if key[0] == " ":
key = lastkey
if key == "//":
_aaindex[current.key] = current
current = rec()
elif key == "H ":
current.key = line[2:].strip()
elif key == "R ":
current.ref += line[2:]
elif key == "D ":
current.desc += line[2:]
elif key == "A ":
current.authors += line[2:]
elif key == "T ":
current.title += line[2:]
elif key == "J ":
current.journal += line[2:]
elif key == "* ":
current.comment += line[2:]
elif key == "C ":
a = line[2:].split()
for i in range(0, len(a), 2):
current.correlated[a[i]] = float(a[i + 1])
elif key == "I ":
a = line[1:].split()
if a[0] != "A/L":
current.extend(map(_float_or_None, a))
elif list(Record.aakeys) != [i[0] for i in a] + [i[-1] for i in a]:
print("Warning: wrong amino acid sequence for", current.key)
else:
try:
assert list(Record.aakeys[:10]) == [i[0] for i in a]
assert list(Record.aakeys[10:]) == [i[2] for i in a]
except:
print("Warning: wrong amino acid sequence for", current.key)
elif key == "M ":
a = line[2:].split()
if a[0] == "rows":
if a[4] == "rows":
a.pop(4)
assert a[3] == "cols" and len(a) == 6
i = 0
for aa in a[2]:
current.rows[aa] = i
i += 1
i = 0
for aa in a[5]:
current.cols[aa] = i
i += 1
else:
current.extend(map(_float_or_None, a))
elif not quiet:
print('Warning:
|
yanggujun/meagram
|
login.py
|
Python
|
mit
| 312 | 0.00641 |
import falcon
import json
class LoginController:
def on_post(self, req, resp):
bod
|
y = req.stream.read()
loginInfo = json.loads(body)
print 'user: ' + loginInfo['userName']
print 'pass: ' + loginInfo['password']
resp.status = falcon.HTTP_200
|
resp.body = 'ok'
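# Hedged wiring sketch (not in the original file): mount the controller on a Falcon app.
# The '/login' route and the use of falcon.API (pre-3.0 style, matching this module) are
# assumptions for illustration.
def create_app():
    app = falcon.API()
    app.add_route('/login', LoginController())
    return app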
|
jedlitools/find-for-me
|
ex28_context_search.py
|
Python
|
mit
| 1,510 | 0.009187 |
import re
text = open('khalifa_tarikh.txt', mode="r", encoding="utf-8").read()
text = re.sub(r"َ|ً|ُ|ٌ|ِ|ٍ|ْ|ّ|ـ", "", text)
def search_words(checklist):
search_words = open(checklist, mode='r', encoding='utf-8').read().splitlines()
return search_words
def index_generator(word, text):
juz = 'الجزء:'
safha = 'الصفحة:'
page_regex = juz + r' \d+ ¦ ' + safha + r' \d+'
search_regex = word + r'.+?(' + page_regex + ')'
pagination = re.findall(search_regex, text, re.DOTALL)
return pagination
region = r"[وفبل]{0,2}"+r"[اأإآ]" +"فريقي" +r"[اةه]"
def context_search(region, checklist):
gov_words = search_words(checklist)
regex = "(?:\S+\s+){0,8}"+region+"(?:\s+\S+){0,8}"
contexts = re.findall(regex, text, re
|
.DOTALL)
outcomes = []
for passage in contexts:
for word in gov_words:
pre_all = r"(?:و|ف|ب|ل|ك|ال|أ|س|ت|ي|ن|ا){0,6}"
su_all = r"(?:و|ن|ه|ى|ا|تما|ها|نا|ت|تم|هم|كم|ة|كما|تمو|كن|هما|ي|وا|ني|ات|هن|تن
|
|ك|تا){0,4}"
regex_w = r"\b" + pre_all + word + su_all + r"\b"
if len(re.findall(regex_w, passage)) > 0:
passage_page = index_generator(passage, text)
passage = re.sub(r"\n", " ", passage)
outcomes.append((passage, passage_page))
break
return outcomes
governors = context_search(region, 'governors_checklist.txt')
print(governors)
|
kevin-coder/tensorflow-fork
|
tensorflow/python/data/experimental/ops/indexed_dataset_ops.py
|
Python
|
apache-2.0
| 4,824 | 0.004146 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for indexed datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class MaterializedIndexedDataset(object):
"""MaterializedIndexedDataset is highly experimental!
"""
def __init__(self, materialized_resource, materializer, output_classes,
output_types, output_shapes):
self._materialized_resource = materialized_resource
self._materializer = materializer
self._output_classes = output_classes
self._output_types = output_types
self._output_shapes = output_shapes
@property
def initializer(self):
if self._materializer is not None:
return self._materializer
raise ValueError("MaterializedDataset does not have a materializer")
def get(self, index):
"""Get retrieves a value (or set of values) from the IndexedDataset.
Args:
index: A uint64 scalar or vector tensor with the indices to retrieve.
Returns:
A tensor containing the values corresponding to `index`.
"""
# TODO(saeta): nest.pack_sequence_as(...)
return ged_ops.experimental_indexed_dataset_get(
self._materialized_resource,
index,
output_types=nest.flatten(
sparse.as_dense_types(self._output_types, self._output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_types(self._output_shapes, self._output_classes)))
# TODO(saeta): Add a `DatasetV1` wrapper if this is exposed via the public API.
class IndexedDataset(dataset_ops.Dataset):
"""IndexedDataset is highly experimental!
"""
def __init__(self):
pass
def materialize(self, shared_name=None, container=None):
"""Materialize creates a MaterializedIndexedDataset.
IndexedDatasets can be combined through operations such as TBD. Therefore,
they are only materialized when absolutely required.
Args:
shared_name: a string for the shared name to use for the resource.
container: a string for the container to store the resource.
Returns:
A MaterializedIndexedDataset.
"""
if container is None:
containe
|
r = ""
if shared_name is None:
shared_name = ""
materialized_resource = (
ged_ops.experimental_materialized_index_dataset_handle(
container=container,
shared_name=shared_name,
**dataset_ops.flat_structure(self)))
with ops.colocate_with(materialized_resource):
materializer = ged_ops.experimental_indexed_dataset_materialize(
self._as_variant_tensor(), materialized_resource)
return MaterializedIndexedDataset
|
(materialized_resource, materializer,
self.output_classes, self.output_types,
self.output_shapes)
@abc.abstractmethod
def _as_variant_tensor(self):
"""Creates a `tf.variant` `tf.Tensor` representing this IndexedDataset.
Returns:
A scalar `tf.Tensor` of `tf.variant` type, which represents this
IndexedDataset.
"""
raise NotImplementedError("IndexedDataset._as_variant_tensor")
# TODO(saeta): Add a `DatasetV1` wrapper if this is exposed via the public API.
class IdentityIndexedDataset(IndexedDataset):
"""IdentityIndexedDataset is a trivial indexed dataset used for testing.
"""
def __init__(self, size):
super(IdentityIndexedDataset, self).__init__()
# TODO(saeta): Verify _size is a scalar!
self._size = ops.convert_to_tensor(size, dtype=dtypes.uint64, name="size")
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.uint64, [])
def _as_variant_tensor(self):
return ged_ops.experimental_identity_indexed_dataset(self._size)
def _inputs(self):
return []
|
jmtyszka/CBICQA
|
bin/cbicqc_incoming.py
|
Python
|
mit
| 4,170 | 0.001439 |
#!/usr/bin/env python3
"""
Rename and organize Horos QC exported data in <BIDS Root>/incoming and place in <BIDS Root>/sourcedata
AUTHOR
----
Mike Tyszka, Ph.D.
MIT License
Copyright (c) 2019 Mike Tyszka
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIA
|
BILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
from glob import glob
import argparse
from pathlib import Path
import pydicom
from shutil import rmtree
def main():
parser = argparse.ArgumentParser(description='Fix subject and session directory naming in Horos output')
parser.add_argument('-d', '--dataset'
|
, default='.',
help='BIDS dataset directory containing sourcedata subdirectory')
# Parse command line arguments
args = parser.parse_args()
dataset_dir = os.path.realpath(args.dataset)
incoming_dir = os.path.join(dataset_dir, 'incoming')
sourcedata_dir = os.path.join(dataset_dir, 'sourcedata')
qc_dir = os.path.join(sourcedata_dir, 'QC')
# Create single QC subject
print("Checking that QC subject exists in sourcedata")
if os.path.isdir(qc_dir):
print(" It does - continuning")
else:
print(" QC subject does not exist - creating QC subject in sourcedata")
os.makedirs(qc_dir, exist_ok=True)
# Loop over all Qc study directories in sourcedata
# Expect subject/session directory names in the form "Qc_<session ID>_*/<session dir>/"
# Move session subdirectories from Qc_*/<session dir> to Qc/<ScanDate>
print("Scanning for incoming QC studies")
for inc_qc_dir in glob(os.path.join(incoming_dir, 'Qc*')):
print("")
print(" Processing {}".format(inc_qc_dir))
# There should be only one session subdirectory
dlist = list(glob(os.path.join(inc_qc_dir, '*')))
if len(dlist) > 0:
ses_dir = dlist[0]
# Get first DICOM file in ses_dir at any level
first_dcm = str(list(Path(ses_dir).rglob("*.dcm"))[0])
# Get acquisition date from DICOM header
acq_date = acquisition_date(first_dcm)
# Destination session directory name in QC subject folder
dest_dir = os.path.join(qc_dir, acq_date)
# Move and rename session subdirectory
print(' Moving %s to %s' % (ses_dir, dest_dir))
os.rename(ses_dir, dest_dir)
# Delete incoming Qc_* directory
print(' Deleting %s' % inc_qc_dir)
rmtree(inc_qc_dir)
def acquisition_date(dcm_fname):
"""
Extract acquisition date from DICOM header
:param dcm_fname: DICOM filename
:return acq_date: str, acquisition date (YYYYMMDD)
"""
# Default return date
acq_date = '19010101'
if not os.path.isfile(dcm_fname):
print('* File not found - %s' % dcm_fname)
try:
ds = pydicom.read_file(dcm_fname, force=True)
except IOError:
print("* Problem opening %s" % dcm_fname)
raise
except AttributeError:
print("* Problem opening %s" % dcm_fname)
raise
if ds:
acq_date = ds.AcquisitionDate
else:
print('* DICOM header problem - returning %s' % acq_date)
return acq_date
if 'main' in __name__:
main()
|
tbielawa/Taboot
|
taboot/__init__.py
|
Python
|
gpl-3.0
| 1,161 | 0 |
# -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright © 2009-2011, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is
|
distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The Taboot task library.
Taboot was created as a framework to do code deployments which
require a repetitive set of t
|
asks to be run in a certain order against
certain groups of hosts.
"""
__docformat__ = 'restructuredtext'
__author__ = "John Eckersberg"
__license__ = 'GPLv3+'
__version__ = '0.4.0'
__url__ = 'https://fedorahosted.org/Taboot/'
edit_header = '/usr/share/taboot/edit-header'
|
mozaik-association/mozaik
|
mail_job_priority/wizards/mail_compose_message.py
|
Python
|
agpl-3.0
| 1,920 | 0 |
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/ag
|
pl).
import ast
from odoo import api, exceptions, models, _
class MailComposeMessage(models.TransientModel):
_inherit = 'mail.compose.message'
@api.model
def _get_priorities(self):
"""
Load priorities from parameters.
:return: dict
"""
key = 'mail.sending.job.priorities'
try:
priorities = ast.literal_eval(
self.env['ir.config_parameter'].sudo().get_param(
key, default='{}'))
# Catch except
|
ion to have a understandable error message
except (ValueError, SyntaxError):
raise exceptions.UserError(
_("Error to load the system parameter (%s) "
"of priorities") % key)
# As literal_eval can transform str into any format, check if we
# have a real dict
if not isinstance(priorities, dict):
raise exceptions.UserError(
_("Error to load the system parameter (%s) of priorities.\n"
"Invalid dictionary") % key)
return priorities
@api.multi
def send_mail(self, auto_commit=False):
"""
Set a priority on subsequent generated mail.mail, using priorities
set into the configuration.
:return: dict/action
"""
active_ids = self.env.context.get('active_ids')
default_priority = self.env.context.get('default_mail_job_priority')
if active_ids and not default_priority:
priorities = self._get_priorities()
size = len(active_ids)
limits = [lim for lim in priorities if lim <= size]
if limits:
prio = priorities.get(max(limits))
self = self.with_context(default_mail_job_priority=prio)
return super().send_mail(auto_commit=auto_commit)
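# Hedged illustration (not in the module): the 'mail.sending.job.priorities' parameter is
# expected to literal-eval to a dict mapping a recipient-count threshold to a job priority,
# e.g. {0: 30, 100: 50, 1000: 80}. With 250 active_ids the largest threshold <= 250 is 100,
# so the generated mail.mail records would get priority 50.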
|
habibmasuro/django-wiki
|
wiki/plugins/attachments/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 12,406 | 0.008061 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Attachment'
db.create_table('attachments_attachment', (
('reusableplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ReusablePlugin'], unique=True, primary_key=True)),
('current_revision', self
|
.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='current_set', unique=True, null=True, to=orm['attachments.Attach
|
mentRevision'])),
('original_filename', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
))
db.send_create_signal('attachments', ['Attachment'])
# Adding model 'AttachmentRevision'
db.create_table('attachments_attachmentrevision', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision_number', self.gf('django.db.models.fields.IntegerField')()),
('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.AttachmentRevision'], null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('attachment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.Attachment'])),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('attachments', ['AttachmentRevision'])
def backwards(self, orm):
# Deleting model 'Attachment'
db.delete_table('attachments_attachment')
# Deleting model 'AttachmentRevision'
db.delete_table('attachments_attachmentrevision')
models = {
'attachments.attachment': {
'Meta': {'object_name': 'Attachment', '_ormbases': ['wiki.ReusablePlugin']},
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'reusableplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ReusablePlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'attachments.attachmentrevision': {
'Meta': {'ordering': "('created',)", 'object_name': 'AttachmentRevision'},
'attachment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.Attachment']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.AttachmentRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'
|
sckasturi/saltlake
|
commands/time.py
|
Python
|
gpl-2.0
| 1,137 | 0.003518 |
# Copyright (C) 2013-20
|
14 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTA
|
BILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import time
from helpers.command import Command
@Command(['time', 'date'])
def cmd(send, msg, args):
"""Tells the time.
Syntax: {command}
"""
bold = '\x02'
if not msg:
msg = bold + "Date: " + bold + "%A, %m/%d/%Y" + bold + " Time: " + bold + "%H:%M:%S"
send(time.strftime(msg))
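# A minimal usage sketch, assuming the module is run directly: it only exercises
# the strftime formats that cmd() builds, without the bot's command plumbing
# (default_fmt is an illustrative name, not part of the original command).
if __name__ == '__main__':
    bold = '\x02'
    default_fmt = bold + "Date: " + bold + "%A, %m/%d/%Y" + bold + " Time: " + bold + "%H:%M:%S"
    print(time.strftime(default_fmt))   # e.g. "Date: Friday, 01/01/2016 Time: 12:00:00" with bold toggles
    print(time.strftime("%Y-%m-%d"))    # a user-supplied format string is passed through unchanged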
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/remote/models/device_object_object_remote.py
|
Python
|
apache-2.0
| 3,242 | 0.004318 |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceObjectObjectRemote(RemoteModel):
"""
Network Objects cross usage
| ``DeviceObjectObjectID:`` The internal NetMRI identifier of this usage relationship between network objects.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device to which belongs this network objects.
| ``attribute type:`` number
| ``ParentDeviceObjectID:`` The internal NetMRI identifier of the parent network object (the user).
| ``attribute type:`` number
| ``ChildDeviceObjectID:`` The internal NetMRI identifier of the child network object (the used service).
| ``attribute type:`` number
| ``OoFirstSeenTime:`` The timestamp of when NetMRI saw for the first time this relationship.
| ``attribute type:`` datetime
| ``OoProvisionData:`` Internal data - do not modify, may change without warning.
| ``attribute type:`` string
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``OoStartTime:`` The starting effective time of this record.
| ``attribute type:`` datetime
| ``OoEndTime:`` The ending effective time of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``OoTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``OoChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
"""
properties = ("DeviceObjectObjectID",
"DeviceID",
"ParentDeviceObjectID",
"ChildDeviceObjectID",
"OoFirstSeenTime",
"OoProvisionData",
"DataSourceID",
"OoStartTime",
"OoEndTime",
"OoTimestamp",
"OoChangedCols",
)
@property
@check_api_availability
def parent_device_object(self):
"""
The parent network object of this relationship.
``attribute type:`` model
"""
return self.broker.parent_device_object(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def child_device_object(self):
"""
The child network object of this relationship.
``attribute type:`` model
"""
return self.broker.child_device_object(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
    @property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
        return self.broker.device(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
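# A rough usage sketch: dump_relationship is an illustrative helper, not part of the
# library, and it assumes `rel` is a DeviceObjectObjectRemote instance that a NetMRI
# broker query has already returned (client and broker setup live outside this file).
# Scalar fields come from `properties`; the decorated properties above lazily issue
# further broker calls.
def dump_relationship(rel):
    # Print every scalar field declared on the model.
    for name in DeviceObjectObjectRemote.properties:
        print("%s: %s" % (name, getattr(rel, name, None)))
    # Follow the lazy relationships to the parent and child network objects.
    print("parent object: %s" % rel.parent_device_object)
    print("child object: %s" % rel.child_device_object)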
|
hiidef/hiispider
|
hiispider/metacomponents/__init__.py
|
Python
|
mit
| 805 | 0.008696 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Meta level spider components. Each communicates with one or more servers via
sub-components.
"""
from .pagegetter import PageGetter
from .worker import Worker
from .interface import Interface
from .jobgetter import JobGetter
from .jobscheduler import JobScheduler
from .identityscheduler import IdentityScheduler
from .testing import Testing
from .deltatesting import DeltaTesting
from .identitygetter import IdentityGetter
from .identityworker import IdentityWorker
from .identityinterface import IdentityInterface
from .base import MetaComponent
__all__ = ['PageGetter', 'Worker', 'JobGetter', 'Interface', "JobScheduler",
"IdentityScheduler", "Testing", "DeltaTesting", "MetaComponent",
"IdentityGetter", "IdentityWorker", "IdentityInterface"]
|
pombredanne/SourceForge-Allura
|
ForgeHg/forgehg/tests/functional/test_controllers.py
|
Python
|
apache-2.0
| 9,128 | 0.002082 |
import json
import pkg_resources
import pylons
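# Expose the pylons thread-locals under their legacy short names so "from pylons import c" below works.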
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import c
from ming.orm import ThreadLocalORMSession
from datadiff.tools import assert_equal
from allura.lib import helpers as h
from allura.tests import decorators as td
from allura import model as M
from alluratest.controller import TestController
class TestRootController(TestController):
def setUp(self):
TestController.setUp(self)
self.setup_with_tools()
@td.with_hg
def setup_with_tools(self):
h.set_context('test', 'src-hg', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgehg', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.status = 'ready'
c.app.repo.name = 'testrepo.hg'
c.app.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src-hg', neighborhood='Projects')
c.app.repo.refresh()
def test_fork(self):
to_project = M.Project.query.get(shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-hg/fork', params=dict(
project_id=str(to_project._id),
mount_point='code'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
r = self.app.get('/p/test2/code').follow().follow().follow()
assert 'Clone of' in r
r = self.app.get('/src-hg/').follow().follow()
assert 'Forks' in r
def test_merge_request(self):
to_project = M.Project.query.get(shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-hg/fork', params=dict(
project_id=str(to_project._id),
mount_point='code'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
r = self.app.get('/p/test2/code/').follow().follow()
assert 'Request Merge' in r
# Request Merge button only visible to repo admins
kw = dict(extra_environ=dict(username='test-user'))
r = self.app.get('/p/test2/code/', **kw).follow(**kw).follow(**kw)
assert 'Request Merge' not in r, r
# Request merge controller action only permitted for repo admins
r = self.app.get('/p/test2/code/request_merge', status=403, **kw)
r = self.app.get('/p/test2/code/request_merge')
assert 'Request merge' in r
# Merge request detail view
r = r.forms[0].submit().follow()
assert 'would like you to merge' in r
mr_num = r.request.url.split('/')[-2]
# Merge request list view
r = self.app.get('/p/test/src-hg/merge-requests/')
assert 'href="%s/"' % mr_num in r
# Merge request status update
r = self.app.post('/p/test/src-hg/merge-requests/%s/save' % mr_num,
params=dict(status='rejected')).follow()
assert 'Merge Request #%s: (rejected)' % mr_num in r, r
def test_status(self):
resp = self.app.get('/src-hg/status')
d = json.loads(resp.body)
assert d == dict(status='ready')
def test_status_html(self):
resp = self.app.get('/src-hg/').follow().follow()
# repo status not displayed if 'ready'
assert None == resp.html.find('div', dict(id='repo_status'))
h.set_context('test', 'src-hg', neighborhood='Projects')
c.app.repo.status = 'analyzing'
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
# repo status displayed if not 'ready'
resp = self.app.get('/src-hg/').follow().follow()
div = resp.html.find('div', dict(id='repo_status'))
assert div.span.text == 'analyzing'
def test_index(self):
resp = self.app.get('/src-hg/').follow().follow()
assert 'hg clone http://' in resp, resp
def test_index_empty(self):
self.app.get('/test-app-hg/')
def test_commit_browser(self):
resp = self.app.get('/src-hg/commit_browser')
def test_commit_browser_data(self):
resp = self.app.get('/src-hg/commit_browser_data')
        data = json.loads(resp.body)
assert data['max_row'] == 5
assert data['next_column'] == 1
assert_equal(data['built_tree']['e5a0b44437be783c41084e7bf0740f9b58b96ecf'],
{u'url': u'/p/test/src-hg/ci/e5a0b44437be783c41084e7bf0740f9b58b96ecf/',
u'oid': u'e5a0b44437be783c41084e7bf0740f9b58b96ecf',
u'column': 0,
u'parents': [u'773d2f8e3a94d0d5872988b16533d67e1a7f5462'],
u'message': u'Modify README', u'row': 4})
def _get_ci(self):
resp = self.app.get('/src-hg/').follow().follow()
for tag in resp.html.findAll('a'):
if tag['href'].startswith('/p/test/src-hg/ci/'):
return tag['href']
return None
def test_commit(self):
ci = self._get_ci()
resp = self.app.get(ci)
assert 'Rick Copeland' in resp, resp.showbrowser()
def test_tree(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/')
assert len(resp.html.findAll('tr')) == 4, resp.showbrowser()
assert 'README' in resp, resp.showbrowser()
def test_file(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README')
assert 'README' in resp.html.find('h2', {'class':'dark title'}).contents[2]
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert 'This is readme' in content, content
assert '<span id="l1" class="code_block">' in resp
assert 'var hash = window.location.hash.substring(1);' in resp
resp = self.app.get(ci + 'tree/test.jpg')
    def test_invalid_file(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/READMEz', status=404)
    def test_diff(self):
        ci = '/p/test/src-hg/ci/e5a0b44437be783c41084e7bf0740f9b58b96ecf/'
parent = '773d2f8e3a94d0d5872988b16533d67e1a7f5462'
resp = self.app.get(ci + 'tree/README?barediff=' + parent,
validate_chunk=True)
assert 'readme' in resp, resp.showbrowser()
assert '+++' in resp, resp.showbrowser()
assert '+Another line' in resp, resp.showbrowser()
def test_binary_diff(self):
ci = '/p/test/src-hg/ci/5a0a993efa9bce7d1983344261393e841fcfd65d/'
parent = '4a7f7ec0dcf5f005eb5d177b3d8c00bfc8159843'
resp = self.app.get(ci + 'tree/bin_file?barediff=' + parent,
validate_chunk=True)
assert 'Cannot display: file marked as a binary type.' in resp
class TestLogPagination(TestController):
def setUp(self):
TestController.setUp(self)
self.setup_with_tools()
@td.with_hg
def setup_with_tools(self):
h.set_context('test', 'src-hg', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgehg', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.status = 'ready'
c.app.repo.name = 'paginationtest.hg'
c.app.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src-hg', neighborhood='Projects')
c.app.repo.refresh()
def _get_ci(self):
resp = self.app.get('/src-hg/').follow().follow()
for tag in resp.html.findAll('a'):
if tag['href'].startswith('/p/test/src-hg/ci/'):
return tag['href']
return None
def test_show_pagination(self):
resp = self.app.
|
TickSmith/tickvault-python-api
|
tickvaultpythonapi/parsing/predicate.py
|
Python
|
mit
| 3,342 | 0.002095 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 TickSmith Corp.
#
# Licensed under the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Provides reusable query structure
'''
import sys
from tickvaultpythonapi.parsing.operation import Operation, BaseOperation
class Predicate(object):
key = ""
operation = ""
value = ""
    opClass = Operation() # Defaults to Operation, which allows no operations
def __init__(self, key, op, val):
"""
Assign key, operation and value
"""
self.key = key
self.operation = self.get_valid_op(op)
self.value = val
def get_valid_op(self, op):
"""
Uses opClass (subtypes of Operation) to determine whether the
given operation is allowed. If it is, it returns the string that
        will be appended to the key name (ex. '>' results in 'Gte', so that the
        query will be 'keyGte')
        """
try:
return self.opClass.get_str(op)
except Exception as e:
sys.exit(e)
def get_as_kv_pair(self):
"""
Get as key-value pair
(ex. key = 'price', operation = '!=', value = '50',
result= {"priceNeq" : "50"})
"""
return {self.key + self.operation : str(self.value)}
def get_as_tuple(self):
"""
Get as tuple
(ex. key = 'price', operation = '!=', value = '50',
result= ("priceNeq","50")
"""
return (self.key + self.operation, str(self.value))
def __str__(self):
"""
@Overrride of __str__()
"""
return self.key + self.operation + "=" + str(self.value)
class BasePredicate(Predicate):
# Replace opClass with BaseOperation
opClass = BaseOperation()
# Getter for opClass
@classmethod
def get_op_class(self):
return self.opClass
if __name__ == '__main__':
params = {"param1":"value1"}
bp = BasePredicate("line_type", "=", "T,E")
print(bp.opClass.op_to_str)
p = bp.get_as_kv_pair()
params = {**params, **p}
print(params)
print(BasePredicate("price", ">", 7).get_as_kv_pair())
print(BasePredicate("price", ">=", "a"))
print(BasePredicate("price", "<=", "7").get_as_kv_pair())
print(BasePredicate("price", "!=", "7"))
|
Osndok/zim-desktop-wiki
|
zim/plugins/bookmarksbar.py
|
Python
|
gpl-2.0
| 18,469 | 0.030646 |
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Pavel_M <plprgt@gmail.com>,
# released under the GNU GPL version 3.
# This plugin is for Zim program by Jaap Karssenberg <jaap.karssenberg@gmail.com>.
#
# This plugin uses an icon from Tango Desktop Project (http://tango.freedesktop.org/)
# (the Tango base icon theme is released to the Public Domain).
import gobject
import gtk
import pango
from zim.actions import toggle_action, action
from zim.plugins import PluginClass, extends, WindowExtension
from zim.notebook import Path
from zim.gui.widgets import TOP, TOP_PANE
from zim.signals import ConnectorMixin
from zim.gui.pathbar import ScrolledHBox
from zim.gui.clipboard import Clipboard
import logging
logger = logging.getLogger('zim.plugins.bookmarksbar')
# Keyboard shortcut constants.
BM_TOGGLE_BAR_KEY = 'F4'
BM_ADD_BOOKMARK_KEY = '<alt>0'
class BookmarksBarPlugin(PluginClass):
plugin_info = {
'name': _('BookmarksBar'), # T: plugin name
'description': _('''\
This plugin provides a bar for bookmarks.
'''), # T: plugin description
'author': 'Pavel_M',
'help': 'Plugins:BookmarksBar', }
plugin_preferences = (
# key, type, label, default
('max_bookmarks', 'int', _('Maximum number of bookmarks'), 15, (5, 20)), # T: plugin preference
('save', 'bool', _('Save bookmarks'), True), # T: preferences option
('add_bookmarks_to_beginning', 'bool', _('Add new bookmarks to the beginning of the bar'), False), # T: preferences option
)
@extends('MainWindow')
class MainWindowExtension(WindowExtension):
uimanager_xml = '''
<ui>
<menubar name='menubar'>
<menu action='view_menu'>
<placeholder name='plugin_items'>
<menuitem action='toggle_show_bookmarks'/>
</placeholder>
</menu>
<menu action='tools_menu'>
<placeholder name='plugin_items'>
<menuitem action='add_bookmark'/>
</placeholder>
</menu>
<menu action='go_menu'>
<placeholder name='plugin_items'>
<menu action='go_bookmarks_menu'>
<menuitem action='bookmark_1'/>
<menuitem action='bookmark_2'/>
<menuitem action='bookmark_3'/>
<menuitem action='bookmark_4'/>
<menuitem action='bookmark_5'/>
<menuitem action='bookmark_6'/>
<menuitem action='bookmark_7'/>
<menuitem action='bookmark_8'/>
<menuitem action='bookmark_9'/>
</menu>
</placeholder>
</menu>
</menubar>
<toolbar name='toolbar'>
<placeholder name='tools'>
<toolitem action='toggle_show_bookmarks'/>
</placeholder>
</toolbar>
</ui>
'''
uimanager_menu_labels = {
'go_bookmarks_menu': _('Book_marks'), # T: Menu title
}
def __init__(self, plugin, window):
WindowExtension.__init__(self, plugin, window)
self.widget = BookmarkBar(self.window.ui, self.uistate,
self.window.pageview.get_page)
self.widget.show_all()
# Add a new option to the Index popup menu.
try:
self.widget.connectto(self.window.pageindex.treeview,
'populate-popup', self.on_populate_popup)
except AttributeError:
logger.error('BookmarksBar: popup menu not initialized.')
# Show/hide bookmarks.
self.uistate.setdefault('show_bar', True)
self.toggle_show_bookmarks(self.uistate['show_bar'])
# Init preferences in self.widget.
self.widget.on_preferences_changed(plugin.preferences)
self.widget.connectto(plugin.preferences, 'changed',
lambda o: self.widget.on_preferences_changed(plugin.preferences))
def teardown(self):
if self.widget:
try:
self.window.remove(self.widget)
except ValueError:
pass
self.widget.disconnect_all()
self.widget = None
def hide_widget(self):
'''Hide Bar.'''
self.window.remove(self.widget)
def show_widget(self):
'''Show Bar.'''
self.window.add_widget(self.widget, (TOP_PANE, TOP))
def on_populate_popup(self, treeview, menu):
'''Add 'Add Bookmark' option to the Index popup menu.'''
path = treeview.get_selected_path()
if path:
item = gtk.SeparatorMenuItem()
menu.prepend(item)
item = gtk.MenuItem(_('Add Bookmark')) # T: menu item bookmark plugin
page = self.window.ui.notebook.get_page(path)
item.connect('activate', lambda o: self.widget.add_new_page(page))
menu.prepend(item)
menu.show_all()
@action(_('_Run bookmark'), accelerator='<alt>1')
def bookmark_1(self):
self._open_bookmark(1)
@action(_('_Run bookmark'), accelerator='<alt>2')
def bookmark_2(self):
self._open_bookmark(2)
@action(_('_Run bookmark'), accelerator='<alt>3')
def bookmark_3(self):
self._open_bookmark(3)
@action(_('_Run bookmark'), accelerator='<alt>4')
def bookmark_4(self):
self._open_bookmark(4)
@action(_('_Run bookmark'), accelerator='<alt>5')
def bookmark_5(self):
self._open_bookmark(5)
@action(_('_Run bookmark'), accelerator='<alt>6')
def bookmark_6(self):
self._open_bookmark(6)
@action(_('_Run bookmark'), accelerator='<alt>7')
def bookmark_7(self):
self._open_bookmark(7)
@action(_('_Run bookmark'), accelerator='<alt>8')
def bookmark_8(self):
self._open_bookmark(8)
@action(_('_Run bookmark'), accelerator='<alt>9')
def bookmark_9(self):
self._open_bookmark(9)
def _open_bookmark(self, number):
number -= 1
try:
self.window.ui.open_page(Path(self.widget.paths[number]))
except IndexError:
pass
@toggle_action(_('Bookmarks'), stock='zim-add-bookmark',
tooltip = 'Show/Hide Bookmarks', accelerator = BM_TOGGLE_BAR_KEY) # T: menu item bookmark plugin
def toggle_show_bookmarks(self, active):
'''
Show/hide the bar with bookmarks.
'''
if active:
self.show_widget()
else:
self.hide_widget()
self.uistate['show_bar'] = active
@action(_('Add Bookmark'), accelerator = BM_ADD_BOOKMARK_KEY) # T: menu item bookmark plugin
def add_bookmark(self):
'''
Function to add new bookmarks to the bar.
Introduced to be used via keyboard shortcut.
'''
self.widget.add_new_page()
class BookmarkBar(gtk.HBox, ConnectorMixin):
def __init__(self, ui, uistate, get_page_func):
gtk.HBox.__init__(self)
self.ui = ui
self.uistate = uistate
self.save_flag = False # if True save bookmarks in config
		self.add_bookmarks_to_beginning = False # if False, new bookmarks are appended to the end of the bar
self.max_bookmarks = False # maximum number of bookmarks
self._get_page = get_page_func # function to get current page
		# Create button to add new bookmarks.
		self.plus_button = IconsButton(gtk.STOCK_ADD, gtk.STOCK_REMOVE, relief = False)
self.plus_button.set_tooltip_text(_('Add bookmark/Show settings'))
self.plus_button.connect('clicked', lambda o: self.add_new_page())
self.plus_button.connect('button-release-event', self.do_plus_button_popup_menu)
self.pack_start(self.plus_button, expand = False)
# Create widget for bookmarks.
self.container = ScrolledHBox()
self.pack_start(self.container, expand = True)
# Toggle between full/short page names.
self.uistate.setdefault('show_full_page_name', False)
# Save path to use later in Copy/Paste menu.
self._saved_bookmark = None
self.paths = [] # list of bookmarks as string objects
self.uistate.setdefault('bookmarks', [])
# Add pages from config to the bar.
for path in self.uistate['bookmarks']:
page = self.ui.notebook.get_page(Path(path))
if page.exists() and (page.name not in self.paths):
self.paths.append(page.name)
self.paths_names = {} # dict of changed names of bookmarks
self.uistate.setdefault('bookmarks_names', {})
# Function to transform random string to paths_names format.
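		# (keeps only the first 25 characters and collapses runs of whitespace to single spaces)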
self._convert_path_name = lambda a: ' '.join(a[:25].split())
# Add alternative bookmark names from config.
for path, name in self.uistate['bookmarks_names'].iteritems():
if path in self.paths:
try:
name = self._convert_path_name(name)
self.paths_names[path] = name
except:
logger.error('BookmarksBar: Error while loading path_names.')
# Look for new pages to mark corresponding bookmarks in the bar.
self.connectto(self.ui, 'open-page', self.on_open_page)
# Delete a bookmark if a page is deleted.
self.connectto(self.ui.notebook, 'deleted-page',
lambda obj, path: self.delete(path.name))
def on_open_pag
|
natewlew/mythtvarchiveserver
|
setup.py
|
Python
|
gpl-2.0
| 1,038 | 0.014451 |
"""
:synopsis: Setup
:copyright: 2014 Nathan Lewis, See LICENSE.txt
.. moduleauthor:: Nathan Lewis <natewlew@gmail.com>
"""
__version__ = '0.1'
__author__ = 'Nathan Lewis'
__email__ = 'natewlew@gmail.com'
__license__ = 'GPL Version 2'
try:
import twisted
except ImportError:
    raise SystemExit("twisted not found. Make sure you "
"have installed the Twisted core package.")
#python-sqlalchemy, python-twisted
from setuptools import setup
setup(
name = "MythTVArchiveServer",
version = __version__,
author = __author__,
author_email = __email__,
license = __license__,
packages=['MythTVArchiveServer', 'MythTVArchiveServer.controllers', 'MythTVArchiveServer.lib',
'MythTVArchiveServer.models', 'MythTVArchiveServer.util', 'MythTVArchiveServer.resource',
'twisted.plugins',],
package_data={
'twisted': ['plugins/mythtvarchiveserver_plugin.py',
'plugins/mythtvarchiveserver_media_plugin.py'],
},
)
|