| repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, lengths 0-8.16k) | middle (string, lengths 3-512) | suffix (string, lengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
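Each row is one fill-in-the-middle (FIM) training sample: a source file is cut into the `prefix`, `middle`, and `suffix` text columns, stored next to the repository metadata above. Below is a minimal sketch of how a row can be put back together, assuming the usual FIM convention that the three text columns concatenate to the original file; the `reassemble` helper and the hard-coded `example_row` (copied from the `yejianye/microblog` record further down) are illustrative only, not part of any dataset API.

```python
# Minimal sketch, assuming prefix + middle + suffix re-assembles the source file.
# `reassemble` is a hypothetical helper, not part of any dataset API.

def reassemble(row: dict) -> str:
    """Concatenate the three text columns of one dataset row."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Example row, copied from the yejianye/microblog record shown further down.
example_row = {
    "repo_name": "yejianye/microblog",
    "path": "asura/conf/dev.py",
    "language": "Python",
    "license": "mit",
    "size": 170,
    "score": 0.0,
    "prefix": "SQLALCHEMY",
    "middle": ("_DATABASE_URI = 'mysql+pymysql://root@localhost:3306/microblog'\n"
               "SQLALCHEMY_TRACK_MODIFICATIONS = False\n"
               "REDIS_HOST = 'loca"),
    "suffix": "lhost'\nREDIS_PORT = 6379\nDEBUG = False\n",
}

print(reassemble(example_row))  # prints the reconstructed dev.py config file
```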
USStateDept/FPA_Core | openspending/model/dimension.py | Python | agpl-3.0 | 20,686 | 0.00116 |
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer
from sqlalchemy.sql.expression import select, func
from openspending.core import db
from openspending.model.attribute import Attribute
from openspending.model.common import TableHandler, ALIAS_PLACEHOLDER
from openspending.model.constants import DATE_CUBES_TEMPLATE
from openspending.validation.data import InvalidData
class Dimension(object):
""" A base class for dimensions. A dimension is any property of an entry
that can serve to describe it beyond its purely numeric ``Measure``. """
def __init__(self, model, name, data):
self._data = data
self.model = model
self.name = name
self.key = data.get('key', False)
self.label = data.get('label', name)
self.type = data.get('type', name)
self.description = data.get('description', name)
self.facet = data.get('facet')
def join(self, from_clause):
return from_clause
def drop(self, bind):
del self.column
@property
def is_compound(self):
""" Test whether or not this dimension object is compound. """
return isinstance(self, CompoundDimension)
def __getitem__(self, name):
raise KeyError()
def __repr__(self):
return "<Dimension(%s)>" % self.name
def as_dict(self):
# FIXME: legacy support
d = self._data.copy()
|
d['key'] = self.name
d['name'] = self.name
return d
def has_attribute(self, attribute):
"""
Check whether an instance has a given attribute.
This method exposes hasattr for parts of OpenSpending
|
where hasattr isn't accessible (e.g. in templates)
"""
return hasattr(self, attribute)
class GeomTimeAttribute(Dimension, Attribute):
""" A simple dimension that does not create its own values table
but keeps its values directly as columns on the facts table. This is
somewhat unusual for a star schema but appropriate for properties such as
transaction identifiers whose cardinality roughly equals that of the facts
table.
"""
def __init__(self, model, name, data):
Attribute.__init__(self, model, name, data)
Dimension.__init__(self, model, name, data)
def __repr__(self):
return "<GeomTimeAttribute(%s)>" % self.name
def members(self, conditions="1=1", limit=None, offset=0):
""" Get a listing of all the members of the dimension (i.e. all the
distinct values) matching the filter in ``conditions``. """
query = select([self.column_alias], conditions,
limit=limit, offset=offset, distinct=True)
rp = self.model.bind.execute(query)
while True:
row = rp.fetchone()
if row is None:
break
yield row[0]
def load(self, bind, value):
return {self.column.name: value}
def num_entries(self, conditions="1=1"):
""" Return the count of entries on the model fact table having the
dimension set to a value matching the filter given by ``conditions``.
"""
query = select([func.count(func.distinct(self.column_alias))],
conditions)
rp = self.model.bind.execute(query)
return rp.fetchone()[0]
def to_cubes(self, mappings, joins):
""" Convert this dimension to a ``cubes`` dimension. """
mappings['%s.%s' % (self.name, self.name)] = unicode(self.column)
return {
'levels': [{
'name': self.name,
'label': self.label,
'key': self.name,
'attributes': [self.name]
}]
}
class AttributeDimension(Dimension, Attribute):
""" A simple dimension that does not create its own values table
but keeps its values directly as columns on the facts table. This is
somewhat unusual for a star schema but appropriate for properties such as
transaction identifiers whose cardinality roughly equals that of the facts
table.
"""
def __init__(self, model, name, data):
Attribute.__init__(self, model, name, data)
Dimension.__init__(self, model, name, data)
def __repr__(self):
return "<AttributeDimension(%s)>" % self.name
def members(self, conditions="1=1", limit=None, offset=0):
""" Get a listing of all the members of the dimension (i.e. all the
distinct values) matching the filter in ``conditions``. """
query = select([self.column_alias], conditions,
limit=limit, offset=offset, distinct=True)
rp = self.model.bind.execute(query)
while True:
row = rp.fetchone()
if row is None:
break
yield row[0]
def num_entries(self, conditions="1=1"):
""" Return the count of entries on the model fact table having the
dimension set to a value matching the filter given by ``conditions``.
"""
query = select([func.count(func.distinct(self.column_alias))],
conditions)
rp = self.model.bind.execute(query)
return rp.fetchone()[0]
def to_cubes(self, mappings, joins):
""" Convert this dimension to a ``cubes`` dimension. """
mappings['%s.%s' % (self.name, self.name)] = unicode(self.column)
return {
'levels': [{
'name': self.name,
'label': self.label,
'key': self.name,
'attributes': [self.name]
}]
}
class Measure(Attribute):
""" A value on the facts table that can be subject to aggregation,
and is specific to this one fact. This would typically be some
financial unit, i.e. the amount associated with the transaction or
a specific portion thereof (i.e. co-financed amounts). """
def __init__(self, model, name, data):
Attribute.__init__(self, model, name, data)
self.label = data.get('label', name)
def __getitem__(self, name):
raise KeyError()
def join(self, from_clause):
return from_clause
def __repr__(self):
return "<Measure(%s)>" % self.name
class CompoundDimension(Dimension, TableHandler):
""" A compound dimension is an outer table on the star schema, i.e. an
associated table that is referenced from the fact table. It can have
any number of attributes but in the case of OpenSpending it will not
have sub-dimensions (i.e. snowflake schema).
"""
def __init__(self, model, name, data):
Dimension.__init__(self, model, name, data)
self.taxonomy = data.get('taxonomy', name)
self.attributes = []
for name, attr in data.get('attributes', {}).items():
self.attributes.append(Attribute(self, name, attr))
# TODO: possibly use a LRU later on?
self._pk_cache = {}
def join(self, from_clause):
""" This will return a query fragment that can be used to establish
an aliased join between the fact table and the dimension table.
"""
return from_clause.join(
self.alias, self.alias.c.id == self.column_alias)
def drop(self, bind):
""" Drop the dimension table and all data within it. """
self._drop(bind)
del self.column
@property
def column_alias(self):
""" This an aliased pointer to the FK column on the fact table. """
return self.model.alias.c[self.column.name]
@property
def selectable(self):
return self.alias
def __getitem__(self, name):
for attr in self.attributes:
if attr.name == name:
return attr
raise KeyError()
def init(self, meta, fact_table, make_table=True):
column = Column(self.name + '_id', Integer, index=True)
fact_table.append_column(column)
if make_table is True:
self._init_table(meta, self.model.source.dataset.name, self.name)
for attr in self.attributes:
attr.column = attr.init(meta, self.table)
alias_
|
open-machine-learning/mldata-utils | ml2h5/converter/basehandler.py | Python | gpl-3.0 | 11,241 | 0.006939 |
import os, h5py, numpy
from scipy.sparse import csc_matrix
import ml2h5.task
from ml2h5 import VERSION_MLDATA
from ml2h5.converter import ALLOWED_SEPERATORS
class BaseHandler(object):
"""Base handler class.
It is the base for classes to handle different data formats.
It implicitly handles HDF5.
@cvar str_type: string type to be used for variable length strings in h5py
@type str_type: numpy.dtype
@ivar fname: name of file to handle
@type fname: string
@ivar seperator: seperator to seperate variables in examples
@type seperator: string
"""
str_type = h5py.new_vlen(numpy.str)
def __init__(self, fname, seperator=None, compression=None, merge=False):
"""
@param fname: name of in-file
@type fname: string
@param seperator: seperator used to seperate examples
@type seperator: string
"""
self.fname = fname
self.compression = compression
self.set_seperator(seperator)
self.merge = merge
def set_seperator(self, seperator):
"""Set the seperator to seperate variables in examples.
@param seperator: seperator to use
@type seperator: string
"""
if seperator in ALLOWED_SEPERATORS:
self.seperator = seperator
else:
raise AttributeError(_("Seperator '%s' not allowed!" % seperator))
def warn(self, msg):
"""Print a warning message.
@param msg: message to print
@type msg: string
"""
return
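# NOTE: the early return above means the print below is never reached, so warnings are effectively silenced.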
print('WARNING: ' + msg)
def _convert_to_ndarray(self,path,val):
"""converts a attribut to a set of ndarrays depending on the datatype
@param path: path of the attribute in the h5 file
@type path: string
@param val: data of the attribute
@type val: csc_matrix/ndarray
@rtype: list of (string,ndarray) tuples
"""
A=val
out=[]
dt = h5py.special_dtype(vlen=str)
if type(A)==csc_matrix: # sparse
out.append((path+'_indices', A.indices))
out.append((path+'_indptr', A.indptr))
out.append((path, A.data))
elif type(A)==list and len(A)>0 and type(A[0])==str:
out.append((path, numpy.array(A, dtype=dt)))
else: # dense
out.append((path, numpy.array(A)))
return out
def get_data_as_list(self,data):
""" this needs to `transpose' the data """
dl=[]
group=self.get_data_group(data)
lengths=dict()
for o in data['ordering']:
x=data[group][o]
#if numpy.issubdtype(x.dtype, numpy.int):
# data[group][o]=x.astype(numpy.float64)
try:
lengths[o]=data[group][o].shape[1]
except (AttributeError, IndexError):
lengths[o]=len(data[group][o])
l=set(lengths.values())
assert(len(l)==1)
l=l.pop()
for i in range(l):
line=[]
for o in data['ordering']:
try:
line.extend(data[group][o][:,i])
except:
line.append(data[group][o][i])
dl.append(line)
return dl
def get_name(self):
"""Get dataset name from non-HDF5 file
@return: comment
@rtype: string
"""
# without str() it might barf
return str(os.path.basename(self.fname).split('.')[0])
def get_data_group(self, data):
if data and 'group' in data:
return data['group']
else:
return 'data'
def get_descr_group(self, data):
if data and 'group' in data:
return data['group'] + '_descr'
else:
return 'data_descr'
def get_datatype(self, values):
"""Get data type of given values.
@param values: list of values to check
@type values: list
@return: data type to use for conversion
@rtype: numpy.int32/numpy.double/self.str_type
"""
dtype = None
for v in values:
if isinstance(v, int):
dtype = numpy.int32
elif isinstance(v, float):
dtype = numpy.double
else: # maybe int/double in string
try:
tmp = int(v)
if not dtype: # a previous nan might set it to double
dtype = numpy.int32
except ValueError:
try:
tmp = float(v)
dtype = numpy.double
except ValueError:
return self.str_type
return dtype
def read(self):
"""Get data and description in-memory
Retrieve contents from file.
@return: example names, ordering and the examples
@rtype: dict of: list of names, list of ordering and dict of examples
"""
# we want the exception handled else
|
where
if not h5py.is_hdf5(self.fname):
return
h5 = h5py.File(self.fname, 'r')
contents = {
'name': h5
|
.attrs['name'],
'comment': h5.attrs['comment'],
'mldata': h5.attrs['mldata'],
}
if contents['comment']=='Task file':
contents['task']=dict()
contents['ordering']=list()
group='task'
for field in ml2h5.task.task_data_fields:
if field in h5[group]:
contents['ordering'].append(field)
else:
contents['data']=dict()
contents['ordering']=h5['/data_descr/ordering'][...].tolist()
group='data'
contents['group']=group
if '/%s_descr/names' % group in h5:
contents['names']=h5['/%s_descr/names' % group][...].tolist()
if '/%s_descr/types' % group in h5:
contents['types'] = h5['/%s_descr/types' % group ][...]
for name in contents['ordering']:
vname='/%s/%s' % (group, name)
sp_indices=vname+'_indices'
sp_indptr=vname+'_indptr'
if sp_indices in h5['/%s' % group] and sp_indptr in h5['/%s' % group]:
contents[group][name] = csc_matrix((h5[vname], h5[sp_indices], h5[sp_indptr])
)
else:
d = numpy.array(h5[vname],order='F')
try:
d=d['vlen']
except:
pass
contents[group][name] = d
h5.close()
return contents
def read_data_as_array(self):
"""Read data from file, and return an array
@return: an array with all data
@rtype: numpy ndarray
"""
contents = self.read()
#group = self.get_data_group(data)
data = contents['data']
ordering = contents['ordering']
if len(data[ordering[0]].shape)>1:
num_examples = data[ordering[0]].shape[1]
else:
num_examples = len(data[ordering[0]])
data_array = numpy.zeros((0, num_examples))
for cur_feat in ordering:
data_array = numpy.vstack([data_array, data[cur_feat]])
return data_array.T
def _get_merged(self, data):
"""Merge given data where appropriate.
String arrays are not merged, but all int and all double are merged
into one matrix.
@param data: data structure as returned by read()
@type data: dict
@return: merged data structure
@rtype: dict
"""
merged = {}
ordering = []
path = ''
idx = 0
merging = None
group = self.get_data_group(data)
for name in data['ordering']:
val = data[group][name]
if type(val) == csc_matrix:
merging = None
path = name
merged[path] = val
ordering.append(path)
continue
if name.endswith('_indices') or name.endswith('_indptr'):
merging = None
path
|
kohr-h/odl | examples/solvers/proximal_lang_tomography.py | Python | mpl-2.0 | 2,158 | 0 |
"""Tomography with TV regularization using the ProxImaL solver.
Solves the optimization problem
min_{0 <= x <= 1} ||A(x) - g||_2^2 + 0.2 || |grad(x)| ||_1
Where ``A`` is a parallel beam forward projector, ``grad`` the spatial
gradient and ``g`` is given noisy data.
"""
import numpy as np
import odl
import proximal
# --- Set up the forward operator (ray transform) --- #
# Reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300], dtype='float32')
# Make a parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = pi
angle_partition = odl.uniform_partition(0, np.pi, 360)
# Detector: uniformly sampled, n = 512, min = -30, max = 30
detector_partition = odl.uniform_partition(-30, 30, 512)
geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)
# Initialize the ray transform (forward projection).
ray_trafo = odl.tomo.RayTransform(reco_space, geometry)
# Convert ray transform to proximal language operator
proximal_lang_ray_trafo = odl.as_proximal_lang_operator(ray_trafo)
# Create sinogram of forward projected phantom with noise
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
phantom.show('phantom')
data = ray_trafo(phantom)
data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1
data.show('noisy data')
# Convert to array for ProxImaL
rhs_arr = data.asarray()
# Set up optimization problem
# Note that proximal is not aware of the underlying space and only works with
# matrices. Hence the norm in proximal does not match the norm in the ODL space
# exactly.
x = proximal.Variable(reco_space.shape)
funcs = [proximal.sum_squares(proximal_lang_ray_trafo(x) - rhs_arr),
0.2 * proximal.norm1(proximal.grad(x)),
proximal.nonneg(x),
proximal.nonneg(1
|
- x)]
# Solve the problem using ProxImaL
prob = proximal.Problem(funcs)
prob.s
|
olve(verbose=True)
# Convert back to odl and display result
result_odl = reco_space.element(x.value)
result_odl.show('ProxImaL result', force_show=True)
|
yejianye/microblog | asura/conf/dev.py | Python | mit | 170 | 0 |
SQLALCHEMY
|
_DATABASE_URI = 'mysql+pymysql://root@localhost:3306/microblog'
SQLALCHEMY_TRACK_MODIFICATIONS = False
REDIS_HOST = 'loca
|
lhost'
REDIS_PORT = 6379
DEBUG = False
|
annahs/atmos_research | sqlite_test.py | Python | mit | 1,520 | 0.028289 |
import sqlite3
from datetime import datetime
from pprint import pprint
import sys
import numpy as np
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
#sp2b_file TEXT, eg 20120405x001.sp2b
#file_index INT,
#instr TEXT, eg UBCSP2, ECSP2
#instr_locn TEXT, eg WHI, DMT, POLAR6
#particle_type TEXT, eg PSL, nonincand, incand, Aquadag
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#zero_crossing_posn FLOAT,
#coat_thickness_from_actual_scat_amp FLOAT,
#FF_fit_function TEXT,
#LF_fit_function TEXT,
#zeroX_to_LEO_limit FLOAT
#UNIQUE (sp2b_file, file_index, instr)
#)''')
|
#c.execute('''ALTER TABLE SP2_coating_analysis ADD COLUMN FF_fit_function TEXT''')
#c.execute('''ALTER TABLE SP2_coating_analysis ADD COLUMN zeroX_to_LEO_limit FLOAT''')
#c.execute('''CREATE INDEX SP2_coating_analysis_index1 ON SP2_coating_analysis(instr,instr_locn,par
|
ticle_type,unix_ts_utc,unix_ts_utc,FF_gauss_width,zeroX_to_peak)''')
#c.execute('''SELECT * FROM SP2_coating_analysis''')
c.execute('''DELETE FROM SP2_coating_analysis WHERE instr=? and instr_locn=? and particle_type=?''', ('UBCSP2', 'POLAR6','nonincand' ))
#names = [description[0] for description in c.description]
#pprint(names)
#print c.fetchone()
#
conn.close()
|
leppa/home-assistant | tests/components/yessssms/test_notify.py | Python | apache-2.0 | 12,228 | 0.001063 |
"""The tests for the notify yessssms platform."""
import logging
import unittest
from unittest.mock import patch
import pytest
import requests_mock
from homeassistant.components.yessssms.const import CONF_PROVIDER
import homeassistant.components.yessssms.notify as yessssms
from homeassistant.const import CONF_PASSWORD, CONF_RECIPIENT, CONF_USERNAME
from homeassistant.setup import async_setup_component
@pytest.fixture(name="config")
def config_data():
"""Set valid config data."""
config = {
"notify": {
"platform": "yessssms",
"name": "sms",
CONF_USERNAME: "06641234567",
CONF_PASSWORD: "secretPassword",
CONF_RECIPIENT: "06509876543",
CONF_PROVIDER: "educom",
}
}
return config
@pytest.fixture(name="valid_settings")
def init_valid_settings(hass, config):
"""Initialize component with valid settings."""
return async_setup_component(hass, "notify", config)
@pytest.fixture(name="invalid_provider_settings")
def init_invalid_provider_settings(hass, config):
"""Set invalid provider data and initalize component."""
config["notify"][CONF_PROVIDER] = "FantasyMobile" # invalid provider
return async_setup_component(hass, "notify", config)
@pytest.fixture(name="invalid_login_data")
def mock_invalid_login_data():
"""Mock invalid login data."""
path = "homeassistant.components.yessssms.notify.YesssSMS.login_data_valid"
with patch(path, return_value=False):
yield
@pytest.fixture(name="valid_login_data")
def mock_valid_login_data():
"""Mock valid login data."""
path = "homeassistant.components.yessssms.notify.YesssSMS.login_data_valid"
with patch(path, return_value=True):
yield
@pytest.fixture(name="connection_error")
def mock_connection_error():
"""Mock a connection error."""
path = "homeassistant.components.yessssms.notify.YesssSMS.login_data_valid"
with patch(path, side_effect=yessssms.YesssSMS.ConnectionError()):
yield
async def test_unsupported_provider_error(hass, caplog, invalid_provider_settings):
"""Test for error on unsupported provider."""
await invalid_provider_settings
for record in caplog.records:
if (
record.levelname == "ERROR"
and record.name == "homeassistant.components.yessssms.notify"
):
assert (
"Unknown provider: provider (fantasymobile) is not known to YesssSMS"
in record.message
)
assert (
"Unknown provider: provider (fantasymobile) is not known to YesssSMS"
in caplog.text
)
assert not hass.services.has_service("notify", "sms")
async def test_false_login_data_error(hass, caplog, valid_settings, invalid_login_data):
"""Test login data check error."""
await valid_settings
assert not hass.services.has_service("notify", "sms")
for record in caplog.records:
if (
record.levelname == "ERROR"
and record.name == "homeassistant.components.yessssms.notify"
):
assert (
"Login data is not valid! Please double check your login data at"
in record.message
)
async def test_init_success(hass, caplog, valid_settings, valid_login_data):
"""Test for successful init of yessssms."""
caplog.set_level(logging.DEBUG)
await valid_settings
assert hass.services.has_service("notify", "sms")
messages = []
for record in caplog.records:
if (
record.levelname == "DEBUG"
and record.name == "homeassistant.components.yessssms.notify"
):
messages.append(record.message)
assert "Login data for 'educom' valid" in messages[0]
assert (
"initialized; library version: {}".format(yessssms.YesssSMS("", "").version())
in messages[1]
)
async def test_connection_error_on_init(hass, caplog, valid_settings, connection_error):
"""Test for connection error on init."""
caplog.set_level(logging.DEBUG)
await valid_settings
assert hass.services.has_service("notify", "sms")
for record in caplog.records:
if (
record.levelname == "WARNING"
and record.name == "homeassistant.components.yessssms.notify"
):
assert (
"Connection Error, could not verify login data for '{}'".format(
"educom"
)
in record.message
)
for record in caplog.records:
if (
record.levelname == "DEBUG"
and record.name == "homeassistant.components.yessssms.notify"
):
assert (
"initialized; library version: {}".format(
yessssms.YesssSMS("", "").version()
)
in record.message
)
class TestNotifyYesssSMS(unittest.TestCase):
"""Test the yessssms notify."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
login = "06641234567"
passwd = "testpasswd"
recipient = "06501234567"
client = yessssms.YesssSMS(login, passwd)
self.yessssms = yessssms.YesssSMSNotificationService(client, recipient)
@requests_mock.Mocker()
def test_login_error(self, mock):
"""Test login that fails."""
mock.register_uri(
requests_mock.POST,
# pylint: disable=protected-access
self.yessssms.yesss._login_url,
status_code=200,
text="BlaBlaBla<strong>Login nicht erfolgreichBlaBla",
)
message = "Testing YesssSMS platform :)"
with self.assertLogs("homeassistant.components.yessssms.notify", level="ERROR"):
self.yessssms.send_message(message)
self.assertTrue(mock.called)
self.assertEqual(mock.call_count, 1)
def test_empty_message_error(self):
"""Test for an empty SMS message error."""
message = ""
with self.assertLogs("homeassistant.components.yessssms.notify", level="ERROR"):
self.yessssms.send_message(message)
@requests_mock.Mocker()
def test_error_account_suspended(self, mock):
"""Test login that fails after multiple attempts."""
mock.register_uri(
"POST",
# pylint: disable=protected-access
self.yessssms.yesss._login_url,
status_code=200,
text="BlaBlaBla<strong>Login nicht erfolgreichBlaBla",
)
message = "Testing YesssSMS platform :)"
with self.assertLogs("homeassistant.components.yessssms.notify", level="ERROR"):
self.yessssms.send_message(message)
self.assertTrue(mock.called)
self.assertEqual(mock.call_count, 1)
mock.register_uri(
"POST",
# pylint: disable=protected-access
self.yessssms.yesss._login_url,
status_code=200,
text="Wegen 3 ungültigen Login-Versuchen ist Ihr Account für "
"eine Stunde gesperrt.",
)
message = "Testing YesssSMS platform :)"
with self.assertLogs("homeassistant.components.yessssms.noti
|
fy", level="ERROR"):
self.yessssms.send_message(message)
self.assertTrue(mock.called)
self.assertEqual(mock.call_count, 2)
def test_error_account_suspended_2(self):
"""Test login that fails after multiple attempts."""
message = "Testing YesssSMS platform :)"
# pylint: disable=protected-access
self.yessssms.yesss._suspended = True
with self.assertLogs(
"homeassistant.components.yessss
|
ms.notify", level="ERROR"
) as context:
self.yessssms.send_message(message)
self.assertIn("Account is suspended, cannot send SMS.", context.output[0])
@requests_mock.Mocker()
def test_send_message(self, mock):
"""Test send message."""
message = "Testing YesssSMS platform :)"
mock.register_uri(
"POST",
# pylint: disable=protected-access
se
|
looprock/Megaphone | sample_service.py | Python | isc | 1,823 | 0.016456 |
#!/usr/bin/env python
import json
import sys
import os
from bottle import route, run, get
import time
import httplib
server = "127.0.0.1"
statport = "18081"
host = "%s:18001" % server
staturl = "http://%s:%s/status" % (server,statport)
blob = {"id": "bar", "url": staturl}
data = json.dumps(blob)
connection = httplib.HTTPConnection(host)
connection.request('POST', '/checks', data)
result = connection.getresponse()
print "RESULT: %s - %s" % (result.status, result.reason)
def usage():
print "%s [status: OK,Unknown,Warning,Critical]" % (sys.argv[0])
msgs = {
"OK": "Everything is gro
|
ovy!",
"Unknown": "Unknown error!",
"Warning": "Houstin, I think we have a warning!",
"Critical": "Danger Will Rogers! Danger!"
}
t = len(sys.argv)
if t < 2:
usage()
sys.exit(1)
else:
statusm = sys.argv[1]
t = time.localtime()
ts = time.strftime('%Y-%m-%dT%H:%M:%S%Z', t)
rootdir = "./"
# Change working directory so relative paths (and template lookup) work again
root = os.path.join(os.path.dirname(__file__))
sy
|
s.path.insert(0, root)
# generate nested python dictionaries, copied from here:
# http://stackoverflow.com/questions/635483/what-is-the-best-way-to-implement-nested-dictionaries-in-python
class AutoVivification(dict):
"""Implementation of perl's autovivification feature."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
@get('/status')
def status():
data = AutoVivification()
data['id'] = "bar"
data['status'] = statusm
data['date'] = ts
data['message'] = msgs[statusm]
data['version'] = "1.0.0"
return data
run(host='localhost', port=statport, debug=True)
|
vermouth1992/Leetcode | python/594.longest-harmonious-subsequence.py | Python | mit | 1,636 | 0.014059 |
#
# @lc app=leetcode id=594 lang=python3
#
# [594] Longest Harmonious Subsequence
#
# https://leetcode.com/problems/longest-harmonious-subsequence/description/
#
# algorithms
# Easy (51.44%)
# Total Accepted: 97.9K
# Total Submissions: 190.2K
# Testcase Example: '[1,3,2,2,5,2,3,7]'
#
# We define a harmonious array as an array where the difference between its
# maximum value and its minimum value is exactly 1.
#
# Giv
|
en an integer array nums, return the length of its longest harmonious
# subsequence among all its possible subsequences.
#
# A subsequenc
|
e of array is a sequence that can be derived from the array by
# deleting some or no elements without changing the order of the remaining
# elements.
#
#
# Example 1:
#
#
# Input: nums = [1,3,2,2,5,2,3,7]
# Output: 5
# Explanation: The longest harmonious subsequence is [3,2,2,2,3].
#
#
# Example 2:
#
#
# Input: nums = [1,2,3,4]
# Output: 2
#
#
# Example 3:
#
#
# Input: nums = [1,1,1,1]
# Output: 0
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 2 * 10^4
# -10^9 <= nums[i] <= 10^9
#
#
#
from typing import List
class Solution:
def findLHS(self, nums: List[int]) -> int:
num_to_freq = dict()
for num in nums:
if num not in num_to_freq:
num_to_freq[num] = 0
num_to_freq[num] += 1
lhs = 0
for num, freq in num_to_freq.items():
num_add_one_freq = num_to_freq.get(num + 1, -1)
if num_add_one_freq != -1:
curr_lhs = freq + num_add_one_freq
if curr_lhs > lhs:
lhs = curr_lhs
return lhs
|
chen0031/nupic | tests/unit/nupic/encoders/random_distributed_scalar_test.py | Python | agpl-3.0 | 19,698 | 0.004315 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from cStringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import numpy
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
from nupic.support.unittesthelpers.algorithm_test_helpers import getSeed
from nupic.encoders.random_distributed_scalar import (
RandomDistributedScalarEncoder
)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.random_distributed_scalar_capnp import (
RandomDistributedScalarEncoderProto
)
# Disable warnings about accessing protected members
# pylint: disable=W0212
def computeOverlap(x, y):
"""
Given two binary arrays, compute their overlap. The overlap is the number
of bits where x[i] and y[i] are both 1
"""
return (x & y).sum()
def validateEncoder(encoder, subsampling):
"""
Given an encoder, calculate overlaps statistics and ensure everything is ok.
We don't check every possible combination for speed reasons.
"""
for i in range(encoder.minIndex, encoder.maxIndex+1, 1):
for j in range(i+1, encoder.maxIndex+1, subsampling):
if not encoder._overlapOK(i, j):
return False
return True
class RandomDistributedScalarEncoderTest(unittest.TestCase):
"""
Unit tests for RandomDistributedScalarEncoder class.
"""
def testEncoding(self):
"""
Test basic encoding functionality. Create encodings without crashing and
check they contain the correct number of on and off bits. Check some
encodings for expected overlap. Test that encodings for old values don't
change once we generate new buckets.
"""
# Initialize with non-default parameters and encode with a number close to
# the offset
encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
w=23, n=500, offset=0.0)
e0 = encoder.encode(-0.1)
self.assertEqual(e0.sum(), 23, "Number of on bits is incorrect")
self.assertEqual(e0.size, 500, "Width of the vector is incorrect")
self.assertEqual(encoder.getBucketIndices(0.0)[0], encoder._maxBuckets / 2,
"Offset doesn't correspond to middle bucket")
self.assertEqual(len(encoder.bucketMap), 1, "Number of buckets is not 1")
# Encode with a number that is resolution away from offset. Now we should
# have two buckets and this encoding should be one bit away from e0
e1 = encoder.encode(1.0)
self.assertEqual(len(encoder.bucketMap), 2, "Number of buckets is not 2")
self.assertEqual(e1.sum(), 23, "Number of on bits is incorrect")
self.assertEqual(e1.size, 500, "Width of the vector is incorrect")
self.assertEqual(computeOverlap(e0, e1), 22, "Overlap is not equal to w-1")
# Encode with a number that is resolution*w away from offset. Now we should
# have many buckets and this encoding should have very little overlap with
# e0
e25 = encoder.encode(25.0)
|
self.assertGreater(len(encoder.bucketMap), 23,
|
"Number of buckets is not 2")
self.assertEqual(e25.sum(), 23, "Number of on bits is incorrect")
self.assertEqual(e25.size, 500, "Width of the vector is incorrect")
self.assertLess(computeOverlap(e0, e25), 4, "Overlap is too high")
# Test encoding consistency. The encodings for previous numbers
# shouldn't change even though we have added additional buckets
self.assertTrue(numpy.array_equal(e0, encoder.encode(-0.1)),
"Encodings are not consistent - they have changed after new buckets "
"have been created")
self.assertTrue(numpy.array_equal(e1, encoder.encode(1.0)),
"Encodings are not consistent - they have changed after new buckets "
"have been created")
def testMissingValues(self):
"""
Test that missing values and NaN return all zero's.
"""
encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
empty = encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
self.assertEqual(empty.sum(), 0)
empty = encoder.encode(float("nan"))
self.assertEqual(empty.sum(), 0)
def testResolution(self):
"""
Test that numbers within the same resolution return the same encoding.
Numbers outside the resolution should return different encodings.
"""
encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
# Since 23.0 is the first encoded number, it will be the offset.
# Since resolution is 1, 22.9 and 23.4 should have the same bucket index and
# encoding.
e23 = encoder.encode(23.0)
e23p1 = encoder.encode(23.1)
e22p9 = encoder.encode(22.9)
e24 = encoder.encode(24.0)
self.assertEqual(e23.sum(), encoder.w)
self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(),
"Numbers within resolution don't have the same encoding")
self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(),
"Numbers within resolution don't have the same encoding")
self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(),
"Numbers outside resolution have the same encoding")
e22p9 = encoder.encode(22.5)
self.assertNotEqual((e23 == e22p9).sum(), encoder.getWidth(),
"Numbers outside resolution have the same encoding")
def testMapBucketIndexToNonZeroBits(self):
"""
Test that mapBucketIndexToNonZeroBits works and that max buckets and
clipping are handled properly.
"""
encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150)
# Set a low number of max buckets
encoder._initializeBucketMap(10, None)
encoder.encode(0.0)
encoder.encode(-7.0)
encoder.encode(7.0)
self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets,
"_maxBuckets exceeded")
self.assertTrue(
numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(-1),
encoder.bucketMap[0]),
"mapBucketIndexToNonZeroBits did not handle negative"
" index")
self.assertTrue(
numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(1000),
encoder.bucketMap[9]),
"mapBucketIndexToNonZeroBits did not handle negative index")
e23 = encoder.encode(23.0)
e6 = encoder.encode(6)
self.assertEqual((e23 == e6).sum(), encoder.getWidth(),
"Values not clipped correctly during encoding")
ep8 = encoder.encode(-8)
ep7 = encoder.encode(-7)
self.assertEqual((ep8 == ep7).sum(), encoder.getWidth(),
"Values not clipped correctly during encoding")
self.assertEqual(encoder.getBucketIndices(-8)[0], 0,
"getBucketIndices returned negative bucket index")
self.assertEqual(encoder.getBucketIndices(23)[0], encoder._maxBuckets-1,
"getBucketIndices returned bucket index that is too"
" large")
def testParameterChecks(self):
"""
Test that some bad construction parameters get handled.
"""
# n must be >= 6*w
with self.assertRaises(ValueError):
RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=int(5.9*21))
# n must be an int
with s
|
nlproc/splunkml | bin/nlcluster.py | Python | apache-2.0 | 4,186 | 0.029384 |
#!env python
import os
import sys
sys.path.append(
os.path.join(
os.environ.get( "SPLUNK_HOME", "/opt/splunk/6.1.3" ),
"etc/apps/framework/contrib/splunk-sdk-python/1.3.0",
)
)
from collections import Counter, OrderedDict
from math import log
from nltk import tokenize
import execnet
import json
from splunklib.searchcommands import Configuration, Option
from splunklib.searchcommands import dispatch, validators
from remote_commands import OptionRemoteStreamingCommand
class FloatValidator(validators.Integer):
def __init__(self, **kwargs):
super(FloatValidator, self).__init__(**kwargs)
def __call__(self, value):
if value is not None:
value = float(value)
self.check_range(value)
return value
@Configuration(clear_required_fields=True, overrides_timeorder=True)
class NLCluster(OptionRemoteStreamingCommand):
alg = Option(require=False, default="mean_shift")
model = Option(require=False, default="lsi")
# threshold = Option(require=False, default=0.01, validate=FloatValidator(minimum=0,maximum=1))
code = """
import sys, os, numbers
try:
import cStringIO as StringIO
except:
import StringIO
import numpy as np
import scipy.sparse as sp
from gensim.corpora import TextCorpus, Dictionary
from gensim.models import LsiModel, TfidfModel, LdaModel
from gensim.similarities import SparseMatrixSimilarity
from gensim.matutils import corpus2dense
from sklearn import cluster, covariance
if __name__ == "__channelexec__":
args = channel.receive()
# threshold = args['threshold']
fields = args.get('fieldnames') or ['_raw']
records = []
for record in channel:
if not record:
break
records.append(record)
def is_n
|
umber(str):
try:
n = float(str)
|
return True
except ValueError:
return False
need_sim = args['alg'] in {'affinity_propagation','spectral'}
if records:
records = np.array(records)
input = None # StringIO.StringIO()
X = None # sp.lil_matrix((len(records),len(fields)))
for i, record in enumerate(records):
nums = []
strs = []
for field in fields:
if isinstance(record.get(field), numbers.Number):
nums.append(record[field])
elif is_number(record.get(field) or ""):
nums.append(record[field])
else:
strs.append(str(record.get(field) or "").lower())
if strs:
if input is None:
input = StringIO.StringIO()
print >> input, " ".join(strs)
else:
if X is None:
X = sp.lil_matrix((len(records),len(fields)))
X[i] = np.array(nums, dtype=np.float64)
if input is not None:
corpus = TextCorpus(input)
if args['alg'] == 'spectral':
args['model'] = 'tfidf'
if args['model'] == 'lsi':
model = LsiModel(corpus)
elif args['model'] == 'tfidf':
model = TfidfModel(corpus)
## Disable this for now
#
# elif args['model'] == 'lda':
# model = LdaModel(corpus)
#
##
else:
model = None
# TODO: Persist model?
if model:
num_terms = len(model.id2word or getattr(model, 'dfs',[]))
if need_sim:
index = SparseMatrixSimilarity(model[corpus], num_terms=num_terms)
X = index[corpus].astype(np.float64)
else:
X = corpus2dense(model[corpus], num_terms)
else:
channel.send({ 'error': "Unknown model %s" % args['model']})
else:
X = X.toarray()
if need_sim:
model = covariance.EmpiricalCovariance()
model.fit(X)
X = model.covariance_
if X is not None:
if args['alg'] == 'affinity_propagation':
_, labels = cluster.affinity_propagation(X)
elif args['alg'] == "mean_shift":
_, labels = cluster.mean_shift(X)
elif args['alg'] == 'spectral':
labels = cluster.spectral_clustering(X)
elif args['alg'] == 'dbscan':
_, labels = cluster.dbscan(X)
else:
labels = None
if labels != None:
n_labels = labels.max()
clustered = []
for i in range(n_labels + 1):
clust = records[labels == i]
record = clust[0]
record['cluster_label'] = i + 1
record['cluster_size'] = len(clust)
channel.send(record)
else:
channel.send({ 'error': "Unknown algorithm %s" % args['alg']})
"""
def __dir__(self):
return ['alg', 'model']
dispatch(NLCluster, sys.argv, sys.stdin, sys.stdout, __name__)
|
ahye/FYS2140-Resources | examples/plotting/plot_initial.py | Python | mit | 964 | 0.018672 |
#!/usr/bin/env python
"""
Created on Mon 2 Dec 2013
Plotter grunntilstanden for en harmonisk oscillator.
@author Benedicte Emilie Brakken
"""
from numpy import *
from matplotlib.pyplot import *
# Kun for aa teste
omega = 1 # [rad / s]
# Fysiske parametre
hbarc = 0.1973 # [MeV pm]
E0p = 938.27 # [MeV]
c = 3e2 # [pm / as]
# x-verdier
x = linspace( -pi, pi, 1e4 )
def Psi0( x ):
'''
Grunntilstanden for en harmoni
|
sk oscillator.
'''
A = ( E0p * omega / ( pi * hbarc * c ) )**0.25
B = exp( - E0p * omega / ( 2 * hbarc * c) * x**2 )
return A * B
# Fetch the function values and store them in the array Psi
Psi = Psi0(x)
# Create a new figure window
figure()
# Plot x against Psi0
plot( x, abs( Psi )**2 )
# Label along the x axis
xlabel('$x$ [pm]')
# Lab
|
el along the y axis
ylabel('$|\Psi_0 (x, 0)|^2$ [1/pm]')
# Title of the plot
title('Grunntilstanden for harmonisk oscillator')
# Show what we have plotted
show()
|
mhbu50/erpnext | erpnext/regional/france/utils.py | Python | gpl-3.0 | 222 | 0.018018 |
# Copyright (c) 2018, Fra
|
ppe Technologies and contributors
# For license information, please see license.txt
# don't remove this function it
|
is used in tests
def test_method():
'''test function'''
return 'overridden'
|
mozman/ezdxf | src/ezdxf/entities/view.py | Python | mit | 5,728 | 0.000698 |
# Copyright (c) 2019-2022, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING
import logging
from ezdxf.lldxf import validator
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
XType,
RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER, DXF2000, DXF2007, DXF2010
from ezdxf.math import Vec3, NULLVEC
from ezdxf.entities.dxfentity import base_class, SubclassProcessor, DXFEntity
from ezdxf.entities.layer import acdb_symbol_table_record
from .factory import register_entity
logger = logging.getLogger("ezdxf")
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, DXFNamespace
__all__ = ["View"]
acdb_view = DefSubclass(
"AcDbViewTableRecord",
{
"name": DXFAttr(2, validator=validator.is_valid_table_name),
"flags": DXFAttr(70, default=0),
"height": DXFAttr(40, default=1),
"width": DXFAttr(41, default=1),
"center": DXFAttr(10, xtype=XType.point2d, default=NULLVEC),
"direction": DXFAttr(
11,
xtype=XType.point3d,
default=Vec3(1, 1, 1),
validator=validator.is_not_null_vector,
),
"target": DXFAttr(12, xtype=XType.point3d, default=NULLVEC),
"focal_length": DXFAttr(42, default=50),
"front_clipping": DXFAttr(43, default=0),
"back_clipping": DXFAttr(44, default=0),
"view_twist": DXFAttr(50, default=0),
"view_mode": DXFAttr(71, default=0),
# Render mode:
# 0 = 2D Optimized (classic 2D)
# 1 = Wireframe
# 2 = Hidden line
# 3 = Flat shaded
# 4 = Gouraud shaded
# 5 = Flat shaded with wireframe
# 6 = Gouraud shaded with wireframe
"render_mode": DXFAttr(
281,
default=0,
dxfversion=DXF2000,
validator=validator.is_in_integer_range(0, 7),
fixer=RETURN_DEFAULT,
),
# 1 if there is an UCS associated to this view, 0 otherwise.
"ucs": DXFAttr(
72,
default=0,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
"ucs_origin": DXFAttr(110, xtype=XType.point3d, dxfversion=DXF2000),
"ucs_xaxis": DXFAttr(
111,
xtype=XType.point3d,
dxfversion=DXF2000,
validator=validator.is_not_null_vector,
),
"ucs_yaxis": DXFAttr(
112,
xtype=XType.point3d,
dxfversion=DXF2000,
validator=validator.is_not_null_vector,
),
# 0 = UCS is not orthographic
# 1 = Top
# 2 = Bottom
# 3 = Front
# 4 = Back
# 5 = Left
# 6 = Right
"ucs_ortho_type": DXFAttr(
79,
dxfversion=DXF2000,
validator=validator.is_in_integer_range(0, 7),
fixer=lambda x: 0,
),
"elevation": DXFAttr(146, dxfversion=DXF2000, default=0),
# handle of AcDbUCSTableRecord if UCS is a named UCS. If not present,
# then UCS is unnamed:
"ucs_handle": DXFAttr(345, dxfversion=DXF2000),
# handle of AcDbUCSTableRecord of base UCS if UCS is orthographic (79 code
# is non-zero). If not present and 79 code is non-zero, then base UCS is
# taken to be WORLD
"base_ucs_handle": DXFAttr(346, dxfversion=DXF2000),
# 1 if the camera is plottable
"camera_plottable": DXFAttr(
73,
default=0,
dxfversion=DXF2007,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
"background_handle": DXFAttr(332, optional=True, dxfversion=DXF2007),
"live_selection_handle": DXFAttr(
334, optional=True, dxfversion=DXF2007
),
"visual_style_handle": DXFAttr(348, optional=True, dxfversion=DXF2007),
"sun_handle": DXFAttr(361, optional=True, dxfversion=DXF2010),
},
)
acdb_view_group_codes = group_code_mapping(acdb_view)
@register_entity
class View(DXFEntity):
"""DXF VIEW entity"""
DXFTYPE = "VIEW"
DXFATTRIBS = DXFAttributes(base_class, acdb_symbol_table_record, acdb_view)
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor:
|
processor.simple_dxfattribs_loader(dxf, acdb_view_group_codes) # type: ignore
return dxf
def export_entity(self, tagwriter: "TagWriter") -> None:
super().export_entity(tagwriter)
if tagwriter.dxfversion > DXF12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_symbol_table_record.name)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_view.name)
self.dxf.export_dxf_attribs(
|
tagwriter,
[
"name",
"flags",
"height",
"width",
"center",
"direction",
"target",
"focal_length",
"front_clipping",
"back_clipping",
"view_twist",
"view_mode",
"render_mode",
"ucs",
"ucs_origin",
"ucs_xaxis",
"ucs_yaxis",
"ucs_ortho_type",
"elevation",
"ucs_handle",
"base_ucs_handle",
"camera_plottable",
"background_handle",
"live_selection_handle",
"visual_style_handle",
"sun_handle",
],
)
|
opennode/nodeconductor-openstack | src/waldur_openstack/openstack_tenant/extension.py | Python | mit | 1,984 | 0.000504 |
from waldur_core.core import WaldurExtension
class OpenStackTenantExtension(WaldurExtension):
class Settings:
# wiki: https://opennode.atlassian.net/wiki/display/WD/OpenStack+plugin+configuration
WALDUR_OPENSTACK_TENANT = {
'MAX_CONCURRENT_PROVISION': {
'OpenStackTenant.Instance': 4,
'OpenStackTenant.Volume': 4,
'OpenStackTenant.Snapshot': 4,
},
}
@staticmethod
def django_app():
return 'waldur_openstack.openstack_tenant'
@staticmethod
def rest_urls():
from .urls import register_in
return register_in
@staticmethod
def celery_tasks():
from datetime import timedelta
return {
'openstacktenant-schedule-backups': {
'task': 'openstack_tenant.ScheduleBackups',
'schedule': timedelta(minutes=10),
'args': (),
},
'openstacktenant-delete-expired-backups': {
'task': 'openstack_tenant.DeleteExpiredBackups',
'schedule': timedelta(minutes=10),
'args'
|
: (),
},
'openstacktenant-schedule-snapshots': {
'task': 'openstack_tenant.ScheduleSnapshots',
'schedule': timedelta(minutes=10),
'args': (),
},
'openstacktenant-delete-expired-snapshots': {
'task': 'op
|
enstack_tenant.DeleteExpiredSnapshots',
'schedule': timedelta(minutes=10),
'args': (),
},
'openstacktenant-set-erred-stuck-resources': {
'task': 'openstack_tenant.SetErredStuckResources',
'schedule': timedelta(minutes=10),
'args': (),
},
}
@staticmethod
def get_cleanup_executor():
from .executors import OpenStackTenantCleanupExecutor
return OpenStackTenantCleanupExecutor
|
bigswitch/nova | nova/objects/instance_numa_topology.py | Python | apache-2.0 | 8,588 | 0.000116 |
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields as obj_fields
from nova.virt import hardware
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMACell(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add pagesize field
# Version 1.2: Add cpu_pinning_raw and topology fields
# Version 1.3: Add cpu_policy and cpu_thread_policy fields
VERSION = '1.3'
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMACell, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 3):
primitive.pop('cpu_policy', None)
primitive.pop('cpu_thread_policy', None)
fields = {
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True),
'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
nullable=True),
'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True),
'cpu_policy': obj_fields.CPUAllocationPolicyField(nullable=True),
'cpu_thread_policy': obj_fields.CPUThreadAllocationPolicyField(
nullable=True),
}
cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
def __init__(self, **kwargs):
super(InstanceNUMACell, self).__init__(**kwargs)
if 'pagesize' not in kwargs:
self.pagesize = None
self.obj_reset_changes(['pagesize'])
if 'cpu_topology' not in kwargs:
self.cpu_topology = None
self.obj_reset_changes(['cpu_topology'])
if 'cpu_pinning' not in kwargs:
self.cpu_pinning = None
self.obj_reset_changes(['cpu_pinning_raw'])
if 'cpu_policy' not in kwargs:
self.cpu_policy = None
self.obj_reset_changes(['cpu_policy'])
if 'cpu_thread_policy' not in kwargs:
self.cpu_thread_policy = None
self.obj_reset_changes(['cpu_thread_policy'])
def __len__(self):
return len(self.cpuset)
def _to_dict(self):
#
|
NOTE(sahid): Used as legacy, could be renamed to
# _legacy_to_dict_ in the future to avoid confusion.
return {'cpus': hardware.format_cpu_spec(self.cpuset,
allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id,
'pagesize': self.pagesize}
@classmethod
def _from_dict(cls, d
|
ata_dict):
# NOTE(sahid): Used as legacy, could be renamed to
# _legacy_from_dict_ in the future to avoid confusion.
cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
pagesize = data_dict.get('pagesize')
return cls(id=cell_id, cpuset=cpuset,
memory=memory, pagesize=pagesize)
@property
def siblings(self):
cpu_list = sorted(list(self.cpuset))
threads = 0
if self.cpu_topology:
threads = self.cpu_topology.threads
if threads == 1:
threads = 0
return list(map(set, zip(*[iter(cpu_list)] * threads)))
@property
def cpu_pinning_requested(self):
return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
def pin(self, vcpu, pcpu):
if vcpu not in self.cpuset:
return
pinning_dict = self.cpu_pinning or {}
pinning_dict[vcpu] = pcpu
self.cpu_pinning = pinning_dict
def pin_vcpus(self, *cpu_pairs):
for vcpu, pcpu in cpu_pairs:
self.pin(vcpu, pcpu)
def clear_host_pinning(self):
"""Clear any data related to how this cell is pinned to the host.
Needed for aborting claims as we do not want to keep stale data around.
"""
self.id = -1
self.cpu_pinning = {}
return self
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMATopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Takes into account pagesize
# Version 1.2: InstanceNUMACell 1.2
VERSION = '1.2'
fields = {
# NOTE(danms): The 'id' field is no longer used and should be
# removed in the future when convenient
'id': obj_fields.IntegerField(),
'instance_uuid': obj_fields.UUIDField(),
'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'),
}
@classmethod
def obj_from_primitive(cls, primitive, context=None):
if 'nova_object.name' in primitive:
obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(
primitive, context=None)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that there are no cases of the old format stored in
# the database (or forever, if we can never guarantee that).
obj_topology = InstanceNUMATopology._from_dict(primitive)
obj_topology.id = 0
return obj_topology
@classmethod
def obj_from_db_obj(cls, instance_uuid, db_obj):
primitive = jsonutils.loads(db_obj)
obj_topology = cls.obj_from_primitive(primitive)
if 'nova_object.name' not in db_obj:
obj_topology.instance_uuid = instance_uuid
# No benefit to store a list of changed fields
obj_topology.obj_reset_changes()
return obj_topology
# TODO(ndipanov) Remove this method on the major version bump to 2.0
@base.remotable
def create(self):
values = {'numa_topology': self._to_json()}
db.instance_extra_update_by_uuid(self._context, self.instance_uuid,
values)
self.obj_reset_changes()
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['numa_topology'])
if not db_extra:
raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)
if db_extra['numa_topology'] is None:
return None
return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology'])
def _to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def _to_dict(self):
# NOTE(sahid): Used as legacy, could be renamed to _legacy_to_dict_
# in the future to avoid confusion.
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
# NOTE(sahid): Used as legacy, could be renamed to _legacy_from_dict_
# in the future to avoid confusion.
return cls(cells=[
InstanceNUMACell._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
|
pierre-haessig/sysdiag | sysdiag.py | Python | mit | 21,428 | 0.007049 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" sysdiag
Pierre Haessig — September 2013
"""
from __future__ import division, print_function
def _create_name(name_list, base):
'''Returns a name (str) built on `base` that doesn't exist in `name_list`.
Useful for automatic creation of subsystems or wires
'''
base = str(base).strip()
if base == '':
# avoid having '' as name (although it would not break the code...)
raise ValueError('base name should not be empty!')
if base not in name_list:
return base
# Else: build another name by counting
i = 0
name = base + str(i)
while name in name_list:
i += 1
name = base + str(i)
return name
class System(object):
'''Diagram description of a system
a System is either an interconnecion of subsystems
or an atomic element (a leaf of the tree)
'''
def __init__(self, name='root', parent=None):
self.name = name
# Parent system, if any (None for top-level):
self.parent = None
# Children systems, if any (None for leaf-level):
self.subsystems = []
self.wires = []
self.ports = []
self.params = {}
# If a parent system is provided, request its addition as a subsystem
if parent is not None:
parent.add_subsystem(self)
#end __init__()
def is_empty(self):
'''True if the System contains no subsystems and no wires'''
return (not self.subsystems) and (not self.wires)
@property
def ports_dict(self):
'''dict of ports, whose keys are the names of the ports'''
return {p.name:p for p in self.ports}
@property
def subsystems_dict(self):
'''dict of subsystems, whose keys are the names of the systems'''
return {s.name:s for s in self.subsystems}
def add_port(self, port, created_by_system = False):
'''add a Port to the System'''
if port in self.ports:
raise ValueError('port already added!')
# extract the port's name
name = port.name
port_names = [p.name for p in self.ports]
if name in port_names:
raise ValueError("port name '{}' already exists in {:s}!".format(
name, repr(self))
)
# Add parent relationship and add to the ports dict:
port.system = self
port._created_by_system = bool(created_by_system)
self.ports.append(port)
def del_port(self, port):
'''delete a Port of the System (and disconnect any connected wire)
'''
if (port.wire is not None) or (port.internal_wire is not None):
# TODO : implement the wire disconnection
raise NotImplementedError('Cannot yet delete a connected Port')
# Remove the ports list:
self.ports.remove(port)
def add_subsystem(self, subsys):
# 1) Check name uniqueness
name = subsys.name
subsys_names = [s.name for s in self.subsystems]
if name in subsys_names:
raise ValueError("system name '{}' already exists in {:s}!".format(
name, repr(self))
)
# 2) Add parent relationship and add to the system list
subsys.parent = self
self.subsystems.append(subsys)
def add_wire(self, wire):
# 1) Check name uniqueness
name = wire.name
wire_names = [w.name for w in self.wires]
if name in wire_names:
raise ValueError("wire name '{}' already exists in {:s}!".format(
name, repr(self))
)
# Add parent relationship and add to the ports dict:
wire.parent = self
self.wires.append(wire)
def create_name(self, category, base):
'''Returns a name (str) built on `base` that doesn't exist
within the names of `category`.
'''
if category == 'subsystem':
components = self.subsystems
elif category == 'wire':
components = self.wires
else:
raise ValueError("Unknown category '{}'!".format(str(category)))
name_list = [c.name for c in components]
return _create_name(name_list, base)
de
|
f __repr__(self):
cls_name = self.__class__.__name__
s = "{:s}('{.name}')".format(cls_name, self)
return s
|
def __str__(self):
s = repr(self)
if self.parent:
s += '\n Parent: {:s}'.format(repr(self.parent))
if self.params:
s += '\n Parameters: {:s}'.format(str(self.params))
if self.ports:
s += '\n Ports: {:s}'.format(str(self.ports))
if self.subsystems:
s += '\n Subsytems: {:s}'.format(str(self.subsystems))
return s
def __eq__(self, other):
'''Systems compare equal if their class, `name` and `params` are equal.
and also their lists of ports and wires are *similar*
(see `_is_similar` methods of Port and Wire)
and finally their subsystems recursively compare equal.
parent systems are not compared (would generate infinite recursion).
'''
if not isinstance(other, System):
return NotImplemented
# Basic similarity
basic_sim = self.__class__ == other.__class__ and \
self.name == other.name and \
self.params == other.params
if not basic_sim:
return False
# Port similarity: (sensitive to the order)
ports_sim = all(p1._is_similar(p2) for (p1,p2)
in zip(self.ports, other.ports))
if not ports_sim:
return False
# Wires similarity
wires_sim = all(w1._is_similar(w2) for (w1,w2)
in zip(self.wires, other.wires))
if not wires_sim:
return False
print('equality at level {} is true'.format(self.name))
# Since everything matches, compare subsystems:
return self.subsystems == other.subsystems
# end __eq__()
def __ne__(self,other):
return not (self==other)
def _to_json(self):
'''convert the System instance to a JSON-serializable object
System is serialized with list of ports, subsystems and wires
but without connectivity information (e.g. no parent information)
ports created at the initialization of the system ("default ports")
are not serialized.
'''
# Filter out ports created at the initialization of the system
ports_list = [p for p in self.ports if not p._created_by_system]
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'System',
'__class__': cls_name,
'name':self.name,
'subsystems':self.subsystems,
'wires':self.wires,
'ports':ports_list,
'params':self.params
}
# end _to_json
def json_dump(self, output=None, indent=2, sort_keys=True):
'''dump (e.g. save) the System structure in json format
if `output` is None: return a json string
if `output` is a writable file: write to this file
'''
import json
if output is None:
return json.dumps(self, default=to_json, indent=indent, sort_keys=sort_keys)
else:
json.dump(self, output, default=to_json, indent=indent, sort_keys=sort_keys)
return
# end json_dump
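# A minimal usage sketch (illustration only, not part of the original module;
# the names 'root' and 'charger' are hypothetical):
#
#     root = System('root')
#     charger = System('charger', parent=root)  # registered via add_subsystem()
#     root.is_empty()                  # -> False: it now holds one subsystem
#     root.subsystems_dict['charger']  # -> the `charger` System
#     root.create_name('wire', 'w')    # -> a wire name not yet used in `root`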
class Port(object):
'''Port enables the connection of a System to a Wire
Each port has a `type` which only allows the connection of a Wire
of the same type.
It also has a `direction` ('none', 'in', 'out') that is set
at the class level.
The private attribute `_created_by_system` tells whether the port was created
automatically by the system's class at initialization or by custom code
(if True,
|
realer01/aiohttp-debugtoolbar
|
aiohttp_debugtoolbar/panels/middlewares.py
|
Python
|
apache-2.0
| 1,143 | 0 |
from .base import DebugPanel
from ..utils import STATIC_ROUTE_NAME
__all__ = ['MiddlewaresDebugPanel']
class MiddlewaresDebugPanel(DebugPanel):
"""
A panel to display the middlewares used by your aiohttp application.
"""
name = 'Middlewares'
has_content = True
template = 'middlewares.jinja2'
title = 'Middlewares'
nav_title = title
def __init__(self, request):
super().__init__(request)
if not request.app.middlewares:
self.has_content = False
self.is_active = False
else:
self.populate(request)
def populate(self, request):
middleware_names = []
for m in request.app.middlewares:
if hasattr(m, '__name__'):
# name for regular functions
middleware_names.append(m.__name__)
else:
middleware_names.append(m.__repr__())
self.data = {'middlewares': middleware_names}
def render_vars(self, request):
static_path = self._request.app.router[STATIC_ROUTE_NAME]\
.url(filename='')
return {'static_path': static_path}
|
s-i-newton/clang-hook
|
lib_hook/filter.py
|
Python
|
apache-2.0
| 1,601 | 0.004372 |
"""Filters allow matching the output against a regex and using the match for statistics purposes."""
import re
import typing
from lib_hook.auto_number import AutoNumber
from .stage import Str_to_stage, Stage
class InvalidStage(Exception):
"""A filter was applied at a wrong stage."""
pass
class FilterMode(AutoNumber):
"""Whether the filter should use lookaround regexes or extract the match via groups."""
LookAround = ()
Groups = ()
class Filter:
"""The Filter class contains everything about a filter; it looks into the
output and returns the match with the right type."""
def __init__(self, data):
self.name = data["name"]
self.regex = re.compile(data["pattern"])
self.mode = FilterMode.Groups if data["mode"] == "group" else FilterMode.LookAround
self.group = data["group"] if self.mode == FilterMode.Groups else None
self.stages = [Str_to_stage(e) for e in data["stages"]]
self.type = {"bool": bool, "int": int, "float": float, "string": str}[data["type"]]
self.summary = data["summary"]
def search(self, output: str, stage: Stage) -> typing.Union[bool, int, float, str, None]:
"""Looks for a match in the given string and returns the match. If the filter does not match, return None."""
if stage not in self.stages:
return None
match = self.regex.search(output)
if match is None:
return None
if self.mode == FilterMode.Groups:
return self.type(match.group(self.group))
return self.type(match.group(0))
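# A hypothetical configuration sketch (illustration only; the stage name
# "compile" and the concrete values below are assumptions, not taken from the
# project's real config schema -- only the dict keys come from __init__ above):
#
#     f = Filter({
#         "name": "exit_code",
#         "pattern": r"exit code (\d+)",
#         "mode": "group",
#         "group": 1,
#         "stages": ["compile"],
#         "type": "int",
#         "summary": False,
#     })
#     f.search("clang exited with exit code 1", stage)  # -> 1 when stage is in f.stages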
|
YilunZhou/Klampt
|
Python/demos/gltemplate.py
|
Python
|
bsd-3-clause
| 1,532 | 0.02611 |
#!/usr/bin/python
import sys
from klampt import *
from klampt.vis import GLSimulationProgram
class MyGLViewer(GLSimulationProgram):
def __init__(self,files):
#create a world from the given files
world = WorldModel()
for fn in files:
res = world.readFile(fn)
if not res:
raise RuntimeError("Unable to load model "+fn)
#initialize the simulation
GLSimulationProgram.__init__(self,world,"My GL program")
#put custom action hooks here
self.add_action(self.some_function,'Some random function','f')
def some_function(self):
print "The function is called"
def control_loop(self):
#Put your control handler here
pass
def mousefunc(self,button,state,x,y):
#Put your mouse handler here
#the current example prints out the list of objects clicked whenever
#you right click
print "mouse",button,state,x,y
if button==2:
if state==0:
print [o.getName() for o in self.click_world(x,y)]
return
GLSimulationProgram.mousefunc(self,button,state,x,y)
def motionfunc(self,x,y,dx,dy):
return GLSimulationProgram.motionfunc(self,x,y,dx,dy)
if __name__ == "__main__":
print "gltemplate.py: This example demonstrates how to simulate a world and read user input"
if len(sys.argv)<=1:
print "USAGE: gltemplate.py [world_file]"
exit()
viewer = MyGLViewer(sys.argv[1:])
viewer.run()
|
craws/OpenAtlas-Python
|
openatlas/views/types.py
|
Python
|
gpl-2.0
| 4,440 | 0 |
from typing import Any, Dict, List, Union
from flask import abort, flash, g, render_template, url_for
from flask_babel import format_number, lazy_gettext as _
from werkzeug.utils import redirect
from werkzeug.wrappers import Response
from openatlas import app
from openatlas.database.connect import Transaction
from openatlas.forms.form import build_move_form
from openatlas.models.entity import Entity
from openatlas.models.node import Node
from openatlas.util.table import Table
from openatlas.util.util import link, required_group, sanitize
def walk_tree(nodes: List[int]) -> List[Dict[str, Any]]:
items = []
for id_ in nodes:
item = g.nodes[id_]
count_subs = f' ({format_number(item.count_subs)})' \
if item.count_subs else ''
items.append({
'id': item.id,
'href': url_for('entity_view', id_=item.id),
'a_attr': {'href': url_for('entity_view', id_=item.id)},
'text':
item.name.replace("'", "'") +
f' {format_number(item.count)}{count_subs}',
'children': walk_tree(item.subs)})
return items
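# Each item built above has this shape (values are illustrative only; the
# href comes from url_for('entity_view', ...), so the real path depends on
# the route configuration):
#
#     {'id': 42,
#      'href': '<entity view URL>',
#      'a_attr': {'href': '<entity view URL>'},
#      'text': "Type name 7 (3)",
#      'children': [...recursively nested items...]}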
@app.route('/types')
@required_group('readonly')
def node_index() -> str:
nodes: Dict[str, Dict[Entity, str]] = \
{'standard': {}, 'custom': {}, 'places': {}, 'value': {}}
for node in g.nodes.values():
if node.root:
continue
type_ = 'custom'
if node.class_.name == 'administrative_unit':
type_ = 'places'
elif node.standard:
type_ = 'standard'
elif node.value_type:
type_ = 'value'
nodes[type_][node] = render_template(
'forms/tree_select_item.html',
name=sanitize(node.name),
data=walk_tree(Node.get_nodes(node.name)))
return render_template(
'types/index.html',
nodes=nodes,
title=_('types'),
crumbs=[_('types')])
@app.route('/types/delete/<int:id_>', methods=['POST', 'GET'])
@required_group('editor')
def node_delete(id_: int) -> Response:
node = g.nodes[id_]
root = g.nodes[node.root[-1]] if node.root else None
if node.standard or node.subs or node.count or (root and root.locked):
abort(403)
node.delete()
flash(_('entity deleted'), 'info')
return redirect(
url_for('entity_view', id_=root.id) if root else url_for('node_index'))
@app.route('/types/move/<int:id_>', methods=['POST', 'GET'])
@required_group('editor')
def node_move_entities(id_: int) -> Union[str, Response]:
node = g.nodes[id_]
root = g.nodes[node.root[-1]]
if root.value_type: # pragma: no cover
abort(403)
form = build_move_form(node)
if form.validate_on_submit():
Transaction.begin()
Node.move_entities(
node,
getattr(form, str(root.id)).data,
form.checkbox_values.data)
Transaction.commit()
flash(_('Entities were updated'), 'success')
if node.class_.name == 'administrative_unit':
tab = 'places'
elif root.standard:
tab = 'standard'
elif node.value_type: # pragma: no cover
tab = 'value'
else:
tab = 'custom'
return redirect(
f"{url_for('node_index')}#menu-tab-{tab}_collapse-{root.id}")
getattr(form, str(root.id)).data = node.id
return render_template(
'types/move.html',
table=Table(
header=['#', _('selection')],
rows=[[item, item.label.text] for item in form.selection]),
root=root,
form=form,
entity=node,
crumbs=[
[_('types'), url_for('node_index')],
root,
node,
_('move entities')])
@app.route('/types/untyped/<int:id_>')
@required_group('editor')
def show_untyped_entities(id_: int) -> str:
hierarchy = g.nodes[id_]
table = Table(['name', 'class', 'first', 'last', 'description'])
for entity in Node.get_untyped(hierarchy.id):
table.rows.append([
link(entity),
entity.class_.label,
entity.first,
entity.last,
entity.description])
return render_template(
'table.html',
entity=hierarchy,
table=table,
crumbs=[
[_('types'),
url_for('node_index')],
link(hierarchy),
_('untyped entities')])
|
eblur/dust
|
astrodust/distlib/__init__.py
|
Python
|
bsd-2-clause
| 67 | 0 |
from .sizedist import *
from .WD01 import make_WD01_DustSpectrum
|
nddsg/SimpleDBMS
|
simple_dbms/table_iterator.py
|
Python
|
gpl-3.0
| 9,205 | 0.000869 |
from relation_iterator import RelationIterator
from true_expression import TrueExpression
from operation_status import OperationStatus
from data_input_stream import DataInputStream
from insert_row import InsertRow
from column import Column
import simple_dbms
class TableIterator(RelationIterator, object):
"""
A class that serves as an iterator over some or all of the rows in
a stored table. For a given table, there may be more than one
TableIterator open at the same time -- for example, when performing the
cross product of a table with itself.
"""
def __init__(self, stmt, table, eval_where):
"""
Constructs a TableIterator object for the subset of the specified
table that is defined by the given SQLStatement. If the
SQLStatement has a WHERE clause and the evalWhere parameter has a
value of true, the iterator will only visit rows that satisfy the
WHERE clause.
:param stmt: the SQL statement that defines the subset of the table
:param table: the table to iterate over
:param eval_where: should the WHERE clause in stmt be evaluated by this
iterator? If this iterator is being used by a higher-level
iterator, then we can specify false so that the WHERE clause
will not be evaluated at this level.
"""
super(TableIterator, self).__init__()
self.table = table
# Make sure the table is open.
if table.get_db() is None:
raise Exception("table " + table.get_name() + " must be " +
"opened before attempting to create an iterator for it")
# Find all columns from the SQL statement whose values will
# be obtained using this table iterator, and update their
# state so that we can get their values as needed.
table_col = stmt_col = None
for i in range(0, table.num_columns()):
table_col = table.get_column(i)
# check for a match in the SELECT clause
for j in range(0, stmt.num_columns()):
stmt_col = stmt.get_column(j)
if stmt_col.name_matches(table_col, table):
stmt_col.use_col_info(table_col)
stmt_col.set_table_iterator(self)
# check for a match in the WHERE clause
for j in range(0, stmt.num_where_columns()):
stmt_col = stmt.get_where_column(j)
if stmt_col.name_matches(table_col, table):
stmt_col.use_col_info(table_col)
stmt_col.set_table_iterator(self)
self.cursor = table.get_db().cursor(txn=simple_dbms.SimpleDBMS.get_txn(), flags=0)
self.key = None
self.data = None
self.where = stmt.get_where()
if not eval_where:
self.where = None
if self.where is None:
self.where = TrueExpression()
self.num_tuples = 0
def close(self):
"""
Closes the iterator, which closes any BDB handles that it is using.
:return:
"""
if self.cursor is not None:
self.cursor.close()
self.cursor = None
def first(self):
"""
Positions the iterator on the first tuple in the relation, without
taking the WHERE clause (if any) into effect.
Because this method ignores the WHERE clause, it should
ordinarily be used only when you need to reposition the cursor
at the start of the relation after having completed a previous
iteration.
:return: true if the iterator was advanced to the first tuple, and false
if there are no tuples to visit
"""
if self.cursor is None:
raise Exception("This iterator has been closed")
ret = self.cursor.first() # this.cursor.getFirst(this.key, this.data, null);
if ret == OperationStatus.NOTFOUND:
return False
# Only increment num_tuples if the WHERE clause isn't violated.
if self.where.is_true():
self.num_tuples += 1
return True
def next(self):
"""
Advances the iterator to the next tuple in the relation. If
there is a WHERE clause that limits which tuples should be
included in the relation, this method will advance the iterator
to the next tuple that satisfies the WHERE clause. If the
iterator is newly created, this method will position it on the
first tuple in the relation (that satisfies the WHERE clause).
Provided that the iterator can be positioned on a tuple, the
count of the number of tuples visited by the iterator is
incremented.
:return: true if the iterator was advanced to a new tuple, and false
if there are no more tuples to visit
"""
if self.cursor is None:
raise Exception("this iterator has been closed")
ret = self.cursor.next()
if ret is None:
return False
self.key = ret[0]
self.data = ret[1]
while not self.where.is_true():
ret = self.cursor.next()
if ret is None:
return False
self.key = ret[0]
self.data = ret[1]
self.num_tuples += 1
return True
def get_column(self, col_index):
"""
Gets the column at the specified index in the relation that
this iterator iterates over. The leftmost column has an index of 0.
:param col_index:
:return: the column
"""
return self.table.get_column(col_index)
def get_column_val(self, col_index):
"""
Gets the value of the column at the specified index in the row
on which this iterator is currently positioned. The leftmost
column has an index of 0.
This method will unmarshall the relevant bytes from the
key/data pair and return the corresponding Object -- i.e.,
an object of type String for CHAR and VARCHAR values, an object
of type Integer for INTEGER values, or an object of type Double
for REAL values.
:param col_index:
:return: the value of the column
"""
# Get the specified column and its type.
col = self.table.get_column(col_index)
col_type = col.get_type()
# Create an input stream for the data item in the
# current key/data pair, and mark the beginning of its buffer.
data_in = DataInputStream(self.data)
# Read the appropriate offset from the table of offsets,
# and handle null values.
offset_offset = col_index * 4 # the offset of the offset!
data_in.skip(offset_offset)
offset = data_in.read_int()
if offset == InsertRow.IS_NULL:
return None
# Prepare the appropriate TupleInput object (for either the
# key or the data item), and make the variable "in" refer
# to that object. We also determine the size of the value.
din = None
size = -1
if offset == InsertRow.IS_PKEY:
din = DataInputStream(self.key)
size = len(self.key)
else:
din = data_in
if col_type == Column.VARCHAR:
# Get the next positive offset from the data item,
# so we can compute the size of the VARCHAR in bytes.
# We need a loop so that we can skip over special offsets
# for null values and primary keys.
next_offset = data_in.read_int()
while next_offset == InsertRow.IS_PKEY or next_offset == InsertRow.IS_NULL:
next_offset = data_in.read_int()
size = next_offset - offset
else:
size = col.get_length()
# Skip to the appropriate place in the data item.
# We do this *after* we read all necessary offsets.
din.reset()
din.skip(offset)
# Read the value, and return it as an object of the
# appropriate type.
|
victal/ulp
|
test/test_urlextract.py
|
Python
|
mit
| 1,212 | 0.00495 |
from ulp.urlextract import escape_ansi, parse_input
import os
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def test_parse_no_url_input():
assert len(parse_input("")) == 0
multiline_text = """
text
without URLs
and
multiple lines """
assert len(parse_input(multiline_text)) == 0
def test_extract_one_url():
with open(os.path.join(TEST_DIR, 'example_bitbucket.txt')) as f:
result = parse_input(f.read())
assert len(result) == 1
assert result[0] == 'https://bitbucket.org/owner/repository/pull-requests/new?source=BRANCH&t=1'
def test_extract_multiple_urls_per_line():
input_text = """
two urls
https://example.org/?q=1 https://example.org/?p=2
on the same line"""
result = parse_input(input_text)
assert len(result) == 2
assert 'https://example.org/?q=1' in result
assert 'https://example.org/?p=2' in result
def test_escape_ansi_sequence_url():
with open(os.path.join(TEST_DIR, 'example_terminal_colors.txt')) as f:
result = parse_input(f.read())
assert len(result) == 2
assert 'https://example.org/?p=3707' in result
assert 'https://example.org/anotherurl?q=0m' in result
|
WilliamMarti/gitlistener
|
gitlistener.py
|
Python
|
mit
| 1,001 | 0.02997 |
from flask import Flask
from flask import request
from subprocess import call
import git, json, os, sys
newname = "gitlistener"
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6') # Loading a third-party C library
buff = create_string_buffer(len(newname)+1) #Note: One larger than the name (man prctl says that)
buff.value = newname #Null terminated string as it should be
libc.prctl(15, byref(buff), 0, 0, 0) # Refer to the "#define" entries in "/usr/include/linux/prctl.h" for the mysterious value 15 (PR_SET_NAME); args 3..5 are zero as the man page says.
app = Flask(__name__)
@app.route("/", methods=['POST'])
def index():
if request.method == 'POST':
repo = git.Repo('/var/www/lunch_app')
print repo.git.status()
print repo.git.pull()
f = open("keyfile.txt")
pw = f.read()
os.popen("sudo service apache2 reload", "w").write(pw)
else:
print "Wrong"
return "Ran"
if __name__ == "__main__":
app.run(host='0.0.0.0',port=5001)
|
pedrohml/smartbot
|
smartbot/joke_behaviour.py
|
Python
|
mit
| 2,199 | 0.005462 |
# coding: utf-8
from smartbot import Behaviour
from smartbot import Utils
from smartbot import ExternalAPI
import re
import os
import random
class JokeBehaviour(Behaviour):
def __init__(self, bot):
super(JokeBehaviour, self).__init__(bot)
self.language = self.bot.config.get('main', 'language') if self.bot.config.has_option('main', 'language') else 'en-US'
def addHandlers(self):
self.bot.addCommandHandler('joke', self.jokeSearch)
self.bot.addCommandHandler('jalk', self.jalkSearch)
def removeHandlers(self):
self.bot.removeCommandHandler('joke', self.jokeSearch)
self.bot.removeCommandHandler('jalk', self.jalkSearch)
def jokeSearch(self, telegramBot, update):
p = re.compile('([^ ]*) (.*)')
query = (p.match(update.message.text).groups()[1] or '').strip()
self.logDebug(u'Joke search (chat_id: %s, query: %s)' % (update.message.chat_id, query or 'None'))
jokes = ExternalAPI.searchJoke(query)
if jokes:
self.bot.sendMessage(chat_id=update.message.chat_id, text=random.choice(jokes))
def jalkSearch(self, telegramBot, update):
p = re.compile('([^ ]*) (.*)')
query = (p.match(update.message.text).groups()[1] or '').strip()
self.logDebug(u'Jalk search (chat_id: %s, query: %s)' % (update.message.chat_id, query or 'None'))
jokes = ExternalAPI.searchJoke(query)
if jokes:
jokes = filter(lambda c: len(re.split('\W+', c, re.MULTILINE)) < 200, jokes)
jokes = sorted(jokes, lambda x, y: len(x) - len(y))
if jokes:
joke = jokes[0]
audioFile = ExternalAPI.textToSpeech(joke, language=self.language, encode='mp3')
if os.path.exists(audioFile) and os.path.getsize(audioFile) > 0:
self.bot.sendAudio(chat_id=update.message.chat_id, audio=audioFile, performer=self.bot.getInfo().username)
else:
self.bot.sendMessage(chat_id=update.message.chat_id, text=u'Não consigo contar')
else:
self.bot.sendMessage(chat_id=update.message.chat_id, text=u'Não encontrei piada curta')
|
1337/yesterday-i-learned
|
leetcode/172e.py
|
Python
|
gpl-3.0
| 222 | 0 |
class Solution:
def trailingZeroes(self, n: int) -> int:
powers_of_5 = []
for i in range(1, 10):
powers_of_5.append(5 ** i)
return sum(n // power_of_5 for power_of_5 in powers_of_5)
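# A quick sanity check (hypothetical usage, not part of the original
# submission): the count of trailing zeros of n! is sum(n // 5**i), since
# every factor of 5 pairs with a spare factor of 2.
if __name__ == "__main__":
    assert Solution().trailingZeroes(0) == 0
    assert Solution().trailingZeroes(25) == 6    # 5, 10, 15, 20 and 25 (which counts twice)
    assert Solution().trailingZeroes(100) == 24  # 100//5 + 100//25 = 20 + 4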
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/models/_models.py
|
Python
|
mit
| 848,302 | 0.003644 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AadAuthenticationParameters(msrest.serialization.Model):
"""AAD Vpn authentication type related parameters.
:param aad_tenant: AAD Vpn authentication parameter AAD tenant.
:type aad_tenant: str
:param aad_audience: AAD Vpn authentication parameter AAD audience.
:type aad_audience: str
:param aad_issuer: AAD Vpn authentication parameter AAD issuer.
:type aad_issuer: str
"""
_attribute_map = {
'aad_tenant': {'key': 'aadTenant', 'type': 'str'},
'aad_audience': {'key': 'aadAudience', 'type': 'str'},
'aad_issuer': {'key': 'aadIssuer', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AadAuthenticationParameters, self).__init__(**kwargs)
self.aad_tenant = kwargs.get('aad_tenant', None)
self.aad_audience = kwargs.get('aad_audience', None)
self.aad_issuer = kwargs.get('aad_issuer', None)
class AddressSpace(msrest.serialization.Model):
"""AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
:param address_prefixes: A list of address blocks reserved for this virtual network in CIDR
notation.
:type address_prefixes: list[str]
"""
_attribute_map = {
'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AddressSpace, self).__init__(**kwargs)
self.address_prefixes = kwargs.get('address_prefixes', None)
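# Hypothetical usage sketch (illustration only, not part of the generated SDK
# file): these msrest models take their fields as keyword arguments.
#
#     space = AddressSpace(address_prefixes=["10.0.0.0/16"])
#     space.address_prefixes  # -> ['10.0.0.0/16']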
class Resource(msrest.serialization.Model):
"""Common resource representation.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ApplicationGateway(Resource):
"""Application gateway resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:param zones: A list of availability zones denoting where the resource needs to come from.
:type zones: list[str]
:param identity: The identity of the application gateway, if configured.
:type identity: ~azure.mgmt.network.v2020_04_01.models.ManagedServiceIdentity
:param sku: SKU of the application gateway resource.
:type sku: ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySku
:param ssl_policy: SSL policy of the application gateway resource.
:type ssl_policy: ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslPolicy
:ivar operational_state: Operational state of the application gateway resource. Possible values
include: "Stopped", "Starting", "Running", "Stopping".
:vartype operational_state: str or
~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayOperationalState
:param gateway_ip_configurations: Subnets of the application gateway resource. For default
limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type gateway_ip_configurations:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayIPConfiguration]
:param authentication_certificates: Authentication certificates of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type authentication_certificates:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayAuthenticationCertificate]
:param trusted_root_certificates: Trusted Root certificates of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type trusted_root_certificates:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayTrustedRootCertificate]
:param ssl_certificates: SSL certificates of the application gateway resource. For default
limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type ssl_certificates:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslCertificate]
:param frontend_ip_configurations: Frontend IP addresses of the application gateway resource.
For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type frontend_ip_configurations:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFrontendIPConfiguration]
:param frontend_ports: Frontend ports of the application gateway resource. For default limits,
see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type frontend_ports:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFrontendPort]
:param probes: Probes of the application gateway resource.
:type probes: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProbe]
:param backend_address_pools: Backend address pool of the application gateway resource. For
default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type backend_address_pools:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendAddressPool]
:param backend_http_settings_collection: Backend http settings of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type backend_http_settings_collection:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHttpSettings]
:param http_listeners: Http listeners of the application gateway resource. For default limits,
see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type http_listeners:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayHttpListener]
:para
|
steventimberman/masterDebater
|
venv/lib/python2.7/site-packages/haystack/backends/elasticsearch2_backend.py
|
Python
|
mit
| 14,291 | 0.003149 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
from django.conf import settings
from haystack.backends import BaseEngine
from haystack.backends.elasticsearch_backend import ElasticsearchSearchBackend, ElasticsearchSearchQuery
from haystack.constants import DJANGO_CT
from haystack.exceptions import MissingDependency
from haystack.utils import get_identifier, get_model_ct
from haystack.utils import log as logging
try:
import elasticsearch
if not ((2, 0, 0) <= elasticsearch.__version__ < (3, 0, 0)):
raise ImportError
from elasticsearch.helpers import bulk, scan
except ImportError:
raise MissingDependency("The 'elasticsearch2' backend requires the \
installation of 'elasticsearch>=2.0.0,<3.0.0'. \
Please refer to the documentation.")
class Elasticsearch2SearchBackend(ElasticsearchSearchBackend):
def __init__(self, connection_alias, **connection_options):
super(Elasticsearch2SearchBackend, self).__init__(connection_alias, **connection_options)
self.content_field_name = None
def clear(self, models=None, commit=True):
"""
Clears the backend of all documents/objects for a collection of models.
:param models: List or tuple of models to clear.
:param commit: Not used.
"""
if models is not None:
assert isinstance(models, (list, tuple))
try:
if models is None:
self.conn.indices.delete(index=self.index_name, ignore=404)
self.setup_complete = False
self.existing_mapping = {}
self.content_field_name = None
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
# Delete using scroll API
query = {'query': {'query_string': {'query': " OR ".join(models_to_delete)}}}
generator = scan(self.conn, query=query, index=self.index_name, doc_type='modelresult')
actions = ({
'_op_type': 'delete',
'_id': doc['_id'],
} for doc in generator)
bulk(self.conn, actions=actions, index=self.index_name, doc_type='modelresult')
self.conn.indices.refresh(index=self.index_name)
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
if models is not None:
self.log.error("Failed to clear Elasticsearch index of models '%s': %s",
','.join(models_to_delete), e, exc_info=True)
else:
self.log.error("Failed to clear Elasticsearch index: %s", e, exc_info=True)
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None):
kwargs = super(Elasticsearch2SearchBackend, self).build_search_kwargs(query_string, sort_by,
start_offset, end_offset,
fields, highlight,
spelling_query=spelling_query,
within=within, dwithin=dwithin,
distance_point=distance_point,
models=models,
limit_to_registered_models=limit_to_registered_models,
result_class=result_class)
filters = []
if start_offset is not None:
kwargs['from'] = start_offset
if end_offset is not None:
kwargs['size'] = end_offset - start_offset
if narrow_queries is None:
narrow_queries = set()
if facets is not None:
kwargs.setdefault('aggs', {})
for facet_fieldname, extra_options in facets.items():
facet_options = {
'meta': {
'_type': 'terms',
},
'terms': {
'field': facet_fieldname,
}
}
if 'order' in extra_options:
facet_options['meta']['order'] = extra_options.pop('order')
# Special cases for options applied at the facet level (not the terms level).
if extra_options.pop('global_scope', False):
# Renamed "global_scope" since "global" is a python keyword.
facet_options['global'] = True
if 'facet_filter' in extra_options:
facet_options['facet_filter'] = extra_options.pop('facet_filter')
facet_options['terms'].update(extra_options)
kwargs['aggs'][facet_fieldname] = facet_options
if date_facets is not None:
kwargs.setdefault('aggs', {})
for facet_fieldname, value in date_facets.items():
# Need to detect on gap_by & only add amount if it's more than one.
interval = value.get('gap_by').lower()
# Need to detect on amount (can't be applied on months or years).
if value.get('gap_amount', 1) != 1 and interval not in ('month', 'year'):
# Just the first character is valid for use.
interval = "%s%s" % (value['gap_amount'], interval[:1])
kwargs['aggs'][facet_fieldname] = {
'meta': {
'_type': 'date_histogram',
},
'date_histogram': {
'field': facet_fieldname,
'interval': interval,
},
'aggs': {
facet_fieldname: {
'date_range': {
'field': facet_fieldname,
'ranges': [
{
'from': self._from_python(value.get('start_date')),
'to': self._from_python(value.get('end_date')),
}
]
}
}
}
}
if query_facets is not None:
kwargs.setdefault('aggs', {})
for facet_fieldname, value in query_facets:
kwargs['aggs'][facet_fieldname] = {
'meta': {
'_type': 'query',
},
'filter': {
'query_string': {
'query': value,
}
},
}
for q in narrow_queries:
filters.append({
'query_string': {
'query': q
}
})
# If we want to filter, change the query type to filtered
if filters:
kwargs["query"] = {"filtered": {"query": kwargs.pop("query")}}
filtered = kwargs["query"]["filtered"]
if 'filter' in filtered:
|
alfredodeza/merfi
|
merfi/backends/rpm_sign.py
|
Python
|
mit
| 5,502 | 0.000364 |
from time import sleep
import os
import shutil
import merfi
from merfi import logger
from merfi import util
from merfi.collector import RepoCollector
from merfi.backends import base
class RpmSign(base.BaseBackend):
help_menu = 'rpm-sign handler for signing files'
_help = """
Signs files with rpm-sign. Crawls a given path looking for Debian repos.
Note: this sub-command tells merfi to use Red Hat's internal signing tool
inconveniently named "rpm-sign", not the rpmsign(8) command that is a part of
the http://rpm.org open-source project.
%s
Options
--key Name of the key to use (see rpm-sign --list-keys)
--keyfile File path location of the public keyfile, for example
/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
or /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
--nat A NAT is between this system and the signing server.
Positional Arguments:
[path] The path to crawl for signing repos. Defaults to current
working directory
"""
executable = 'rpm-sign'
name = 'rpm-sign'
options = ['--key', '--keyfile', '--nat']
def clear_sign(self, path, command):
"""
When doing a "clearsign" with rpm-sign, the output goes to stdout, so
that needs to be captured and written to the default output file for
clear signed signatures (InRelease).
"""
logger.info('signing: %s' % path)
out, err, code = util.run_output(command)
# Sometimes rpm-sign will fail with this error. I've opened
# rhbz#1557014 to resolve this server-side. For now, sleep and retry
# as a workaround. These sleep/retry values are suggestions from the
# team that runs the signing service.
known_failure = "ERROR: unhandled exception occurred: ('')."
tries = 1
while known_failure in err and tries < 30:
logger.warning('hit known rpm-sign failure.')
tries += 1
logger.warning('sleeping, running try #%d in 30 seconds.' % tries)
sleep(2)
out, err, code = util.run_output(command)
if code != 0:
for line in err.split('\n'):
logger.error('stderr: %s' % line)
for line in out.split('\n'):
logger.error('stdout: %s' % line)
raise RuntimeError('rpm-sign non-zero exit code %d', code)
if out.strip() == '':
for line in err.split('\n'):
logger.error('stderr: %s' % line)
logger.error('rpm-sign clearsign provided nothing on stdout')
raise RuntimeError('no clearsign signature available')
absolute_directory = os.path.dirname(os.path.abspath(path))
with open(os.path.join(absolute_directory, 'InRelease'), 'w') as f:
f.write(out)
def detached(self, command):
return util.run(command)
def sign(self):
self.keyfile = self.parser.get('--keyfile')
if self.keyfile:
self.keyfile = os.path.abspath(self.keyfile)
if not os.path.isfile(self.keyfile):
raise RuntimeError('%s is not a file' % self.keyfile)
logger.info('using keyfile "%s" as release.asc' % self.keyfile)
self.key = self.parser.get('--key')
if not self.key:
raise RuntimeError('specify a --key for signing')
logger.info('Starting path collection, looking for files to sign')
repos = RepoCollector(self.path)
if repos:
logger.info('%s repos found' % len(repos))
# FIXME: this should spit the actual verified command
logger.info('will sign with the following commands:')
logger.info('rpm-sign --key "%s" --detachsign Release --output Release.gpg' % self.key)
logger.info('rpm-sign --key "%s" --clearsign Release --output InRelease' % self.key)
else:
logger.warning('No paths found that matched')
for repo in repos:
# Debian "Release" files:
for path in repo.releases:
self.sign_release(path)
# Public key:
if self.keyfile:
logger.info('placing release.asc in %s' % repo.path)
if merfi.config.get('check'):
logger.info('[CHECKMODE] writing release.asc')
else:
shutil.copyfile(
self.keyfile,
os.path.join(repo.path, 'release.asc'))
def sign_release(self, path):
""" Sign a "Release" file from a Debian repo. """
if merfi.config.get('check'):
new_gpg_path = path.split('Release')[0]+'Release.gpg'
new_in_path = path.split('Release')[0]+'InRelease'
logger.info('[CHECKMODE] signing: %s' % path)
logger.info('[CHECKMODE] signed: %s' % new_gpg_path)
logger.info('[CHECKMODE] signed: %s' % new_in_path)
else:
os.chdir(os.path.dirname(path))
detached = ['rpm-sign', '--key', self.key, '--detachsign',
'Release', '--output', 'Release.gpg']
clearsign = ['rpm-sign', '--key', self.key, '--clearsign',
'Release']
if self.parser.has('--nat'):
detached.insert(1, '--nat')
clearsign.insert(1, '--nat')
logger.info('signing: %s' % path)
self.detached(detached)
self.clear_sign(path, clearsign)
|
appsembler/mayan_appsembler
|
fabfile/platforms/linux.py
|
Python
|
gpl-3.0
| 922 | 0.004338 |
import os
from fabric.api import run, sudo, cd, env, task, settings
from ..literals import FABFILE_MARKER
def delete_mayan():
"""
Delete Mayan EDMS files from a Linux system
"""
sudo('rm %(virtualenv_path)s -Rf' % env)
def install_mayan():
"""
Install Mayan EDMS on a Linux system
"""
with cd(env.install_path):
sudo('virtualenv --no-site-packages %(virtualenv_name)s' % env)
with cd(env.virtualenv_path):
sudo('git clone git://github.com/rosarior/mayan.git %(repository_name)s' % env)
sudo('source bin/activate; pip install --upgrade distribute')
sudo('source bin/activate; pip install -r %(repository_name)s/requirements/production.txt' % env)
def post_install():
"""
Post-install process on a Linux system
"""
fabfile_marker = os.path.join(env.repository_path, FABFILE_MARKER)
sudo('touch %s' % fabfile_marker)
|
cisco-openstack/tempest
|
tempest/cmd/workspace.py
|
Python
|
apache-2.0
| 9,103 | 0 |
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages Tempest workspaces
This command is used for managing tempest workspaces
Commands
========
list
----
Outputs the name and path of all known tempest workspaces
register
--------
Registers a new tempest workspace via a given ``--name`` and ``--path``
rename
------
Renames a tempest workspace from ``--old-name`` to ``--new-name``
move
----
Changes the path of a given tempest workspace ``--name`` to ``--path``
remove
------
Deletes the entry for a given tempest workspace ``--name``
``--rmdir`` Deletes the given tempest workspace directory
General Options
===============
* ``--workspace_path``: Allows the user to specify a different location for the
workspace.yaml file containing the workspace definitions instead of
``~/.tempest/workspace.yaml``
"""
import os
import shutil
import sys
from cliff import command
from cliff import lister
from oslo_concurrency import lockutils
import yaml
from tempest import config
CONF = config.CONF
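# Example invocations (hypothetical; the flag names are taken from the module
# docstring above, while the "tempest workspace <command>" prefix is an
# assumption about how these cliff commands are wired into the tempest CLI):
#
#     tempest workspace register --name devstack --path /opt/stack/tempest
#     tempest workspace list
#     tempest workspace rename --old-name devstack --new-name default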
class WorkspaceManager(object):
def __init__(self, path=None):
lockutils.get_lock_path(CONF)
self.path = path or os.path.join(
os.path.expanduser("~"), ".tempest", "workspace.yaml")
if not os.path.isdir(os.path.dirname(self.path)):
os.makedirs(self.path.rsplit(os.path.sep, 1)[0])
self.workspaces = {}
@lockutils.synchronized('workspaces', external=True)
def get_workspace(self, name):
"""Returns the workspace that has the given name
If the workspace isn't registered then `None` is returned.
"""
self._populate()
return self.workspaces.get(name)
@lockutils.synchronized('workspaces', external=True)
def rename_workspace(self, old_name, new_name):
self._populate()
self._name_exists(old_name)
self._invalid_name_check(new_name)
self._workspace_name_exists(new_name)
self.workspaces[new_name] = self.workspaces.pop(old_name)
self._write_file()
@lockutils.synchronized('workspaces', external=True)
def move_workspace(self, name, path):
self._populate()
path = os.path.abspath(os.path.expanduser(path)) if path else path
self._name_exists(name)
self._validate_path(path)
self.workspaces[name] = path
self._write_file()
def _name_exists(self, name):
if name not in self.workspaces:
print("A workspace was not found with name: {0}".format(name))
sys.exit(1)
@lockutils.synchronized('workspaces', external=True)
def remove_workspace_entry(self, name):
self._populate()
self._name_exists(name)
workspace_path = self.workspaces.pop(name)
self._write_file()
return workspace_path
@lockutils.synchronized('workspaces', external=True)
def remove_workspace_directory(self, workspace_path):
self._validate_path(workspace_path)
shutil.rmtree(workspace_path)
@lockutils.synchronized('workspaces', external=True)
def list_workspaces(self):
self._populate()
self._validate_workspaces()
return self.workspaces
def _workspace_name_exists(self, name):
if name in self.workspaces:
print("A workspace already exists with name: {0}.".format(
name))
sys.exit(1)
def _invalid_name_check(self, name):
if not name:
print("None or empty name is specified."
" Please specify correct name for workspace.")
sys.exit(1)
def _validate_path(self, path):
if not path:
print("None or empty path is specified for workspace."
" Please specify correct workspace path.")
sys.exit(1)
if not os.path.exists(path):
print("Path does not exist.")
sys.exit(1)
@lockutils.synchronized('workspaces', external=True)
def register_new_workspace(self, name, path, init=False):
"""Adds the new workspace and writes out the new workspace config"""
self._populate()
path = os.path.abspath(os.path.expanduser(path)) if path else path
# This only happens when register is called from outside of init
if not init:
self._validate_path(path)
self._invalid_name_check(name)
self._workspace_name_exists(name)
self.workspaces[name] = path
self._write_file()
def _validate_workspaces(self):
if self.workspaces is not None:
self.workspaces = {n: p for n, p in self.workspaces.items()
if os.path.exists(p)}
self._write_file()
def _write_file(self):
with open(self.path, 'w') as f:
f.write(yaml.dump(self.workspaces))
def _populate(self):
if not os.path.isfile(self.path):
return
with open(self.path, 'r') as f:
self.workspaces = yaml.safe_load(f) or {}
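# A minimal Python-level usage sketch (hypothetical; not part of the original
# module). It points the manager at a throwaway file so ~/.tempest/workspace.yaml
# is left untouched, and the registered path is assumed to exist:
#
#     manager = WorkspaceManager(path="/tmp/workspace.yaml")
#     manager.register_new_workspace("demo", "/opt/stack/tempest")
#     manager.list_workspaces()         # -> {'demo': '/opt/stack/tempest'}
#     manager.remove_workspace_entry("demo")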
def add_global_arguments(parser):
parser.add_argument(
'--workspace-path', required=False, default=None,
help="The path to the workspace file, the default is "
"~/.tempest/workspace.yaml")
return parser
class TempestWorkspaceRegister(command.Command):
def get_description(self):
return ('Registers a new tempest workspace via a given '
'--name and --path')
def get_parser(self, prog_name):
parser = super(TempestWorkspaceRegister, self).get_parser(prog_name)
add_global_arguments(parser)
parser.add_argument('--name', required=True)
parser.add_argument('--path', required=True)
return parser
def take_action(self, parsed_args):
self.manager = WorkspaceManager(parsed_args.workspace_path)
self.manager.register_new_workspace(parsed_args.name, parsed_args.path)
sys.exit(0)
class TempestWorkspaceRename(command.Command):
def get_description(self):
return 'Renames a tempest workspace from --old-name to --new-name'
def get_parser(self, prog_name):
parser = super(TempestWorkspaceRename, self).get_parser(prog_name)
add_global_arguments(parser)
parser.add_argument('--old-name', required=True)
parser.add_argument('--new-name', required=True)
return parser
def take_action(self, parsed_args):
self.manager = WorkspaceManager(parsed_args.workspace_path)
self.manager.rename_workspace(
parsed_args.old_name, parsed_args.new_name)
sys.exit(0)
class TempestWorkspaceMove(command.Command):
def get_description(self):
return 'Changes the path of a given tempest workspace --name to --path'
def get_parser(self, prog_name):
parser = super(TempestWorkspaceMove, self).get_parser(prog_name)
add_global_arguments(parser)
parser.add_argument('--name', required=True)
parser.add_argument('--path', required=True)
return parser
def take_action(self, parsed_args):
self.manager = WorkspaceManager(parsed_args.workspace_path)
self.manager.move_workspace(parsed_args.name, parsed_args.path)
sys.exit(0)
class TempestWorkspaceRemove(command.Command):
def get_description(self):
return 'Deletes the entry for a given tempest workspace --name'
def get_parser(self, prog_name):
parser = super(TempestWorkspaceRemove, self).get_parser(prog_name)
add_global_arguments(parser)
parser.add_argument('--name', required=True)
parser.add_argument('--rmdir', action='store_true',
|
Psycojoker/wanawana
|
events/emails.py
|
Python
|
gpl-3.0
| 2,656 | 0.006401 |
from django.core.mail import send_mail
from django.template.loader import render_to_string
from wanawana.utils import get_base_url
def send_admin_link_on_event_creation(request, event):
if not event.admin_email:
return
email_body = render_to_string("emails/new_event.txt", {
"url_scheme": request.META["wsgi.url_scheme"],
"base_url": get_base_url(request),
"event_slug": event.slug,
"event_admin_id": event.admin_id
})
send_mail("[WanaWana] the admin URL for your event '%s'" % (event.title),
email_body,
'noreply@%s' % get_base_url(request),
[event.admin_email]
)
def send_admin_notification_of_answer_on_event(request, event, event_attending):
if not event.admin_email or not event.send_notification_emails:
return
email_body = render_to_string("emails/event_attending_answer.txt", {
"url_scheme": request.META["wsgi.url_scheme"],
"base_url": get_base_url(request),
"event_attending": event_attending,
"event": event,
})
send_mail("[Wanawana] %s has answered '%s' to your event '%s'" % (event_attending.name, event_attending.choice, event.title),
email_body,
"noreply@%s" % get_base_url(request),
[event.admin_email])
def send_admin_notification_of_answer_modification(request, event, event_attending, modifications):
if not event.admin_email or not event.send_notification_emails or not modifications:
return
email_body = render_to_string("emails/event_attending_answer_modification.txt", {
"url_scheme": request.META["wsgi.url_scheme"],
"base_url": get_base_url(request),
"event_attending": event_attending,
"event": event,
"modifications": modifications,
})
send_mail("[Wanawana] %s has modified their answer to your event '%s'" % (event_attending.name, event.title),
email_body,
"noreply@%s" % get_base_url(request),
[event.admin_email])
def send_admin_notification_for_new_comment(request, event, comment):
if not event.admin_email or not event.send_notification_emails:
return
email_body = render_to_string("emails/event_for_admin_new_comment.txt", {
"url_scheme": request.META["wsgi.url_scheme"],
"base_url": get_base_url(request),
"event": event,
"comment": comment,
})
send_mail("[Wanawana] new comment by %s to your event '%s'" % (comment.name, event.title),
email_body,
"noreply@%s" % get_base_url(request),
[event.admin_email])
|
bnjones/Mathics
|
mathics/core/characters.py
|
Python
|
gpl-3.0
| 43,648 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Character ranges of letters
letters = 'a-zA-Z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u0103\u0106\u0107\
\u010c-\u010f\u0112-\u0115\u011a-\u012d\u0131\u0141\u0142\u0147\u0148\
\u0150-\u0153\u0158-\u0161\u0164\u0165\u016e-\u0171\u017d\u017e\
\u0391-\u03a1\u03a3-\u03a9\u03b1-\u03c9\u03d1\u03d2\u03d5\u03d6\
\u03da-\u03e1\u03f0\u03f1\u03f5\u210a-\u210c\u2110-\u2113\u211b\u211c\
\u2128\u212c\u212d\u212f-\u2131\u2133-\u2138\uf6b2-\uf6b5\uf6b7\uf6b9\
\uf6ba-\uf6bc\uf6be\uf6bf\uf6c1-\uf700\uf730\uf731\uf770\uf772\uf773\
\uf776\uf779\uf77a\uf77d-\uf780\uf782-\uf78b\uf78d-\uf78f\uf790\
\uf793-\uf79a\uf79c-\uf7a2\uf7a4-\uf7bd\uf800-\uf833\ufb01\ufb02'
# Character ranges of letterlikes
letterlikes = '\u0024\u00A1\u00A2\u00A3\u00A5\u00A7\u00A9\u00AB\u00AE\
\u00B0\u00B5\u00B6\u00B8\u00BB\u00BF\u02C7\u02D8\u2013\u2014\u2020\u2021\
\u2022\u2026\u2032\u2033\u2035\u2036\u2060\u20AC\u210F\u2122\u2127\u212B\
\u21B5\u2205\u221E\u221F\u2220\u2221\u2222\u22EE\u22EF\u22F0\u22F1\u2300\
\u2318\u231A\u23B4\u23B5\u2500\u2502\u25A0\u25A1\u25AA\u25AE\u25AF\u25B2\
\u25B3\u25BC\u25BD\u25C0\u25C6\u25C7\u25CB\u25CF\u25E6\u25FB\u25FC\u2605\
\u2639\u263A\u2660\u2661\u2662\u2663\u266D\u266E\u266F\u2736\uF3A0\uF3B8\
\uF3B9\uF527\uF528\uF720\uF721\uF722\uF723\uF725\uF749\uF74A\uF74D\uF74E\
\uF74F\uF750\uF751\uF752\uF753\uF754\uF755\uF756\uF757\uF760\uF763\uF766\
\uF768\uF769\uF76A\uF76B\uF76C\uF7D4\uF800\uF801\uF802\uF803\uF804\uF805\
\uF806\uF807\uF808\uF809\uF80A\uF80B\uF80C\uF80D\uF80E\uF80F\uF810\uF811\
\uF812\uF813\uF814\uF815\uF816\uF817\uF818\uF819\uF81A\uF81B\uF81C\uF81D\
\uF81E\uF81F\uF820\uF821\uF822\uF823\uF824\uF825\uF826\uF827\uF828\uF829\
\uF82A\uF82B\uF82C\uF82D\uF82E\uF82F\uF830\uF831\uF832\uF833\uFE35\uFE36\
\uFE37\uFE38'
# All supported longname characters
named_characters = {
'AAcute': '\u00E1',
'ABar': '\u0101',
'ACup': '\u0103',
'ADoubleDot': '\u00E4',
'AE': '\u00E6',
'AGrave': '\u00E0',
'AHat': '\u00E2',
'Aleph': '\u2135',
'AliasDelimiter': '\uF764',
'AliasIndicator': '\uF768',
'AlignmentMarker': '\uF760',
'Alpha': '\u03B1',
'AltKey': '\uF7D1',
'And': '\u2227',
'Angle': '\u2220',
'Angstrom': '\u212B',
'ARing': '\u00E5',
'AscendingEllipsis': '\u22F0',
'ATilde': '\u00E3',
'AutoLeftMatch': '\uF3A8',
'AutoOperand': '\uF3AE',
'AutoPlaceholder': '\uF3A4',
'AutoRightMatch': '\uF3A9',
'AutoSpace': '\uF3AD',
'Backslash': '\u2216',
'BeamedEighthNote': '\u266B',
'BeamedSixteenthNote': '\u266C',
'Because': '\u2235',
'Bet': '\u2136',
'Beta': '\u03B2',
'BlackBishop': '\u265D',
'BlackKing': '\u265A',
'BlackKnight': '\u265E',
'BlackPawn': '\u265F',
'BlackQueen': '\u265B',
'BlackRook': '\u265C',
'Breve': '\u02D8',
'Bullet': '\u2022',
'CAcute': '\u0107',
'CapitalAAcute': '\u00C1',
'CapitalABar': '\u0100',
'CapitalACup': '\u0102',
'CapitalADoubleDot': '\u00C4',
'CapitalAE': '\u00C6',
'CapitalAGrave': '\u00C0',
'CapitalAHat': '\u00C2',
'CapitalAlpha': '\u0391',
'CapitalARing': '\u00C5',
'CapitalATilde': '\u00C3',
'CapitalBeta': '\u0392',
'CapitalCAcute': '\u0106',
'CapitalCCedilla': '\u00C7',
'CapitalCHacek': '\u010C',
'CapitalChi': '\u03A7',
'CapitalDelta': '\u0394',
'CapitalDHacek': '\u010E',
'CapitalDifferentialD': '\uF74B',
'CapitalDigamma': '\u03DC',
'CapitalEAcute': '\u00C9',
'CapitalEBar': '\u0112',
'CapitalECup': '\u0114',
'CapitalEDoubleDot': '\u00CB',
'CapitalEGrave': '\u00C8',
'CapitalEHacek': '\u011A',
'CapitalEHat': '\u00CA',
'CapitalEpsilon': '\u0395',
'CapitalEta': '\u0397',
'CapitalEth': '\u00D0',
'CapitalGamma': '\u0393',
'CapitalIAcute': '\u00CD',
'CapitalICup': '\u012C',
'CapitalIDoubleDot': '\u00CF',
'CapitalIGrave': '\u00CC',
'CapitalIHat': '\u00CE',
'CapitalIota': '\u0399',
'CapitalKappa': '\u039A',
'CapitalKoppa': '\u03DE',
'CapitalLambda': '\u039B',
'CapitalLSlash': '\u0141',
'CapitalMu': '\u039C',
'CapitalNHacek': '\u0147',
'CapitalNTilde': '\u00D1',
'CapitalNu': '\u039D',
'CapitalOAcute': '\u00D3',
'CapitalODoubleAcute': '\u0150',
'CapitalODoubleDot': '\u00D6',
'CapitalOE': '\u0152',
'CapitalOGrave': '\u00D2',
'CapitalOHat': '\u00D4',
'CapitalOmega': '\u03A9',
'CapitalOmicron': '\u039F',
'CapitalOSlash': '\u00D8',
'CapitalOTilde': '\u00D5',
'CapitalPhi': '\u03A6',
'CapitalPi': '\u03A0',
'CapitalPsi': '\u03A8',
'CapitalRHacek': '\u0158',
'CapitalRho': '\u03A1',
'CapitalSampi': '\u03E0',
'CapitalSHacek': '\u0160',
'CapitalSigma': '\u03A3',
'CapitalStigma': '\u03DA',
'CapitalTau': '\u03A4',
'CapitalTHacek': '\u0164',
'CapitalTheta': '\u0398',
'CapitalThorn': '\u00DE',
'CapitalUAcute': '\u00DA',
'CapitalUDoubleAcute': '\u0170',
'CapitalUDoubleDot': '\u00DC',
'CapitalUGrave': '\u00D9',
'CapitalUHat': '\u00DB',
'CapitalUpsilon': '\u03A5',
'CapitalURing': '\u016E',
'CapitalXi': '\u039E',
'CapitalYAcute': '\u00DD',
'CapitalZeta': '\u0396',
'CapitalZHacek': '\u017D',
'Cap': '\u2322',
'CCedilla': '\u00E7',
'Cedilla': '\u00B8',
'CenterDot': '\u00B7',
'CenterEllipsis': '\u22EF',
'Cent': '\u00A2',
'CHacek': '\u010D',
'Checkmark': '\u2713',
'Chi': '\u03C7',
'CircleDot': '\u2299',
'CircleMinus': '\u2296',
'CirclePlus': '\u2295',
'CircleTimes': '\u2297',
'ClockwiseContourIntegral': '\u2232',
'CloseCurlyDoubleQuote': '\u201D',
'CloseCurlyQuote': '\u2019',
'CloverLeaf': '\u2318',
'ClubSuit': '\u2663',
'Colon': '\u2236',
'CommandKey': '\uF76A',
'Congruent': '\u2261',
'Conjugate': '\uF3C8',
'ConjugateTranspose': '\uF3C9',
'ConstantC': '\uF7DA',
'Continuation': '\uF3B1',
'ContourIntegral': '\u222E',
'ControlKey': '\uF763',
'Coproduct': '\u2210',
'Copyright': '\u00A9',
'CounterClockwiseContourIntegral': '\u2233',
'Cross': '\uF4A0',
'CupCap': '\u224D',
'Cup': '\u2323',
'CurlyCapitalUpsilon': '\u03D2',
'CurlyEpsilon': '\u03B5',
'CurlyKappa': '\u03F0',
'CurlyPhi': '\u03C6',
'CurlyPi': '\u03D6',
'CurlyRho': '\u03F1',
'CurlyTheta': '\u03D1',
'Currency': '\u00A4',
'Dagger': '\u2020',
'Dalet': '\u2138',
'Dash': '\u2013',
'Degree': '\u00B0',
'DeleteKey': '\uF7D0',
'Del': '\u2207',
'Delta': '\u03B4',
'DescendingEllipsis': '\u22F1',
'DHacek': '\u010F',
'Diameter': '\u2300',
'Diamond': '\u22C4',
'DiamondSuit': '\u2662',
'DifferenceDelta': '\u2206',
'DifferentialD': '\uF74C',
'Digamma': '\u03DD',
'DiscreteRatio': '\uF4A4',
'DiscreteShift': '\uF4A3',
'DiscretionaryHyphen': '\u00AD',
'DiscretionaryLineSeparator': '\uF76E',
'DiscretionaryParagraphSeparator': '\uF76F',
'Divide': '\u00F7',
'DotEqual': '\u2250',
'DotlessI': '\u0131',
'DotlessJ': '\uF700',
'DottedSquare': '\uF751',
'DoubleContourIntegral': '\u222F',
'DoubleDagger': '\u2021',
'DoubledGamma': '\uF74A',
'DoubleDownArrow': '\u21D3',
'DoubledPi': '\uF749',
'DoubleLeftArrow': '\u21D0',
'DoubleLeftRightArrow': '\u21D4',
'DoubleLeftTee': '\u2AE4',
'DoubleLongLeftArrow': '\u27F8',
'DoubleLongLeftRightArrow': '\u27FA',
'DoubleLongRightArrow': '\u27F9',
'DoublePrime': '\u2033',
'DoubleRightArrow': '\u21D2',
'DoubleRightTee': '\u22A8',
'DoubleStruckA': '\uF6E6',
'DoubleStruckB': '\uF6E7',
'DoubleStruckC': '\uF6E8',
'DoubleStruckCapitalA': '\uF7A4',
'DoubleStruckCapitalB': '\uF7A5',
'DoubleStruckCapitalC': '\uF7A6',
'DoubleStruckCapitalD': '\uF7A7',
'DoubleStruckCapitalE': '\uF7A8',
'DoubleStruckCapitalF': '\uF7A9',
'DoubleStruckCapitalG': '\uF7AA',
'DoubleStruckCapitalH': '\uF7AB',
'DoubleStruckCapitalI': '\uF7AC',
'DoubleStruckCapitalJ': '\uF7AD',
'DoubleStruckCapitalK': '\uF7AE',
'DoubleStruckCapitalL':
|
yast/yast-python-bindings
|
examples/PatternSelector-empty.py
|
Python
|
gpl-2.0
| 722 | 0.01385 |
# encoding: utf-8
# Simple example for PatternSelector
from yast import import_module
import_module('UI')
from yast import *
class PatternSelectorEmptyClient:
def main(self):
if not UI.HasSpecialWidget("PatternSelector"):
UI.OpenDialog(
VBox(
Label("Error: This UI doesn't support the PatternSelector widget!"),
PushButton(Opt("default"), "&OK")
)
)
UI.UserInput()
UI.CloseDialog()
return
UI.OpenDialog(Opt("defaultsize"), PatternSelector(Id("selector")))
input = UI.RunPkgSelection(Id("selector"))
UI.CloseDialog()
ycpbuiltins.y2milestone("Input: %1", input)
PatternSelectorEmptyClient().main()
|
super3/PyDev
|
Class/IsoGame/main.py
|
Python
|
mit
| 2,825 | 0.044248 |
# Name: Main.py
# Author: Shawn Wilkinson
# Imports
from random import choice
# Welcome Message
print("Welcome to the Isolation Game!")
# Make Board
board = [['' for col in range(4)] for row in range(4)]
# Print Board Function
def show():
	# Tmp string
tmpStr = ""
# Loop through board
tmpStr += "\nBoard:"
tmpStr += "\n-------------------------\n"
for x in range(4):
for y in range(4):
if board[x][y] == '':
tmpStr += " |"
else:
tmpStr += str( board[x][y] ) + "|"
tmpStr += "\n-------------------------\n"
# Return tmp string
print(tmpStr)
# Select Vars
player = ''
ai = ''
isXO = ''
firstPlay = ''
playBool = False
# Loop For XO State
while 1:
isXO = str(input("Am I 'x' or 'o'? "))
if isXO == 'x':
		ai = 'x'
player = 'o'
break
elif isXO == 'o':
ai = 'o'
player = 'x'
break
else:
print("Invalid choice. Try again.")
# Loop For First State
while 1:
firstPlay = input("Do I got first ('y' or 'n')? ")
if firstPlay == 'y':
playBool = True
break
elif firstPlay == 'n':
playBool = False
break
else:
print("Invalid choice. Try again.")
# Start and Show Board
board[0][0] = 'x'
board[3][3] = 'o'
show()
# Move Functions
def aiMove():
# Possible Moves
possMoves = []
# Find Loc
for x in range(4):
for y in range(4):
if board[x][y] == ai:
locX = int(x)
locY = int(y)
# Horizontal
for x in range(-4, 5):
if (locX + x) < 4 and (locX + x) >= 0:
if board[locX + x][locY] == "#" or board[locX + x][locY] == player:
break
if board[locX + x][locY] == '':
possMoves.append( [locX + x, locY] )
# Vertical
for y in range(-4, 5):
if (locY + y) < 4 and (locY + y) >= 0:
if board[locX][locY + y] == "#" or board[locX][locY + y] == player:
break
if board[locX][locY + y] == '':
possMoves.append( [locX, locY + y] )
# Diagonal
for dia in range(-4, 5):
if (locY + dia) < 4 and (locY + dia) >= 0:
if (locX + dia) < 4 and (locX + dia) >= 0:
				if board[locX + dia][locY + dia] == "#" or board[locX + dia][locY + dia] == player:
break
if board[locX + dia][locY + dia] == '':
possMoves.append( [locX + dia, locY + dia] )
# Possible Moves Len
print("Possible Moves: " + str(len(possMoves)))
if(len(possMoves) == 0):
print("Gave Over!")
move = choice(possMoves)
print(move)
moveX = move[0]
moveY = move[1]
print("Move Choice" + str(moveX) + ":" + str(moveY))
board[moveX][moveY] = ai
# Clear Old Space
board[locX][locY] = '#'
def playerMove(PosX, PosY):
for x in range(4):
for y in range(4):
if board[x][y] == player:
board[x][y] = '#'
board[PosX][PosY] = player
# Game Loop
while 1:
if playBool == True:
aiMove()
playBool = False
else:
x = int(input("Enter Player X:"))
y = int(input("Enter Player Y:"))
playerMove(x, y)
playBool = True
show()
|
Toilal/mailinabox
|
tools/mail.py
|
Python
|
cc0-1.0
| 4,331 | 0.027938 |
#!/usr/bin/python3
import sys, getpass, urllib.request, urllib.error, json
def mgmt(cmd, data=None, is_json=False):
# The base URL for the management daemon. (Listens on IPv4 only.)
mgmt_uri = 'http://127.0.0.1:10222'
setup_key_auth(mgmt_uri)
req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None)
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
if e.code == 401:
try:
print(e.read().decode("utf8"))
except:
pass
print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr)
elif hasattr(e, 'read'):
print(e.read().decode('utf8'), file=sys.stderr)
else:
print(e, file=sys.stderr)
sys.exit(1)
resp = response.read().decode('utf8')
if is_json: resp = json.loads(resp)
return resp
def read_password():
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
while first != second:
print('Passwords not the same. Try again.')
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
return first
def setup_key_auth(mgmt_uri):
key = open('/var/lib/mailinabox/api.key').read().strip()
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(
realm='Mail-in-a-Box Management Server',
uri=mgmt_uri,
user=key,
passwd='')
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
if len(sys.argv) < 2:
print("Usage: ")
print(" tools/mail.py user (lists users)")
print(" tools/mail.py user add user@domain.com [password]")
print(" tools/mail.py user password user@domain.com [password]")
print(" tools/mail.py user remove user@domain.com")
print(" tools/mail.py user make-admin user@domain.com")
print(" tools/mail.py user remove-admin user@domain.com")
print(" tools/mail.py user admins (lists admins)")
print(" tools/mail.py alias (lists aliases)")
print(" tools/mail.py alias add incoming.name@domain.com sent.to@other.domain.com")
print(" tools/mail.py alias add incoming.name@domain.com 'sent.to@other.domain.com, multiple.people@other.domain.com'")
print(" tools/mail.py alias remove incoming.name@domain.com")
print()
print("Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login.")
print()
elif sys.argv[1] == "user" and len(sys.argv) == 2:
# Dump a list of users, one per line. Mark admins with an asterisk.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if user['status'] == 'inactive': continue
print(user['email'], end='')
if "admin" in user['privileges']:
print("*", end='')
print()
elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"):
if len(sys.argv) < 5:
if len(sys.argv) < 4:
email = input("email: ")
else:
email = sys.argv[3]
pw = read_password()
else:
email, pw = sys.argv[3:5]
if sys.argv[2] == "add":
print(mgmt("/mail/users/add", { "email": email, "password": pw }))
elif sys.argv[2] == "password":
print(mgmt("/mail/users/password", { "email": email, "password": pw }))
elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/users/remove
|
", { "email": sys.argv[3] }))
elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin", "remove-admin") and len(sys.argv) == 4:
if sys.argv[2] == "make-admin":
action = "add"
else:
action = "remove"
print(mgmt("/mail/user
|
s/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" }))
elif sys.argv[1] == "user" and sys.argv[2] == "admins":
# Dump a list of admin users.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if "admin" in user['privileges']:
print(user['email'])
elif sys.argv[1] == "alias" and len(sys.argv) == 2:
print(mgmt("/mail/aliases"))
elif sys.argv[1] == "alias" and sys.argv[2] == "add" and len(sys.argv) == 5:
print(mgmt("/mail/aliases/add", { "source": sys.argv[3], "destination": sys.argv[4] }))
elif sys.argv[1] == "alias" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/aliases/remove", { "source": sys.argv[3] }))
else:
print("Invalid command-line arguments.")
sys.exit(1)
|
oxc/Flexget
|
flexget/plugins/output/rapidpush.py
|
Python
|
mit
| 6,452 | 0.00124 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.utils import json
from flexget.utils.template import RenderError
from flexget.config_schema import one_or_more
log = logging.getLogger('rapidpush')
url = 'https://rapidpush.net/api'
class OutputRapidPush(object):
"""
Example::
rapidpush:
apikey: xxxxxxx (can also be a list of api keys)
[category: category, default FlexGet]
[title: title, default New release]
[group: device group, default no group]
[message: the message, default {{title}}]
        [channel: the broadcast notification channel, if provided it will be sent to the channel subscribers instead of
your devices, default no channel]
[priority: 0 - 6 (6 = highest), default 2 (normal)]
[notify_accepted: boolean true or false, default true]
[notify_rejected: boolean true or false, default false]
[notify_failed: boolean true or false, default false]
[notify_undecided: boolean true or false, default false]
Configuration parameters are also supported from entries (eg. through set).
"""
schema = {
'type': 'object',
'properties': {
'apikey': one_or_more({'type': 'string'}),
'category': {'type': 'string', 'default': 'Flexget'},
'title': {'type': 'string', 'default': 'New Release'},
'group': {'type': 'string', 'default': ''},
'channel': {'type': 'string', 'default': ''},
'priority': {'type': 'integer', 'default': 2},
'message': {'type': 'string', 'default': '{{title}}'},
'notify_accepted': {'type': 'boolean', 'default': True},
'notify_rejected': {'type': 'boolean', 'default': False},
'notify_failed': {'type': 'boolean', 'default': False},
'notify_undecided': {'type': 'boolean', 'default': False}
},
'additionalProperties': False,
'required': ['apikey']
}
# Run last to make sure other outputs are successful before sending notification
@plugin.priority(0)
def on_task_output(self, task, config):
# get the parameters
if config['notify_accepted']:
log.debug("Notify accepted entries")
self.process_notifications(task, task.accepted, config)
if config['notify_rejected']:
log.debug("Notify rejected entries")
self.process_notifications(task, task.rejected, config)
if config['notify_failed']:
log.debug("Notify failed entries")
self.process_notifications(task, task.failed, config)
if config['notify_undecided']:
log.debug("Notify undecided entries")
self.process_notifications(task, task.undecided, config)
# Process the given events.
def process_notifications(self, task, entries, config):
for entry in entries:
if task.options.test:
log.info("Would send RapidPush notification about: %s", entry['title'])
continue
log.info("Send RapidPush notification about: %s", entry['title'])
apikey = entry.get('apikey', config['apikey'])
if isinstance(apikey, list):
apikey = ','.join(apikey)
title = config['title']
try:
title = entry.render(title)
except RenderError as e:
log.error('Error setting RapidPush title: %s' % e)
message = config['message']
try:
message = entry.render(message)
except RenderError as e:
log.error('Error setting RapidPush message: %s' % e)
# Check if we have to send a normal or a broadcast notification.
if not config['channel']:
priority = entry.get('priority', config['priority'])
category = entry.get('category', config['category'])
try:
category = entry.render(category)
except RenderError as e:
log.error('Error setting RapidPush category: %s' % e)
group = entry.get('group', config['group'])
try:
group = entry.render(group)
except RenderError as e:
log.error('Error setting RapidPush group: %s' % e)
# Send the request
data_string = json.dumps({
'title': title,
'message': message,
'priority': priority,
'category': category,
'group': group})
data = {'apikey': apikey, 'command': 'notify', 'data': data_string}
else:
channel = config['channel']
try:
channel = entry.render(channel)
except RenderError as e:
log.error('Error setting RapidPush channel: %s' % e)
# Send the broadcast request
data_string = json.dumps({
'title': title,
'message': message,
'channel': channel})
data = {'apikey': apikey, 'command': 'broadcast', 'data': data_string}
try:
response = task.requests.post(url, data=data, raise_status=False)
except RequestException as e:
log.error('Error sending data to rapidpush: %s' % e)
continue
json_data = response.json()
if 'code' in json_data:
if json_data['code'] == 200:
log.debug("RapidPush message sent")
else:
log.error(json_data['desc'] + " (" + str(json_data['code']) + ")")
else:
for item in json_data:
if json_data[item]['code'] == 200:
log.debug(item + ": RapidPush message sent")
else:
log.error(item + ": " + json_data[item]['desc'] + " (" + str(json_data[item]['code']) + ")")
@event('plugin.register')
def register_plugin():
plugin.register(OutputRapidPush, 'rapidpush', api_ver=2)
|
dtrodrigues/nifi-minifi-cpp
|
docker/test/integration/minifi/core/RemoteProcessGroup.py
|
Python
|
apache-2.0
| 353 | 0 |
import uuid
class RemoteProcessGroup(object):
    def __init__(self, url, name=None):
self.uuid = uuid.uuid4()
if name is None:
self.name = str(self.uuid)
else:
            self.name = name
self.url = url
def get_name(self):
return self.name
def get_uuid(self):
return self.uuid
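# Hedged usage sketch (not from the original file; the URL and group name are
# illustrative assumptions only):
#   rpg = RemoteProcessGroup('http://nifi-host:8080/nifi', name='remote-group')
#   rpg.get_name()   # -> 'remote-group'
#   rpg.get_uuid()   # -> the uuid.UUID created in __init__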
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/service-workers/service-worker/resources/update-claim-worker.py
|
Python
|
bsd-3-clause
| 661 | 0.001513 |
import time
script = u'''
// Time stamp: %s
// (This ensures the source text is *not* a byte-for-byte match with any
// previously-fetched version of this script.)
// This no-op fetch handler is necessary to bypass explicitly the no fetch
// handler optimization by which this service worker script can be skipped.
addEventListener('fetch', event => {
return;
});
addEventListener('install', event => {
event.waitUntil(self.skipWaiting());
});
addEventListener('activate', event => {
event.waitUntil(self.clients.claim());
});'''
def main(request, response):
return [(b'Content-Type', b'application/javascript')], script % time.time()
|
TheWardoctor/Wardoctors-repo
|
script.stargate.guide/ResizeLogos.py
|
Python
|
apache-2.0
| 1,738 | 0.010357 |
import xbmc
import xbmcgui
import xbmcaddon
import xbmcvfs
from PIL import Image, ImageOps
ADDON = xbmcaddon.Addon(id = 'script.stargate.guide')
def autocrop_image(infile,outfile):
infile = xbmc.translatePath(infile)
image = Image.open(infile)
border = 0
size = image.size
bb_image = image
bbox = bb_image.getbbox()
if (size[0] == bbox[2]) and (size[1] == bbox[3]):
bb_image=bb_image.convert("RGB")
bb_image = ImageOps.invert(bb_image)
bbox = bb_image.getbbox()
image = image.crop(bbox)
(width, height) = image.size
width += border * 2
height += border * 2
ratio = float(width)/height
cropped_image = Image.new("RGBA", (width, height), (0,0,0,0))
cropped_image.paste(image, (border, border))
#TODO find epg height
logo_height = 450 / int(ADDON.getSetting('channels.per.page'))
logo_height = logo_height - 2
cropped_image = cropped_image.resize((int(logo_height*ratio), logo_height),Image.ANTIALIAS)
outfile = xbmc.translatePath(outfile)
cropped_image.save(outfile)
d = xbmcgui.Dialog()
old_path = d.browse(0, 'Source Logo Folder', 'files', '', False, False, 'special://home/')
if not old_path:
quit()
new_path = d.browse(0, 'Destination Logo Folder', 'files', '', False, False,'special://home/')
if not new_path or old_path == new_path:
quit()
dirs, files = xbmcvfs.listdir(old_path)
p = xbmcgui.DialogProgressBG()
p.create('TVGF', 'Processing Logos')
images = [f for f in files if f.endswith('.png')]
total = len(images)
i = 0
for f in images:
infile = old_path+f
outfile = new_path+f
autocrop_image(infile,outfile)
percent = 100.0 * i / total
i = i+1
p.update(int(percent),"TVGF",f)
p.close()
|
Partoo/scrapy
|
scrapy/contrib_exp/iterators.py
|
Python
|
bsd-3-clause
| 1,404 | 0.000712 |
from scrapy.http import Response
from scrapy.selector import Selector
def xmliter_lxml(obj, nodename, namespace=None, prefix='x'):
from lxml import etree
reader = _StreamReader(obj)
tag = '{%s}%s' % (namespace, nodename) if namespace else nodename
iterable = etree.iterparse(reader, tag=tag, encoding=reader.encoding)
selxpath = '//' + ('%s:%s' % (prefix, nodename) if namespace else nodename)
for _, node in iterable:
nodetext = etree.tostring(node)
node.clear()
xs = Selector(text=nodetext, type='xml')
if namespace:
xs.register_namespace(prefix, namespace)
yield xs.xpath(selxpath)[0]
class _StreamReader(object):
def __init__(self, obj):
self._ptr = 0
if isinstance(obj, Response):
self._text, self.encoding = obj.body, obj.encoding
else:
self._text, self.encoding = obj, 'utf-8'
self._is_unicode = isinstance(self._text, unicode)
def read(self, n=65535):
self.read = self._read_unicode if self._is_unicode else self._read_string
return self.read(n).lstrip()
def _read_string(self, n=65535):
s, e = self._ptr, self._ptr + n
self._ptr = e
return self._text[s:e]
def _read_unicode(self, n=65535):
s, e = self._ptr, self._ptr + n
self._ptr = e
return self._text[s:e].encode('utf-8')
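# Hedged usage sketch (not from the original file; assumes `response` is a
# scrapy Response whose body contains <item> nodes):
#   for node in xmliter_lxml(response, 'item'):
#       print(node.xpath('./title/text()').extract())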
|
hasgeek/hasjob
|
instance/testing.py
|
Python
|
agpl-3.0
| 602 | 0 |
import os
#: The title of this site
SITE_TITLE = 'Job Board'
#: Database backend
SQLALCHEMY_DATABASE_URI = 'postgresql:///hasjob_testing'
SERVER_NAME = 'hasjob.travis.local:5000'
#: LastUser server
LASTUSER_SERVER = 'https://hasgeek.com/'
#: LastUser client id
LASTUSER_CLIENT_ID = os.environ.get('LASTUSER_CLIENT_ID', '')
#: LastUser client secret
LASTUSER_CLIENT_SECRET = os.environ.get('LASTUSER_CLIENT_SECRET', '')
STATIC_SUBDOMAIN = 'static'
ASSET_SERVER = 'https://static.hasgeek.co.in/'
ASSET_MANIFEST_PATH = "static/build/manifest.json"
# no trailing slash
ASSET_BASE_PATH = '/static/build'
|
FinalsClub/karmaworld
|
karmaworld/apps/document_upload/migrations/0002_auto__add_field_rawdocument_user.py
|
Python
|
agpl-3.0
| 6,785 | 0.008106 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('users', '0001_initial'),
)
def forwards(self, orm):
# Adding field 'RawDocument.user'
db.add_column('document_upload_rawdocument', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['users.KarmaUser'], null=True, on_delete=models.SET_NULL),
keep_default=False)
def backwards(self, orm):
# Deleting field 'RawDocument.user'
db.delete_column('document_upload_rawdocument', 'user_id')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courses.course': {
'Meta': {'ordering': "['-file_count', 'school', 'name']", 'unique_together': "(('school', 'name', 'instructor_name'),)", 'object_name': 'Course'},
            'academic_year': ('django.db.models.fields.IntegerField', [], {'default': '2013', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'max_length': '511', 'null': 'True', 'blank': 'True'}),
'file_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'instructor_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.School']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150', 'null': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '511', 'null': 'True', 'blank': 'True'})
},
'courses.school': {
'Meta': {'ordering': "['-file_count', '-priority', 'name']", 'object_name': 'School'},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'file_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'priority': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '511', 'blank': 'True'}),
'usde_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'document_upload.rawdocument': {
'Meta': {'ordering': "['-uploaded_at']", 'object_name': 'RawDocument'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'fp_file': ('django_filepicker.models.FPFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.KarmaUser']", 'null': 'True', 'on_delete': 'models.SET_NULL'})
},
'taggit.tag': {
'Meta': {'ordering': "['namespace', 'name']", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'namespace': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'users.karmauser': {
'Meta': {'object_name': 'KarmaUser'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['document_upload']
|
chen0040/pyalgs
|
pyalgs/__init__.py
|
Python
|
bsd-3-clause
| 291 | 0.006873 |
# -*- coding: utf-8 -*-
"""
pyalgs
~~~~~
pyalgs provides the python implementation of the Robert Sedgwick's Coursera course on Algorithms (Part I and Part II).
:copyright: (c) 2017 by Xianshun Chen.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.0.14'
|
funbaker/astropy
|
astropy/table/__init__.py
|
Python
|
bsd-3-clause
| 2,381 | 0.00378 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .. import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table`.
"""
auto_colname = _config.ConfigItem(
'col{0}',
'The template that determines the name of a column if it cannot be '
'determined. Uses new-style (format method) string formatting.',
aliases=['astropy.table.column.auto_colname'])
default_notebook_table_class = _config.ConfigItem(
'table-striped table-bordered table-condensed',
'The table class to be used in Jupyter notebooks when displaying '
'tables (and not overridden). See <http://getbootstrap.com/css/#tables '
'for a list of useful bootstrap classes.')
replace_warnings = _config.ConfigItem(
['slice'],
'List of conditions for issuing a warning when replacing a table '
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
'list',
)
replace_inplace = _config.ConfigItem(
False,
'Always use in-place update of a table column when using setitem, '
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases."
)
conf = Conf()
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
from .groups import TableGroups, ColumnGroups
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning)
from .operations import join, setdiff, hstack, vstack, unique, TableMergeError
from .bst import BST, FastBST, FastRBT
from .sorted_array import SortedArray
from .serialize import SerializedColumn
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from ..io import registry
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
from .jsviewer import JSViewer
from ..io.ascii import connect
from ..io.fits import connect
from ..io.misc import connect
from ..io.votable import connect
|
skade/scrapy-elasticsearch-bulk-item-exporter
|
scrapyelasticsearch.py
|
Python
|
bsd-3-clause
| 473 | 0.023256 |
"""An Itemexporter for scrapy that wri
|
tes elasticsearch bulk format"""
from scrapy.contrib.exporter import BaseItemExporter
from scrapy.contrib.exporter import JsonLinesItemExporter
class ElasticSearchBulkItemExporter(JsonLinesItemExporter):
def export_item(self, item):
requestdict = { "index": { "typ
|
e": item.__class__.__name__ } }
self.file.write(self.encoder.encode(requestdict) + '\n')
super(ElasticSearchBulkItemExporter, self).export_item(item)
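# Hedged wiring sketch (not from the original file): one way to expose the
# exporter to a Scrapy project is via the FEED_EXPORTERS setting; the format
# name 'esbulk' below is an assumption, not something this module defines.
#   FEED_EXPORTERS = {
#       'esbulk': 'scrapyelasticsearch.ElasticSearchBulkItemExporter',
#   }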
|
greyfenrir/taurus
|
bzt/resources/locustio-taurus-wrapper.py
|
Python
|
apache-2.0
| 4,812 | 0.001455 |
#! /usr/bin/env python
import csv
import json
import os
import sys
import time
from collections import OrderedDict
from locust import main, events
from locust.exception import StopUser
from requests.exceptions import HTTPError
from bzt.utils import guess_csv_dialect
class LocustStarter(object):
def __init__(self):
super(LocustStarter, self).__init__()
self.fhd = None
self.writer = None
self.runner = None
self.locust_start_time = None
self.locust_stop_time = None
self.locust_duration = sys.maxsize
self.num_requests = sys.maxsize
if os.getenv("LOCUST_DURATION"):
self.locust_duration = float(os.getenv("LOCUST_DURATIO
|
N"))
if os.getenv("LOCUST_NUMREQUESTS"):
self.num_requests = float(os.getenv("LOCUST_NUMREQUESTS"))
def __check_limits(self):
        if self.locust_start_time is None:
self.locust_start_time = time.time()
# Only raise an exception if the actual test is running
if self.locust_stop_time is None:
if time.time() - self.locust_start_time >= self.locust_duration:
raise StopUser('Duration limit reached')
if self.num_requests <= 0:
raise StopUser('Request limit reached')
def __getrec(self, request_type, name, response_time, response_length, exc=None):
rcode = '200' if exc is None else '500'
rmsg = 'OK' if exc is None else '%s' % exc
if isinstance(exc, HTTPError):
exc_message = str(exc)
rcode = exc_message[:exc_message.index(' ')]
rmsg = exc_message[exc_message.index(':') + 2:]
if isinstance(response_time, float):
response_time = int(round(response_time))
return OrderedDict([
('allThreads', self.runner.user_count if self.runner else 0),
('timeStamp', "%d" % (time.time() * 1000)),
('label', name),
('method', request_type),
('elapsed', response_time),
('bytes', response_length),
('responseCode', rcode),
('responseMessage', rmsg),
('success', 'true' if exc is None else 'false'),
# NOTE: might be resource-consuming
('Latency', 0),
])
def __on_init(self, **args):
if 'runner' in args:
self.runner = args['runner']
def __on_request_success(self, request_type, name, response_time, response_length, **args):
self.num_requests -= 1
self.writer.writerow(self.__getrec(request_type, name, response_time, response_length))
self.fhd.flush()
self.__check_limits()
def __on_request_failure(self, request_type, name, response_time, exception, response_length=0, **args):
self.num_requests -= 1
self.writer.writerow(self.__getrec(request_type, name, response_time, response_length, exception))
self.fhd.flush()
self.__check_limits()
def __on_exception(self, locust_instance, exception, tb, **args):
del locust_instance, tb
self.__on_request_failure('', '', 0, exception)
def __on_worker_report(self, client_id, data, **args):
if data['stats'] or data['errors']:
for item in data['stats']:
self.num_requests -= item['num_requests']
data['client_id'] = client_id
self.fhd.write("%s\n" % json.dumps(data))
self.fhd.flush()
self.__check_limits()
def __on_quit(self, **args):
self.locust_stop_time = time.time()
def execute(self):
events.init.add_listener(self.__on_init)
events.user_error.add_listener(self.__on_exception)
events.worker_report.add_listener(self.__on_worker_report)
events.quitting.add_listener(self.__on_quit)
if os.getenv("JTL"): # regular locust worker
fname = os.getenv("JTL")
self.fhd = open(fname, 'wt')
fieldnames = list(self.__getrec(None, None, None, None).keys())
dialect = guess_csv_dialect(",".join(fieldnames))
self.writer = csv.DictWriter(self.fhd, fieldnames=fieldnames, dialect=dialect)
self.writer.writeheader()
events.request.add_listener(self.__on_request_success)
events.request.add_listener(self.__on_request_failure)
elif os.getenv("WORKERS_LDJSON"): # master of distributed mode
fname = os.getenv("WORKERS_LDJSON")
self.fhd = open(fname, 'wt')
self.writer = None
else:
raise ValueError("Please specify JTL or WORKERS_LDJSON environment variable")
main.main()
self.fhd.close()
if __name__ == '__main__':
locust_starter = LocustStarter()
locust_starter.execute()
|
itielshwartz/BackendApi
|
lib/simplejson/tests/test_item_sort_key.py
|
Python
|
apache-2.0
| 1,154 | 0.004333 |
from unittest import TestCase
from operator import itemgetter
import simplejson as json
class TestItemSortKey(TestCase):
def test_simple_first(self):
        a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog',
'zeak': 'oh'}
self.assertEqual(
'{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
json.dumps(a, item_sort_key=json.simple_first))
def test_case(self):
a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog',
'zeak': 'oh'}
self.assertEqual(
'{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=itemgetter(0)))
self.assertEqual(
'{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
|
haroldtreen/python_koans
|
runner/path_to_enlightenment.py
|
Python
|
mit
| 1,482 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Functions to load the test cases ("koans") that make up the
Path to Enlightenment.
'''
import io
import unittest
# The path to enlightenment starts with the following:
KOANS_FILENAME = 'koans.txt'
def filter_koan_names(lines):
'''
Strips leading and trailing whitespace, then filters out blank
lines and comment lines.
'''
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
if line:
yield line
return
def names_from_file(filename):
'''
Opens the given ``filename`` and yields the fully-qualified names
of TestCases found inside (one per line).
'''
with io.open(filename, 'rt', encoding='utf8') as names_file:
for name in filter_koan_names(names_file):
yield name
return
def koans_suite(names):
'''
Returns a ``TestSuite`` loaded with all tests found in the given
``names``, preserving the order in which they are found.
'''
suite = unittest.TestSuite()
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
for name in names:
        tests = loader.loadTestsFromName(name)
suite.addTests(tests)
return suite
def koans(filename=KOANS_FILENAME):
'''
Returns a ``TestSuite`` loaded with all the koans (``TestCase``s)
listed in ``filename``.
'''
    names = names_from_file(filename)
return koans_suite(names)
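# Hedged usage sketch (not from the original file): running the assembled
# suite with the standard library test runner.
#   import unittest
#   unittest.TextTestRunner(verbosity=2).run(koans())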
|
seccom-ufsc/hertz
|
hertz/settings.py
|
Python
|
mit
| 3,237 | 0.001236 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['HERTZ_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ['HERTZ_DEBUG'] != 'False'
ALLOWED_HOSTS = ['*' if DEBUG else os.environ['HERTZ_HOST']]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'attendance',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hertz.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hertz.wsgi.application'
# Database
if 'DATABASE_HOST' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': os.environ['POSTGRES_USER'],
'PASSWORD': os.environ['POSTGRES_PASSWORD'],
'HOST': os.environ['DATABASE_HOST'],
'PORT': 5432,
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
|
karaage0703/denpa-gardening
|
send_mail.py
|
Python
|
mit
| 1,390 | 0.003644 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# send mail utf-8 using gmail smtp server /w jpegs
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.Header import Header
from email.Utils import formatdate
import smtplib
def send_email_with_jpeg(from_addr, to_addr, subject, body, jpegs=[], server='smtp.gmail.com', port=587):
encoding='utf-8'
msg = MIMEMultipart()
mt = MIMEText(body.encode(encoding), 'plain', encoding)
if jpegs:
for fn in jpegs:
img = open(fn, 'rb').read()
            mj = MIMEImage(img, 'jpeg', filename=fn)
mj.add_header("Content-Disposition", "attachment", filename=fn)
msg.attach(mj)
msg.attach(mt)
else:
msg = mt
msg['Subject'] = Header(subject, encoding)
msg['From'] = from_addr
msg['To'] = to_addr
msg['Date'] = formatdate()
_user = "xxxx@gmail.com"
_pass = "xxxxxxxxxxxxxxxxxxxx"
smtp = smtplib.SMTP(server, port)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(_user, _pass)
smtp.sendmail(from_addr, [to_addr], msg.as_string())
smtp.close()
###
if __name__ == '__main__':
body = u'\n%s\n' % (u'写真')
js = ['test.jpeg']
send_email_with_jpeg('xxxx@gmail.com', 'xxxx@blog.hatena.ne.jp', u'今日の家庭菜園', body, js)
|
WorkflowConversion/CTDConverter
|
ctdconverter/common/exceptions.py
|
Python
|
gpl-3.0
| 884 | 0 |
#!/usr/bin/env python
"""
@author: delagarza
"""
from CTDopts.CTDopts import ModelError
class CLIError(Exception):
# Generic exception to raise and log different fatal errors.
def __init__(self, msg):
super(CLIError).__init__(type(self))
self.msg = "E: %s" % msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
class InvalidModelException(ModelError):
def __init__(self, message):
super().__init__()
self.message = message
def __str__(self):
return self.message
def __repr__(self):
return self.message
class ApplicationException(Exception):
def __init__(self, msg):
super(ApplicationException).__init__(type(self))
self.msg = msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
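# Hedged usage sketch (not from the original file; the error message is an
# illustrative assumption):
#   try:
#       raise ApplicationException('input CTD file could not be parsed')
#   except ApplicationException as e:
#       print(e)   # -> input CTD file could not be parsed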
|
bmcculley/splinter
|
tests/test_zopetestbrowser.py
|
Python
|
bsd-3-clause
| 5,355 | 0.000378 |
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
import sys
from splinter import Browser
from .base import BaseBrowserTests
from .fake_webapp import EXAMPLE_APP
from .is_element_present_nojs import IsElementPresentNoJSTest
@unittest.skipIf(
sys.version_info[0] > 2,
"zope.testbrowser is not currently compatible with Python 3",
)
class ZopeTestBrowserDriverTest(
BaseBrowserTests, IsElementPresentNoJSTest, unittest.TestCase
):
@classmethod
def setUpClass(cls):
cls.browser = Browser("zope.testbrowser", wait_time=0.1)
def setUp(self):
self.browser.visit(EXAMPLE_APP)
@classmethod
def tearDownClass(self):
self.browser.quit()
def test_should_support_with_statement(self):
with Browser("zope.testbrowser"):
pass
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
)
self.browser.attach_file("file", file_path)
self.browser.find_by_name("upload").click()
html = self.browser.html
self.assertIn("text/plain", html)
self.assertIn(open(file_path).read().encode("utf-8"), html)
def test_forward_to_none_page(self):
"should not fail when trying to forward to none"
browser = Browser("zope.testbrowser")
browser.visit(EXAMPLE_APP)
browser.forward()
        self.assertEqual(EXAMPLE_APP, browser.url)
browser.quit()
def test_cant_switch_to_frame(self):
"zope.testbrowser should not be able to switch to frames"
with self.assertRaises(NotImplementedError) as cm:
self.browser.get_iframe("frame_123")
self.fail()
e = cm.exception
self.assertEqual("zope.testbrowser doesn't support frames.", e.args[0])
def test_simple_type(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method")
def test_simple_type_on_element(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").type("with type method")
def test_can_clear_password_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("password").first.clear()
def test_can_clear_tel_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("telephone").first.clear()
def test_can_clear_text_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").first.clear()
def test_slowly_typing(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method", slowly=True)
def test_slowly_typing_on_element(self):
"""
zope.testbrowser won't support type method
        on element because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
query = self.browser.find_by_name("query")
query.type("with type method", slowly=True)
def test_cant_mouseover(self):
"zope.testbrowser should not be able to put the mouse over the element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_over()
def test_cant_mouseout(self):
"zope.testbrowser should not be able to mouse out of an element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_out()
def test_links_with_nested_tags_xpath(self):
links = self.browser.find_by_xpath('//a/span[text()="first bar"]/..')
self.assertEqual(
len(links),
1,
'Found not exactly one link with a span with text "BAR ONE". %s'
% (map(lambda item: item.outer_html, links)),
)
def test_finding_all_links_by_non_ascii_text(self):
"should find links by non ascii text"
non_ascii_encodings = {
"pangram_pl": u"Jeżu klątw, spłódź Finom część gry hańb!",
"pangram_ja": u"天 地 星 空",
"pangram_ru": u"В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!",
"pangram_eo": u"Laŭ Ludoviko Zamenhof bongustas freŝa ĉeĥa manĝaĵo kun spicoj.",
}
for key, text in non_ascii_encodings.iteritems():
link = self.browser.find_link_by_text(text)
self.assertEqual(key, link["id"])
|
goday-org/learnPython
|
src/Arrays.py
|
Python
|
gpl-3.0
| 119 | 0.02521 |
# -*- coding: utf-8 -*-
'''
Created on May 3, 2014
@author: andy
'''
#list Tuple
if __name__ == '__main__':
pass
|
jgolebiowski/graphAttack
|
controlTrainRNN.py
|
Python
|
mit
| 5,411 | 0.000924 |
import graphAttack as ga
import numpy as np
import pickle
"""Control script"""
def run(simulationIndex, X, Y=None):
"""Run the model"""
print("Training with:", simulationIndex)
seriesLength, nFeatures = X.shape
# ------ it is important that the exampleLength is the same as
# ------ the number if examples in the mini batch so that
# ------ the state of the RNN is continously passed forward
exampleLength = 4
nExamples = exampleLength
nHidden0 = 25
nHidden1 = 25
mainGraph = ga.Graph(False)
dummyX = np.zeros((nExamples, exampleLength, nFeatures))
feed = mainGraph.addOperation(ga.Variable(dummyX), feederOperation=True)
# ------ Generate the network, options are RNN and LSTM gates
# ------ Add initial layer and then possibly append more
hactivations0, cStates0 = ga.addInitialLSTMLayer(mainGraph,
inputOperation=feed,
nHidden=nHidden0)
hactivations1, cStates1 = ga.appendLSTMLayer(mainGraph,
previousActivations=hactivations0,
nHidden=nHidden1)
# hactivations0 = ga.addInitialRNNLayer(mainGraph,
# inputOperation=feed,
# activation=ga.TanhActivation,
# nHidden=nHidden1)
# hactivations1 = ga.appendRNNLayer(mainGraph,
# previousActivations=hactivations0,
# activation=ga.TanhActivation,
# nHidden=nHidden1)
finalCost, costOperationsList = ga.addRNNCost(mainGraph,
hactivations1,
costActivation=ga.SoftmaxActivation,
costOperation=ga.CrossEntropyCostSoftmax,
nHidden=nHidden1,
labelsShape=feed.shape,
labels=None)
hactivations = [hactivations0, hactivations1]
cStates = [cStates0, cStates1]
nHiddenList = [nHidden0, nHidden1]
def fprime(p, data, labels, costOperationsList=costOperationsList, mainGraph=mainGraph):
mainGraph.feederOperation.assignData(data)
mainGraph.resetAll()
for index, cop in enumerate(costOperationsList):
cop.assignLabels(labels[:, index, :])
mainGraph.attachParameters(p)
c = mainGraph.feedForward()
mainGraph.feedBackward()
g = mainGraph.unrollGradients()
        nLayers = len(hactivations)
for i in range(nLayers):
hactivations[i][0].assignData(hactivations[i][-1].getValue())
cStates[i][0].assignData(cStates[i][-1].getValue())
return c, g
    param0 = mainGraph.unrollGradientParameters()
print("Number of parameters to train:", len(param0))
adamGrad = ga.adaptiveSGDrecurrent(trainingData=X,
param0=param0,
epochs=1e3,
miniBatchSize=nExamples,
exampleLength=exampleLength,
initialLearningRate=1e-3,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
testFrequency=1e2,
function=fprime)
pickleFilename = "minimizerParamsRNN_" + str(simulationIndex) + ".pkl"
# with open(pickleFilename, "rb") as fp:
# adamParams = pickle.load(fp)
# adamGrad.restoreState(adamParams)
# params = adamParams["params"]
params = adamGrad.minimize(printTrainigCost=True, printUpdateRate=False,
dumpParameters=pickleFilename)
mainGraph.attachParameters(params)
cache = (nFeatures, nHiddenList, hactivations, cStates, costOperationsList)
return mainGraph, cache, adamGrad.costLists[-1]
if(__name__ == "__main__"):
# ------ This is a very limited dataset, load a lrger one for better results
pickleFilename = "dataSet/singleSentence.pkl"
with open(pickleFilename, "rb") as fp:
x, index_to_word, word_to_index = pickle.load(fp)
simulationIndex = 0
mainGraph, cache, finalCost = run(simulationIndex, x)
nFeatures, nHiddenList, hactivations, cStates, costOperationsList = cache
temp = ga.sampleManyLSTM(100, nFeatures, nHiddenList,
hactivations=hactivations,
cStates=cStates,
costOperationsList=costOperationsList,
mainGraph=mainGraph,
index_to_word=index_to_word)
print(temp)
# temp = ga.sampleManyRNN(100, nFeatures, nHiddenList,
# hactivations=hactivations,
# costOperationsList=costOperationsList,
# mainGraph=mainGraph,
# index_to_word=index_to_word)
# print(temp)
|
SonyStone/pylib
|
Practical Maya Programming/input.py
|
Python
|
mit
| 199 | 0.005025 |
from sys import version_info
py3 = version_info[0] > 2
if py3:
response = input("Please enter your
|
name: ")
else:
response = raw_input("Please enter your name: ")
print("Hello " + response)
|
dpxxdp/berniemetrics
|
private/scrapers/realclearpolitics-scraper/scraper.py
|
Python
|
mit
| 1,355 | 0.005904 |
import os, scrapy, argparse
from realclearpolitics.spiders.spider import RcpSpider
from scrapy.crawler import CrawlerProcess
parser = argparse.ArgumentParser('Scrap realclearpolitics polls data')
parser.add_argument('url', action="store")
parser.add_argument('--locale', action="store", default='')
parser.add_argument('--race', action="store", default='primary')
parser.add_argument('--csv', dest='to_csv', action='store_true')
parser.add_argument('--output', dest='output', action='store')
args = parser.parse_args()
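# Hedged example invocation (not from the original file; <poll-url> stands in
# for a realclearpolitics poll page):
#   python scraper.py <poll-url> --locale iowa --race primary --csv --output iowa.csv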
url = args.url
extra_fields = { 'locale': args.locale, 'race': args.race }
if (args.to_csv):
if args.output is None:
        filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = args.output
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
else:
settings = {
'ITEM_PIPELINES' : {
'realclearpolitics.pipeline.PollPipeline': 300,
},
        'LOG_LEVEL' : 'ERROR',
'DOWNLOAD_HANDLERS' : {'s3': None,}
}
process = CrawlerProcess(settings);
process.crawl(RcpSpider, url, extra_fields)
process.start()
|
nephila/djangocms-apphook-setup
|
tests/test_utils/urls.py
|
Python
|
bsd-3-clause
| 842 | 0.002375 |
from cms.utils.conf import get_cms_setting
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.i18n import JavaScriptCatalog
from django.views.static import serve
admin.autodiscover()
urlpatterns = [
url(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT, "show_indexes": True}),
url(r"^media/cms/(?P<path>.*)$", serve, {"document_root": get
|
_cms_setting("MEDIA_ROOT"), "show_indexes": True}),
url(r"^jsi18n/$", JavaScriptCatalog.as_view(), name="javascript-catalog"),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += i18n_patterns(
url(r"^admin/", admin.site.urls),
url(r"^", include
|
("cms.urls")),
)
|
michardy/account-hijacking-prevention
|
hijackingprevention/api.py
|
Python
|
mit
| 4,967 | 0.02879 |
# The primary purpose of this module is to run data hashing and comparison functions
# It is also called during the intialization of modules to register their hashing and comparison functions.
# Try to leave all the boilerplate calls like DB access and JSON decoding in the main.py file
import hijackingprevention.session as session
import hijackingprevention.api_user as api_user
from tornado import gen
import logging
logger = logging.getLogger(__name__)
OK = (200, 'OK')
class Receiver():
"""This class handles all requests to hash and interpret data.
Most of the shared functionalty that underpins the API is defined here.
Functions in here fall into two catagories:
Functions that allow modules to register hashers or comparers.
Functions that run the registered hashers or comparers.
Please note:
If a function you are considering including in this file does not invoke a hasher, translator, or comparer it may not belong in this file.
"""
def __init__(self):
self.__hashers = {}
self.__translators = {}
self.__comparers = {}
self.__maxscores = {}
self.__callbacks = {}
# Module registration functions
def add_hasher(self, name, fxn):
"""This is called by each module on startup to register its sitewide hasher."""
self.__hashers[name] = fxn
def add_translator(self, name, fxn):
"""This is called by each module on startup to register its data translator.
Translators hash data, which has already been hashed by a (sitewide) hasher, with a per user salt.
"""
self.__translators[name] = fxn
def add_comparer(self, name, fxn, score):
"""This is called by each module on startup to register its hash comparer."""
self.__comparers[name] = fxn
self.__maxscores[name] = score
# Module users and API interface
@gen.coroutine
def add_data(self, req, headers, db):
"""This function is called when data is received from a browser to hash and store the data"""
# This function has, rather irritatingly, remained in spite of my attempt to keep database access out of the api.py file.
if req['name'] in self.__hashers.keys():
#setup to invoke hasher
site = api_user.Site(db)
yield site.get_by_client_key(req['ck'],
headers.get("Host"))
salt = site.get_salt(req['name'])
#invoke hasher
hash = yield self.__hashers[req['name']](req['data'],
headers, salt)
#store the result
site_id = site.get_id()
ses = session.Session(req['sid'], site_id, db) #setup session object
yield ses.read_db() #read session if it exists
ses.add_data({req['name']:hash}) #add data to session object
yield ses.write_out() #update session object in database
return(OK) #Nothing failed. Right? oops.
else:
logger.warning('Could not find a handler for "' + req['name'] + '"')
return(400, 'Err: Could not find a handler for "' + req['name'] + '"')
@gen.coroutine
def copy_data(self, ses, member):
"""This function is called to store session data permenantly to the user profile"""
data = {}
for dt in ses.keys():
data[dt] = yield self.__translators[dt](ses[dt])
member.add_data(data)
return(OK)
@gen.coroutine
def __calculate_sub_rating(self, data_type, session_dat, user_dat):
"""Calculate trust score for specific subtype of user data"""
sub_tot = 0
if data_type in user_dat and data_type in session_dat:
for h in user_dat[data_type]: #loop through all the user's hashed data of this type and compare it to the session
temp = (yield self.__comparers[data_type](
session_dat[data_type], h))
if temp > sub_tot: #fix security issue created by the previous ability to combine multiple low scores
sub_tot = temp
elif data_type not in session_dat and data_type in user_dat: #the user's session data may have expired or have not been collected
sub_tot = -1*self.__maxscores[data_type] #score nonexistant data negativly
return(sub_tot)
@gen.coroutine
def get_trust(self, session_dat, user_dat):
"""This scores how trustworthy a user is as a number between -1 and 1.
The score is based on how much session data matches the data stored in their user profile.
A score of 1 means that the user is perfectly trustworthy, a score of 0 means they cannot be trusted.
A negative score means that the session data has expired and an accurate determination cannot be made.
"""
total = 0 #total user score
actmax = 0 #maximum achievable total (A user with this score is PERFECT)
for dt in self.__comparers.keys(): #loop through all the types of data
actmax += self.__maxscores[dt] #add data type's max score to the maximum
total += yield self.__calculate_sub_rating(dt, session_dat, user_dat) #calculate sub score for given data type and add it to total
try:
return(200, str(total/actmax))
except ZeroDivisionError:
logger.critical('This server does not have any client data collection and analysis modules installed')
return(501, 'This server does not have any data collection and analysis modules installed')
|
kuldeep-k/pySocialFactory
|
socialFactory/core/AuthenticationError.py
|
Python
|
mit
| 56 | 0.017857 |
class AuthenticationError(Exception):
pass
|
gangadharkadam/shfr
|
frappe/widgets/event.py
|
Python
|
mit
| 1,523 | 0.0348 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Event
# -------------
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_cal_events(m_st, m_end):
# load owned events
res1 = frappe.db.sql("""select name from `tabEvent`
WHERE ifnull(event_date,'2000-01-01') between %s and %s and owner = %s
and event_type != 'Public' and event_type != 'Cancel'""",
(m_st, m_end, frappe.user.name))
# load individual events
res2 = frappe.db.sql("""select t1.name from `tabEvent` t1, `tabEvent User` t2
where ifnull(t1.event_date,'2000-01-01') between %s and %s and t2.person = %s
and t1.name = t2.parent and t1.event_type != 'Cancel'""",
(m_st, m_end, frappe.user.name))
# load role events
roles = frappe.user.get_roles()
myroles = ['t2.role = "%s"' % r.replace('"', '\"') for r in roles]
myroles = '(' + (' OR '.join(myroles)) + ')'
res3 = frappe.db.sql("""select t1.
|
name from `tabEvent` t1, `tabEvent Role` t2
where ifnull(t1.event_date,'2000-01-01') between %s and %s
and t1.name = t2.parent and t1.event_type != 'Cancel' and %s""" %
('%s', '%s', myroles), (m_st, m_end))
	# load public events
res4 = frappe.db.sql("select name from `tabEvent` \
where ifnull(event_date,'2000-01-01') between %s and %s and event_type='Public'",
(m_st, m_end))
doclist, rl = [], []
for r in res1 + res2 + res3 + res4:
if not r in rl:
doclist += frappe.get_doc('Event', r[0])
rl.append(r)
return doclist
|
twitter/pants
|
src/python/pants/reporting/reporting_server.py
|
Python
|
apache-2.0
| 17,903 | 0.00849 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import http.server
import itertools
import json
import logging
import mimetypes
import os
import pkgutil
import re
from builtins import bytes, object, open, range, str, zip
from collections import namedtuple
from datetime import date, datetime
from textwrap import dedent
import pystache
from future.moves.urllib.parse import parse_qs, urlencode, urlsplit, urlunparse
from pants.base.build_environment import get_buildroot
from pants.base.mustache import MustacheRenderer
from pants.base.run_info import RunInfo
from pants.pantsd.process_manager import ProcessManager
logger = logging.getLogger(__name__)
# Google Prettyprint plugin files.
PPP_RE = re.compile(r"^lang-.*\.js$")
class PantsHandler(http.server.BaseHTTPRequestHandler):
"""A handler t
|
hat demultiplexes various pants reporting URLs."""
def __init__(self, settings, renderer, request, client_address, server):
self._settings = settings # An instance of ReportingServer.Settings.
self._root = self._settings.root
self._renderer = renderer
self._client_address = client_address
# The underlying handlers for specific URL prefixes.
self._GET_handlers = [
('/runs/', self._handle_runs), # Show list of known pants runs.
('/run/', self._handle_run), # Show a report for a single pants run.
('/browse/', self._handle_browse), # Browse filesystem under build root.
('/content/', self._handle_content), # Show content of file.
('/assets/', self._handle_assets), # Statically serve assets (css, js etc.)
('/poll', self._handle_poll), # Handle poll requests for raw file content.
('/latestrunid', self._handle_latest_runid), # Return id of latest pants run.
('/favicon.ico', self._handle_favicon) # Return favicon.
]
# TODO(#6071): BaseHTTPServer.BaseHTTPRequestHandler is an old-style class, so we must
# invoke its __init__ like this.
# TODO: Replace this entirely with a proper server as part of the pants daemon.
http.server.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
"""GET method implementation for BaseHTTPRequestHandler."""
if not self._client_allowed():
return
try:
(_, _, path, query, _) = urlsplit(self.path)
params = parse_qs(query)
# Give each handler a chance to respond.
for prefix, handler in self._GET_handlers:
if self._maybe_handle(prefix, handler, path, params):
return
# If no path specified, default to showing the list of all runs.
if path == '/':
self._handle_runs('', {})
return
content = 'Invalid GET request {}'.format(self.path).encode('utf-8')
self._send_content(content, 'text/html', code=400)
except (IOError, ValueError):
pass # Printing these errors gets annoying, and there's nothing to do about them anyway.
#sys.stderr.write('Invalid GET request {}'.format(self.path))
def _handle_runs(self, relpath, params):
"""Show a listing of all pants runs since the last clean-all."""
runs_by_day = self._partition_runs_by_day()
args = self._default_template_args('run_list.html')
args['runs_by_day'] = runs_by_day
content = self._renderer.render_name('base.html', args).encode("utf-8")
self._send_content(content, 'text/html')
_collapsible_fmt_string = dedent("""
<div class="{class_prefix}" id="{id}">
<div class="{class_prefix}-header toggle-header" id="{id}-header">
<div class="{class_prefix}-header-icon toggle-header-icon" onclick="pants.collapsible.toggle('{id}')">
<i id="{id}-icon" class="visibility-icon icon-large icon-caret-right hidden"></i>
</div>
<div class="{class_prefix}-header-text toggle-header-text">
[<span id="{id}-header-text">{title}</span>]
</div>
</div>
<div class="{class_prefix}-content toggle-content nodisplay" id="{id}-content"></div>
</div>
""")
def _handle_run(self, relpath, params):
"""Show the report for a single pants run."""
args = self._default_template_args('run.html')
run_id = relpath
run_info = self._get_run_info_dict(run_id)
if run_info is None:
args['no_such_run'] = relpath
if run_id == 'latest':
args['is_latest'] = 'none'
else:
report_abspath = run_info['default_report']
report_relpath = os.path.relpath(report_abspath, self._root)
report_dir = os.path.dirname(report_relpath)
self_timings_path = os.path.join(report_dir, 'self_timings')
cumulative_timings_path = os.path.join(report_dir, 'cumulative_timings')
artifact_cache_stats_path = os.path.join(report_dir, 'artifact_cache_stats')
run_info['timestamp_text'] = \
datetime.fromtimestamp(float(run_info['timestamp'])).strftime('%H:%M:%S on %A, %B %d %Y')
timings_and_stats = '\n'.join([
self._collapsible_fmt_string.format(id='cumulative-timings-collapsible',
title='Cumulative timings', class_prefix='aggregated-timings'),
self._collapsible_fmt_string.format(id='self-timings-collapsible',
title='Self timings', class_prefix='aggregated-timings'),
self._collapsible_fmt_string.format(id='artifact-cache-stats-collapsible',
title='Artifact cache stats', class_prefix='artifact-cache-stats')
])
args.update({'run_info': run_info,
'report_path': report_relpath,
'self_timings_path': self_timings_path,
'cumulative_timings_path': cumulative_timings_path,
'artifact_cache_stats_path': artifact_cache_stats_path,
'timings_and_stats': timings_and_stats})
if run_id == 'latest':
args['is_latest'] = run_info['id']
content = self._renderer.render_name('base.html', args).encode("utf-8")
self._send_content(content, 'text/html')
def _handle_browse(self, relpath, params):
"""Handle requests to browse the filesystem under the build root."""
abspath = os.path.normpath(os.path.join(self._root, relpath))
if not abspath.startswith(self._root):
raise ValueError # Prevent using .. to get files from anywhere other than root.
if os.path.isdir(abspath):
self._serve_dir(abspath, params)
elif os.path.isfile(abspath):
self._serve_file(abspath, params)
def _handle_content(self, relpath, params):
"""Render file content for pretty display."""
abspath = os.path.normpath(os.path.join(self._root, relpath))
if os.path.isfile(abspath):
with open(abspath, 'rb') as infile:
content = infile.read()
else:
content = 'No file found at {}'.format(abspath).encode('utf-8')
content_type = mimetypes.guess_type(abspath)[0] or 'text/plain'
if not content_type.startswith('text/') and not content_type == 'application/xml':
# Binary file. Display it as hex, split into lines.
n = 120 # Display lines of this max size.
content = repr(content)[1:-1] # Will escape non-printables etc, dropping surrounding quotes.
content = '\n'.join([content[i:i + n] for i in range(0, len(content), n)])
prettify = False
prettify_extra_langs = []
else:
prettify = True
if self._settings.assets_dir:
prettify_extra_dir = os.path.join(self._settings.assets_dir, 'js', 'prettify_extra_langs')
prettify_extra_langs = [{'name': x} for x in os.listdir(prettify_extra_dir)]
else:
# TODO: Find these from our package, somehow.
prettify_extra_langs = []
linenums = True
args = {'prettify_extra_langs': prettify_extra_langs, 'content': content,
'prettify': prettify, 'linenums': linenums}
content = self._renderer.render_name('file_content.html', args).encode("utf-8")
self._send_content(content, 'text/html')
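A standalone sketch of the hex-style chunking applied to binary files above (the payload is a made-up value, not part of the handler):
content = repr(b'\x00\xffbinary payload')[1:-1]  # escape non-printables, drop surrounding quotes
n = 120  # maximum characters per displayed line
chunked = '\n'.join(content[i:i + n] for i in range(0, len(content), n))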
def _handle_asset
|
renatopp/liac-chess
|
main.py
|
Python
|
mit
| 25 | 0.04 |
import chess
chess.run()
|
LookThisCode/DeveloperBus
|
Season 2013/Bogota/Projects/06.Agile_Business/backend/app/models/user.py
|
Python
|
apache-2.0
| 231 | 0.021645 |
import logging
from google.appengine.ext import ndb
from endpoints_proto_datastore.ndb.model import EndpointsModel, EndpointsAliasProperty
class UserModel(EndpointsModel):
email = ndb.StringProperty()
name = ndb.StringProperty()
|
sndnv/cadb
|
cadb/utils/Stats.py
|
Python
|
mit
| 9,700 | 0.005773 |
# MIT License
# Copyright (c) 2016 https://github.com/sndnv
# See the project's LICENSE file for the full text
def get_header_files_size_data(sources_dir, header_files_by_size, rows_count):
"""
Builds table rows list containing data about header files sizes (largest/smallest).
:param sources_dir: configured sources directory
:param header_files_by_size: list of header files ordered by size (smallest to largest)
:param rows_count: number of rows to build
:return: the requested table rows
"""
data = [
("-------------------", "----", "--------------------", "----"),
("Largest Header File", "Size", "Smallest Header File", "Size"),
("-------------------", "----", "--------------------", "----")
]
largest_headers = header_files_by_size[-rows_count:]
largest_headers.reverse()
smallest_headers = header_files_by_size[:rows_count]
for n in range(0, rows_count):
top = largest_headers[n] if len(largest_headers) > n else None
bottom = smallest_headers[n] if len(smallest_headers) > n else None
data.append(
(
top.file_path.replace(sources_dir, '~') if top is not None else "-",
"{0:.2f} KB".format(top.size / 1024) if top is not None else "-",
bottom.file_path.replace(sources_dir, '~') if bottom is not None else "-",
"{0:.2f} KB".format(bottom.size / 1024) if bottom is not None else "-"
)
)
return data
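A minimal usage sketch, assuming simple objects exposing the file_path and size attributes this helper reads; the SourceFile tuple and paths below are hypothetical:
from collections import namedtuple

SourceFile = namedtuple('SourceFile', ['file_path', 'size'])  # hypothetical stand-in
headers = sorted([SourceFile('/src/util.h', 512), SourceFile('/src/core.h', 8192)],
                 key=lambda f: f.size)
for row in get_header_files_size_data('/src', headers, rows_count=2):
    print(row)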
def get_implementation_files_size_data(sources_dir, implementation_files_by_size, rows_count):
"""
Builds table rows list containing data about implementation files sizes (largest/smallest).
:param sources_dir: configured sources directory
:param implementation_files_by_size: list of implementation files ordered by size (smallest to largest)
:param rows_count: number of rows to build
:return: the requested table rows
"""
data = [
("---------------------------", "----", "----------------------------", "----"),
("Largest Implementation File", "Size", "Smallest Implementation File", "Size"),
("---------------------------", "----", "----------------------------", "----")
]
largest_implementations = implementation_files_by_size[-rows_count:]
largest_implementations.reverse()
smallest_implementations = implementation_files_by_size[:rows_count]
for n in range(0, rows_count):
top = largest_implementations[n] if len(largest_implementations) > n else None
bottom = smallest_implementations[n] if len(smallest_implementations) > n else None
data.append(
(
top.file_path.replace(sources_dir, '~') if top is not None else "-",
"{0:.2f} KB".format(top.size / 1024) if top is not None else "-",
bottom.file_path.replace(sources_dir, '~') if bottom is not None else "-",
"{0:.2f} KB".format(bottom.size / 1024) if bottom is not None else "-"
)
)
return data
def get_header_files_deps_data(sources_dir, header_files_by_deps_count, rows_count):
"""
Builds table rows list containing data about header files dependency counts (most/least).
:param sources_dir: configured sources directory
:param header_files_by_deps_count: list of header files ordered by dependency count (least to most)
:param rows_count: number of rows to build
:return: the requested table rows
"""
data = [
("-----------------------------", "-----", "------------------------------", "-----"),
("Most Dependencies Header File", "Count", "Least Dependencies Header File", "Count"),
("-----------------------------", "-----", "------------------------------", "-----")
]
most_deps_headers = header_files_by_deps_count[-rows_count:]
most_deps_headers.reverse()
least_deps_headers = header_files_by_deps_count[:rows_count]
for n in range(0, rows_count):
top = most_deps_headers[n] if len(most_deps_headers) > n else None
bottom = least_deps_headers[n] if len(least_deps_headers) > n else None
if top is not None:
top_count = (len(top.internal_dependencies) + len(top.external_dependencies))
else:
top_count = "-"
if bottom is not None:
bottom_count = (len(bottom.internal_dependencies) + len(bottom.external_dependencies))
else:
bottom_count = "-"
data.append(
(
top.file_path.replace(sources_dir, '~') if top is not None else "-",
top_count,
bottom.file_path.replace(sources_dir, '~') if bottom is not None else "-",
bottom_count
)
)
return data
def get_implementation_files_deps_data(sources_dir, implementation_files_by_deps_count, rows_count):
"""
Builds table rows list containing data about implementation files dependency counts (most/least).
:param sources_dir: configured sources directory
:param implementation_files_by_deps_count: list of implementation files ordered by dependency count (least to most)
:param rows_count: number of rows to build
:return: the requested table rows
"""
data = [
("---------------------------", "-----", "----------------------------", "-----"),
("Most Dependencies Impl File", "Count", "Least Dependencies Impl File", "Count"),
("---------------------------", "-----", "----------------------------", "-----")
]
most_deps_implementations = implementation_files_by_deps_count[-rows_count:]
most_deps_implementations.reverse()
least_deps_implementations = implementation_files_by_deps_count[:rows_count]
for n in range(0, rows_count):
top = most_deps_implementations[n] if len(most_deps_implementations) > n else None
bottom = least_deps_implementations[n] if len(least_deps_implementations) > n else None
if top is not None:
top_count = (
len(top.internal_dependencies) + len(top.external_dependencies)
)
else:
top_count = "-"
if bottom is not None:
bottom_count = (
len(bottom.internal_dependencies) + len(bottom.external_dependencies)
)
else:
bottom_count = "-"
data.append(
(
top.file_path.replace(sources_dir, '~') if top is not None else "-",
top_count,
bottom.file_path.replace(sources_dir, '~') if bottom is not None else "-",
bottom_count
)
)
return data
def get_internal_deps_data(sources_dir, internal_deps_by_use_count, internal_dependencies, rows_count):
"""
Builds table rows list containing data about internal dependency use counts (most/least).
:param sources_dir: configured sources directory
:param internal_deps_by_use_count: list of internal dependencies by use count (least to most)
:param internal_dependencies: dict with all internal dependencies
:param rows_count: number of rows to build
:return: the requested table rows
"""
data = [
("-----------------------------", "-----", "------------------------------", "-----"),
("Most Used Internal Dependency", "Count", "Least Used Internal Dependency", "Count"),
("-----------------------------", "-----", "------------------------------", "-----")
]
most_used_deps = internal_deps_by_use_count[-rows_count:]
most_used_deps.reverse()
least_used_deps = internal_deps_by_use_count[:rows_count]
for n in range(0, rows_count):
top = most_used_deps[n] if len(most_used_deps) > n else None
bottom = least_used_deps[n] if len(least_used_deps) > n else None
data.append(
(
top.replace(sources_dir, '~') if top is not None else "-",
len(internal_dependencies[top]) if top is not None else "-",
bottom.replace(sources_dir, '~') if bottom is not None else "-",
len(internal_dependencies[bottom]) if bottom is not None else "-"
)
)
return data
|
pczhaoyun/obtainfo
|
zinnia/urls/entries.py
|
Python
|
apache-2.0
| 335 | 0 |
"""Urls fo
|
r the Zinnia entries"""
from django.conf.urls import url
from django.conf.urls import patterns
from zinnia.views.entries import EntryDetail
urlpatterns = patterns(
'',
url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$',
EntryDetail.as_view(),
name='zinnia_entry_detail'),
)
|
henkelis/sonospy
|
web2py/applications/admin/controllers/mercurial.py
|
Python
|
gpl-3.0
| 1,107 | 0.00813 |
import os
from mercurial import cmdutil, hg, ui
_hgignore_content = """\
syntax: glob
*~
*.pyc
*.pyo
*.bak
cache/*
databases/*
sessions/*
errors/*
"""
def commit():
app = request.args[0]
path = apath(app, r=request)
uio = ui.ui()
uio.quiet = True
if not os.environ.get('HGUSER') and not uio.config("ui", "username"):
os.environ['HGUSER'] = 'web2py@localhost'
try:
r = hg.repository(ui=uio, path=path)
except:
r = hg.repository(ui=uio, path=path, create=True)
hgignore = os.path.join(path, '.hgignore')
if not os.path.exists(hgignore):
open(hgignore, 'w').write(_hgignore_content)
form = FORM('Comment:',INPUT(_name='comment',requires=IS_NOT_EMPTY()),
INPUT(_type='submit',_value='Commit'))
if form.accepts(request.vars,session):
oldid = r[r.lookup('.')]
cmdutil.addremove(r)
r.commit(text=form.vars.comment)
if r[r.lookup('.')] == oldid:
response.flash = 'no changes'
files = r[r.lookup('.')].files()
return dict(form=form,files=TABLE(*[TR(file) for file in files]),repo=r)
|
kionetworks/openstack-api-scripts
|
tenant_glance_images.py
|
Python
|
apache-2.0
| 1,996 | 0.002004 |
#!/usr/bin/python
import os
import json
from pprint import pprint
from os import environ as env
import glanceclient.exc
from collections import Counter
import novaclient.v1_1.client as nvclient
import glanceclient.v2.client as glclient
import keystoneclient.v2_0.client as ksclient
def get_nova_credentials():
cred = {}
cred['username'] = os.environ['OS_USERNAME']
cred['api_key'] = os.environ['OS_PASSWORD']
cred['auth_url'] = os.environ['OS_AUTH_URL']
cred['project_id'] = os.environ['OS_TENANT_NAME']
return cred
def main():
keystone = ksclient.Client(auth_url=env['OS_AUTH_URL'],
username=env['OS_USERNAME'],
password=env['OS_PASSWORD'],
tenant_name=env['OS_TENANT_NAME'])
credentials = get_nova_credentials()
glance_endpoint = keystone.service_catalog.url_for(service_type='image')
nc = nvclient.Client(**credentials)
gc = glclient.Client(glance_endpoint, token=keystone.auth_token)
L = []
for server in nc.servers.list(detailed=True):
imagedata = server.image
if imagedata:
try:
jsondata = json.dumps(imagedata['id'])
image_id = jsondata.translate(None, '"')
except ValueError:
print "Decoding JSON has failed"
try:
imageinfo = gc.images.get(image_id)
except glanceclient.exc.HTTPException:
continue
try:
jsondata = json.dumps(imageinfo['name'])
image_name = jsondata.translate(None, '"')
except ValueError:
print "Decoding JSON has failed"
L.append(image_name)
count = Counter(L)
print "***** %s *****" % os.environ['OS_TENANT_NAME']
for key, value in sorted(count.iteritems()):
print "%s,%d" % (key, value)
if __name__ == '__main__':
main()
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/backend/test/case/node/atan.py
|
Python
|
apache-2.0
| 767 | 0 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Atan(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Atan',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.arctan(x)
expect(node, inputs=[x], outputs=[y],
name='test_atan_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.arctan(x)
expect(node, inputs=[x], outputs=[y],
name='test_atan')
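For reference, a numpy-only check of the identity these test cases rely on (a hedged sketch, independent of any ONNX backend):
x = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
assert np.allclose(np.arctan(x), [-0.7853982, 0.0, 0.7853982])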
|
FlorisTurkenburg/ManyMan
|
SCC_backend/server.py
|
Python
|
gpl-2.0
| 14,884 | 0.000336 |
"""
ManyMan - A Many-core Visualization and Management System
Copyright (C) 2012
University of Amsterdam - Computer Systems Architecture
Jimi van der Woning and Roy Bakker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from SocketServer import BaseRequestHandler as brh, TCPServer as tcps
from chip import Chip
from messageprocessor import MessageProcessor
from threading import Thread
from time import sleep, time
import SocketServer
import config
import json
import logging
import sys
import subprocess as sp
default_settings = {
'address': ['', 11111],
'dummy_mode': False,
'logging_format': '[%(asctime)s %(levelname)-5s] %(name)s: %(message)s',
'logging_datefmt': '%B %d, %H:%M:%S',
'log_filename': 'log',
'logging_to_console': True,
'logging_level': 'DEBUG',
'logging_level_console': 'INFO',
'max_output_msg_len': 100,
'status_frequency': 1,
'frequency_timeout': 5,
'frequency_scale_command': '/shared/jimivdw/jimivdw/tests/power/setpwr',
'chip_name': 'Intel SCC',
'chip_cores': 48,
'chip_orientation': [
[37, 39, 41, 43, 45, 47],
[36, 38, 40, 42, 44, 46],
[25, 27, 29, 31, 33, 35],
[24, 26, 28, 30, 32, 34],
[13, 15, 17, 19, 21, 23],
[12, 14, 16, 18, 20, 22],
[1, 3, 5, 7, 9, 11],
[0, 2, 4, 6, 8, 10]
],
'frequency_islands': [
[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11],
[12, 13],
[14, 15],
[16, 17],
[18, 19],
[20, 21],
[22, 23],
[24, 25],
[26, 27],
[28, 29],
[30, 31],
[32, 33],
[34, 35],
[36, 37],
[38, 39],
[40, 41],
[42, 43],
[44, 45],
[46, 47]
],
'voltage_islands': [
[0, 1, 2, 3, 12, 13, 14, 15],
[4, 5, 6, 7, 16, 17, 18, 19],
[8, 9, 10, 11, 20, 21, 22, 23],
[24, 25, 26, 27, 36, 37, 38, 39],
[28, 29, 30, 31, 40, 41, 42, 43],
[32, 33, 34, 35, 44, 45, 46, 47]
],
'frequency_dividers': {
800: 2,
533: 3,
400: 4,
320: 5,
267: 6,
229: 7,
200: 8,
178: 9,
160: 10,
145: 11,
133: 12,
123: 13,
114: 14,
107: 15,
100: 16
}
}
class Client:
"""Client object for storing front-end connections."""
def __init__(self, request, name):
self.request = request
self.name = name
self.initialized = False
class Server(SocketServer.TCPServer):
"""Server object. Sets up, handles and closes client connections."""
def __init__(self, address, chip, settings):
self.logger = logging.getLogger('Server')
self.chip = chip
self.settings = settings
self.connection_count = 0
self.clients = []
self.frequency_scaler = None
self.frequency_thread = None
self.logger.debug("Initialized on port %d" % address[1])
tcps.__init__(self, address, MessageHandler)
self.init_frequency_scaler()
return
def init_frequency_scaler(self):
"""Initialize the frequency scaler."""
self.frequency_scaler = FrequencyScaler(self, self.settings)
self.frequency_thread = Thread(
target=self.frequency_scaler.wait_for_assignment
)
self.frequency_thread.daemon = True
self.logger.info("Initialized the FrequencyScaler")
def serve_forever(self, max_lines):
"""Keep serving client connections."""
self.frequency_thread.start()
self.processor = MessageProcessor(self, max_lines)
self.logger.info("Started")
try:
tcps.serve_forever(self)
finally:
self.frequency_scaler.running = False
self.frequency_thread.join()
self.logger.info('Stopped the FrequencyScaler')
self.logger.info("Stopped")
def finish_request(self, request, client_address):
"""A client has successfully connected."""
self.logger.info("New connection from %s." % client_address[0])
self.connection_count += 1
client = Client(request, "Client%d" % self.connection_count)
self.clients.append(client)
self.RequestHandlerClass(request, client_address, self, client)
def close_request(self, request):
"""A client has disconnected."""
for client in self.clients:
if client.request == request:
self.logger.info("Closed connection to %s." % client.name)
self.clients.remove(client)
break
return tcps.close_request(self, request)
class MessageHandler(SocketServer.BaseRequestHandler):
"""Handler for all received messages. Calls the messageprocessor."""
def __init__(self, request, client_address, server, client):
self.logger = logging.getLogger('MessageHandler')
self.client = client
self.buffer = ""
brh.__init__(self, request, client_address, server)
return
def handle(self):
"""Handle all received messages."""
while True:
try:
data = self.request.recv(1024)
if not data:
break
if '\n' in data:
# A message is not complete until receiving linebreak
parts = data.split('\n')
self.server.processor.process(
self.client,
"%s%s" % (self.buffer, parts[0])
)
# Handle any adjacent fully received messages
for part in parts[1:-1]:
self.server.processor.process(self.client, part)
self.buffer = parts[-1]
else:
self.buffer += data
except:
self.logger.error("Exception occurred in MessageHandler")
break
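The loop above implements simple newline framing over a byte stream; a self-contained sketch of the same logic with hypothetical chunks:
buf = ""
complete = []
for data in ['{"type": "status"}\n{"type": "ta', 'sk"}\npartial']:
    if '\n' in data:
        parts = data.split('\n')
        complete.append(buf + parts[0])   # finish the message in progress
        complete.extend(parts[1:-1])      # any adjacent fully received messages
        buf = parts[-1]                   # keep the trailing fragment
    else:
        buf += data
# complete == ['{"type": "status"}', '{"type": "task"}']; buf == 'partial'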
class StatusSender:
"""Module that sends the chip status at adjustable intervals."""
def __init__(self, chip, server):
self.logger = logging.getLogger('StatusSender')
self.chip = chip
self.server = server
self.running = True
def send_forever(self, interval):
"""Keep sending the status messages on the specified interval."""
while self.running:
try:
sleep(1. / interval)
msg = {
'type': 'status',
'content': {
'chip': self.chip.as_dict()
}
}
for client in self.server.clients:
client.request.sendall("%s\n" % json.dumps(msg))
except Exception, e:
self.logger.warning(
'Exception occurred in StatusSender: %s' % e
)
class FrequencyScaler:
def __init__(self, server, settings):
self.server = server
self.settings = settings
self.logger = logging.getLogger('FrequencyScaler')
self.running = True
self.frequencies = [533] * 6
self.last_change = time()
self.changed = False
self.changed_island = None
def wait_for_assignment(self):
|
nhuntwalker/astroML
|
book_figures/chapter10/fig_LINEAR_BIC.py
|
Python
|
bsd-2-clause
| 3,400 | 0.001176 |
"""
BIC for LINEAR light curve
--------------------------
Figure 10.19
BIC as a function of the number of frequency components for the light curve
shown in figure 10.18. BIC for the two prominent frequency peaks is shown. The
inset panel details the area near the maximum. For both frequencies, the BIC
peaks at between 10 and 15 terms; note that a high value of BIC is achieved
already with 6 components. Comparing the two, the longer period model (bottom
panel) is much more significant.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.time_series import multiterm_periodogram, lomb_scargle_BIC
from astroML.datasets import fetch_LINEAR_sample
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the data
data = fetch_LINEAR_sample()
t, y, dy = data[14752041].T
omega0 = 17.217
# focus only on the region with the peak
omega1 = np.linspace(17.213, 17.220, 100)
omega2 = 0.5 * omega1
#------------------------------------------------------------
# Compute the delta BIC
terms = np.arange(1, 21)
BIC_max = np.zeros((2, len(terms)))
for i, omega in enumerate([omega1, omega2]):
for j in range(len(terms)):
P = multiterm_periodogram(t, y, dy, omega, terms[j])
BIC = lomb_scargle_BIC(P, y, dy, n_harmonics=terms[j])
BIC_max[i, j] = BIC.max()
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
ax = [fig.add_axes((0.15, 0.53, 0.8, 0.37)),
fig.add_axes((0.15, 0.1, 0.8, 0.37))]
ax_inset = [fig.add_axes((0.15 + 7 * 0.04, 0.55, 0.79 - 7 * 0.04, 0.17)),
fig.add_axes((0.15 + 7 * 0.04, 0.12, 0.79 - 7 * 0.04, 0.17))]
ylims = [(22750, 22850),
(26675, 26775)]
omega0 = [17.22, 8.61]
for i in range(2):
# Plot full panel
ax[i].plot(terms, BIC_max[i], '-k')
ax[i].set_xlim(0, 20)
ax[i].set_ylim(0, 30000)
ax[i].text(0.02, 0.95, r"$\omega_0 = %.2f$" % omega0[i],
ha='left', va='top', transform=ax[i].transAxes)
ax[i].set_ylabel(r'$\Delta BIC$')
if i == 1:
ax[i].set_xlabel('N frequencies')
ax[i].grid(color='gray')
# plot inset
ax_inset[i].plot(terms, BIC_max[i], '-k')
ax_inset[i].xaxis.set_major_locator(plt.MultipleLocator(5))
ax_inset[i].xaxis.set_major_formatter(plt.NullFormatter())
ax_inset[i].yaxis.set_major_locator(plt.MultipleLocator(25))
ax_inset[i].yaxis.set_major_formatter(plt.FormatStrFormatter('%i'))
ax_inset[i].set_xlim(7, 19.75)
ax_inset[i].set_ylim(ylims[i])
ax_inset[i].set_title('zoomed view')
ax_inset[i].grid(color='gray')
plt.show()
|
GertBurger/pcapcffi
|
tests/test_pcapcffi.py
|
Python
|
bsd-3-clause
| 542 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pcapcffi
----------------------------------
Tests for `pcapcffi` module.
"""
import pytest
import pcapcffi
from pcapcffi.wrappers import PcapError
def test_findalldevs():
devs = pcapcffi.wrappers.pcap_findalldevs()
assert devs
def test_pcap():
pcap = pcapcffi.Pcap()
assert pcap._pcap_t is None
assert not pcap.activated
with pytest.raises(PcapError):
pcap.snaplen()
with pytest.raises(PcapError):
pcap.datalinks()
pcap.close()
|
xpspectre/multiple-myeloma
|
prep_patient_data.py
|
Python
|
mit
| 8,458 | 0.002365 |
# Prepare per-patient clinical data
# TODO: Possibly incorporate Palumbo data
import os
from load_patient_data import load_per_patient_data
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
# Load input data
data, per_patient_dict, per_patient_fields = load_per_patient_data()
# Dictionary of categorical mappings
cats = {}
# Build a cleaned up dataset data
# Lots of the cols are redundant
# Build a separate dataset of endpoints endp
endp = data[['PUBLIC_ID']]
# Study ID - CoMMpass vs Palumbo
# For now, we'll just take the CoMMpass patients
study = data[['PUBLIC_ID']]
# Therapy info - this is a simplified version of the stuff in the other table, but is enough to get started with coarse
# classes
treat = data[['PUBLIC_ID']]
# Keep PUBLIC_ID as the main index
# Drop informed consent date D_PT_ic_day
# Drop reason for dropping study D_PT_disc_com
data.drop(['D_PT_ic_day', 'D_PT_disc_com'], axis=1, inplace=True)
# Study end to endp
endp = endp.join(data['D_PT_lastdy'])
data.drop('D_PT_lastdy', axis=1, inplace=True)
# Nobody has completed the study, they're either still in it or dropped for some reason (including death?)
complete_map = {
'': np.nan,
'No': 0
}
cats['D_PT_DIDPATIENTCOM'] = complete_map
endp = endp.join(data['D_PT_DIDPATIENTCOM'].replace(complete_map))
data.drop('D_PT_DIDPATIENTCOM', axis=1, inplace=True)
# Primary reason for patient drop - death vs other
drop_reason_map = {
'': np.nan,
'Death': 1,
'Other': 0,
'Patient no longer consents to participate in the study': 0,
'Patient is lost to follow-up': 0,
'Inter-current illness that interferes with study assessments': 0,
'Noncompliance with study procedures': 0
}
cats['D_PT_PRIMARYREASON'] = drop_reason_map
endp = endp.join(data['D_PT_PRIMARYREASON'].replace(drop_reason_map))
data.drop('D_PT_PRIMARYREASON', axis=1, inplace=True)
# Cause of death due to MM or other
# For some analyses, other is a form of right-censoring
death_reason_map = {
'': np.nan,
'Disease Progression': 1,
'Other': 0
}
cats['D_PT_CAUSEOFDEATH'] = death_reason_map
endp = endp.join(data['D_PT_CAUSEOFDEATH'].replace(death_reason_map))
data.drop('D_PT_CAUSEOFDEATH', axis=1, inplace=True)
# Date of death
endp = endp.join(data['D_PT_deathdy'])
data.drop('D_PT_deathdy', axis=1, inplace=True)
# Drop some redundant cols that are hard to interpret
# A bunch of these are coded versions of more descriptive cols
# D_PT_trtstdy is just 1 for everyone
data.drop(['D_PT_complete', 'D_PT_discont', 'D_PT_DISCREAS', 'D_PT_dthreas', 'D_PT_raceoth', 'D_PT_race',
'D_PT_ethnic', 'D_PT_gender', 'D_PT_DIDTHEPATIENT', 'D_PT_screen', 'D_PT_trtstdy', 'D_PT_sdeathdy',
'D_PT_enr', 'D_PT_lvisit', 'D_PT_lvisitdy', 'D_PT_lvisitc'], axis=1, inplace=True)
# Last day seen alive is important for right-censoring
endp = endp.join(data['D_PT_lstalive'])
data.drop('D_PT_lstalive', axis=1, inplace=True)
# Keep age D_PT_age
# Drop Palumbo stuff for now
data.drop(['CLINICAL', 'RANDOM', 'gender_char', 'race_char', 'informed_consent_version', 'Date_of_diagnosis',
'ENROLLED'], axis=1, inplace=True)
# Keep apparant Palumbo col but actually applies to everyone: demog_height and demog_weight
# Standardize height and weight according to units
# Use cm for height
# Use kg for weight
height_map = {
'cm': 1.0,
'in': 2.54
}
data['DEMOG_HEIGHTUNITOFM'] = data['DEMOG_HEIGHTUNITOFM'].replace(height_map)
data['demog_height'] = data['demog_height'] * data['DEMOG_HEIGHTUNITOFM']
weight_map = {
'kg': 1.0,
'lb': 0.4536
}
data['DEMOG_WEIGHTUNITOFM'] = data['DEMOG_WEIGHTUNITOFM'].replace(weight_map)
data['demog_weight'] = data['demog_weight'] * data['DEMOG_WEIGHTUNITOFM']
# Drop cols for height and weight
data.drop(['DEMOG_HEIGHTUNITOFM', 'DEMOG_WEIGHTUNITOFM'], axis=1, inplace=True)
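A self-contained sketch of the unit-normalization trick used above (made-up heights, not patient data):
import pandas as pd

df = pd.DataFrame({'demog_height': [70, 178], 'DEMOG_HEIGHTUNITOFM': ['in', 'cm']})
df['DEMOG_HEIGHTUNITOFM'] = df['DEMOG_HEIGHTUNITOFM'].replace({'cm': 1.0, 'in': 2.54})
df['demog_height'] = df['demog_height'] * df['DEMOG_HEIGHTUNITOFM']  # -> 177.8, 178.0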
# ISS disease stage "endpoint": D_PT_iss
endp = endp.join(data['D_PT_iss'])
data.drop('D_PT_iss', axis=1, inplace=True)
# CoMMpass vs Palumbo patients
study_map = {
'CoMMpass': 1,
'Palumbo': 0
}
cats['STUDY_ID'] = study_map
study = study.join(data['STUDY_ID'].replace(study_map))
data.drop('STUDY_ID', axis=1, inplace=True)
# Drop redundant stage info
data.drop(['D_PT_issstage_char', 'D_PT_issstage'], axis=1, inplace=True)
# Preprocess basic treatment info
# 3 individual treatments: Bortezomib, Carfilzomib, IMIDs
# 3 dual treatments: bortezomib/carfilzomib, bortezomib/IMIDs, IMIDs/carfilzomib
# 1 triple treatment: bortezomib/IMIDs/carfilzomib
# Make indicators of presence of each treatment from D_PT_therclass col
# Could use a regex...
has_bor = (data['D_PT_therclass'] == 'Bortezomib-based') | \
(data['D_PT_therclass'] == 'combined bortezomib/carfilzomib-based') | \
(data['D_PT_therclass'] == 'combined bortezomib/IMIDs-based') | \
(data['D_PT_therclass'] == 'combined bortezomib/IMIDs/carfilzomib-based')
has_car = (data['D_PT_therclass'] == 'Carfilzomib-based') | \
(data['D_PT_therclass'] == 'combined bortezomib/carfilzomib-based') | \
(data['D_PT_therclass'] == 'combined IMIDs/carfilzomib-based') | \
(data['D_PT_therclass'] == 'combined bortezomib/IMIDs/carfilzomib-based')
has_imi = (data['D_PT_therclass'] == 'IMIDs-based') | \
(data['D_PT_therclass'] == 'combined bortezomib/IMIDs-based') | \
(data['D_PT_therclass'] == 'combined IMIDs/carfilzomib-based') | \
(data['D_PT_therclass'] == 'combined bortezomib/IMIDs/carfilzomib-based')
treat['TREAT_BOR'] = has_bor.astype(int) # True/False -> 1/0 map
treat['TREAT_CAR'] = has_car.astype(int)
treat['TREAT_IMI'] = has_imi.astype(int)
# Drop the rest of the treatment cols
data.drop(['D_PT_therclass', 'D_PT_therfstn', 'D_PT_therclassn', 'D_PT_maxline', 'ftrttrpl'], axis=1, inplace=True)
# Copy over the SCT (stem cell transplant) codes, but I don't know what they are
treat = treat.join(data[['sct_bresp', 'line1sct']])
data.drop(['sct_bresp', 'line1sct'], axis=1, inplace=True)
# What is PD? It sounds like a response, so move to endp table
endp = endp.join(data[['D_PT_pddy', 'D_PT_pdflag', 'D_PT_ttfpdw', 'D_PT_respdur', 'D_PT_mmstatus', 'D_PT_mmstatus1', 'D_PT_mmstatus2', 'D_PT_mmstatus3', 'D_PT_rapd', 'D_PT_dresp']])
data.drop(['D_PT_pddy', 'D_PT_pdflag', 'D_PT_ttfpdw', 'D_PT_respdur', 'D_PT_mmstatus', 'D_PT_mmstatus1', 'D_PT_mmstatus2', 'D_PT_mmstatus3', 'D_PT_rapd', 'D_PT_dresp'], axis=1, inplace=True)
# Drop redundant cols
data.drop(['demog_vj_interval', 'demog_visitdy'], axis=1, inplace=True)
# Keep race cols, lose other+unknown cols to get a linearly independent categorical set
# Keep DEMOG_AMERICANINDIA, DEMOG_BLACKORAFRICA, DEMOG_NATIVEHAWAIIA, DEMOG_WHITE, DEMOG_ASIAN and convert checked
checked_map = {
'': 0,
'Checked': 1
}
data['DEMOG_AMERICANINDIA'] = data['DEMOG_AMERICANINDIA'].replace(checked_map)
data['DEMOG_BLACKORAFRICA'] = data['DEMOG_BLACKORAFRICA'].replace(checked_map)
data['DEMOG_NATIVEHAWAIIA'] = data['DEMOG_NATIVEHAWAIIA'].replace(checked_map)
data['DEMOG_WHITE'] = data['DEMOG_WHITE'].replace(checked_map)
data['DEMOG_ASIAN'] = data['DEMOG_ASIAN'].replace(checked_map)
data.drop(['DEMOG_OTHER', 'DEMOG_SPECIFY'], axis=1, inplace=True)
# Gender - use this col since we know/control the coding
gender_map = {
'Male': 1,
'Female': 0,
'': np.nan
}
cats['DEMOG_GENDER'] = gender_map
data['DEMOG_GENDER'] = data['DEMOG_GENDER'].replace(gender_map)
# Ethnicity: Hispanic/Latino or not
eth_map = {
'Hispanic or Latino': 1,
'Not Hispanic or Latino': 0,
'Other': 0,
'': 0
}
cats['DEMOG_ETHNICITY'] = eth_map
data['DEMOG_ETHNICITY'] = data['DEMOG_ETHNICITY'].replace(eth_map)
data.drop(['DEMOG_SPECIFY2'], axis=1, inplace=True)
# Drop redundant visit and age cols
data.drop(['DEMOG_DAYOFVISIT', 'DEMOG_DAYOFBIRTH', 'DEMOG_PATIENTAGE', 'demog_visit', 'enr'], axis=1, inplace=True)
# print(data['DEMOG_ETHNICITY'].unique())
# print(data)
# print(treat)
# Save processed tables
output_dir = 'data/processed'
data.set_index('PUBLIC_ID', inplace=True)
data.to_csv(os.path.join(output_dir, 'patient_data.csv'))
endp.set_index('PUBLIC_ID', inplace=True)
|
onaio/kpi
|
kpi/admin.py
|
Python
|
agpl-3.0
| 241 | 0 |
# coding: utf-8
from django.contrib import admin
from hub.models import ExtraUserDetail
from .models import AuthorizedApplication
# Register your models here.
admin.site.register(AuthorizedApplication)
admin.site.register(ExtraUserDetail)
|
scode/pants
|
tests/python/pants_test/backend/jvm/subsystems/test_shader.py
|
Python
|
apache-2.0
| 7,708 | 0.007654 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import tempfile
import unittest
from pants.backend.jvm.subsystems.shader import Shader, Shading
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_delete
from pants_test.subsystem.subsystem_util import subsystem_instance
class ShaderTest(unittest.TestCase):
def setUp(self):
self.jarjar = '/not/really/jarjar.jar'
with subsystem_instance(DistributionLocator):
executor = SubprocessExecutor(DistributionLocator.cached())
self.shader = Shader(jarjar=self.jarjar, executor=executor)
self.output_jar = '/not/really/shaded.jar'
def populate_input_jar(self, *entries):
fd, input_jar_path = tempfile.mkstemp()
os.close(fd)
self.addCleanup(safe_delete, input_jar_path)
with open_zip(input_jar_path, 'w') as jar:
for entry in entries:
jar.writestr(entry, '0xCAFEBABE')
return input_jar_path
def test_assemble_default_rules(self):
input_jar = self.populate_input_jar('org/pantsbuild/tools/fake/Main.class',
'com/google/common/base/Function.class')
rules = self.shader.assemble_binary_rules('org.pantsbuild.tools.fake.Main', input_jar)
self.assertEqual(Shader.exclude_package('org.pantsbuild.tools.fake'), rules[0])
self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])
def test_assemble_default_rules_default_package(self):
input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
rules = self.shader.assemble_binary_rules('main', input_jar)
self.assertEqual(Shader.exclude_package(), rules[0])
self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])
def test_assemble_custom_rules(self):
input_jar = self.populate_input_jar('main.class')
rules = self.shader.assemble_binary_rules('main', input_jar,
custom_rules=[Shader.shade_class('bob'),
Shader.exclude_class('fred')])
self.assertEqual(Shader.shade_class('bob'), rules[0])
self.assertEqual(Shader.exclude_class('fred'), rules[1])
self.assertEqual(Shader.exclude_package(), rules[2])
self.assertIn(Shader.exclude_package('javax.annotation'), rules[3:])
def test_runner_command(self):
input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
custom_rules = [Shader.exclude_package('log4j', recursive=True)]
with self.shader.binary_shader(self.output_jar, 'main', input_jar,
custom_rules=custom_rules) as shader:
command = shader.command
self.assertTrue(command.pop(0).endswith('java'))
jar_or_cp = command.pop(0)
self.assertIn(jar_or_cp, {'-cp', 'classpath', '-jar'})
self.assertEqual(self.jarjar, os.path.abspath(command.pop(0)))
if jar_or_cp != '-jar':
# We don't really care what the name of the jarjar main class is - shader.command[2]
command.pop(0)
self.assertEqual('process', command.pop(0))
rules_file = command.pop(0)
self.assertTrue(os.path.exists(rules_file))
with open(rules_file) as fp:
lines = fp.read().splitlines()
self.assertEqual('rule log4j.** log4j.@1', lines[0]) # The custom rule.
self.assertEqual('rule * @1', lines[1]) # Exclude main's package.
self.assertIn('rule javax.annotation.* javax.annotation.@1', lines) # Exclude system.
self.assertEqual('rule com.google.common.base.* {}com.google.common.base.@1'
.format(Shading.SHADE_PREFIX), lines[-1]) # Shade the rest.
self.assertEqual(input_jar, command.pop(0))
self.assertEqual(self.output_jar, command.pop(0))
def test_sanitize_package_name(self):
def assert_sanitize(name, sanitized):
self.assertEqual(sanitized, Shading.Relocate._sanitize_package_name(name))
assert_sanitize('hello', 'hello')
assert_sanitize('hello.goodbye', 'hello.goodbye')
assert_sanitize('.hello.goodbye', 'hello.goodbye')
assert_sanitize('hello.goodbye.', 'hello.goodbye')
assert_sanitize('123', '_123')
assert_sanitize('123.456', '_123._456')
assert_sanitize('123.v2', '_123.v2')
assert_sanitize('hello-goodbye', 'hello_goodbye')
assert_sanitize('hello-/.goodbye.?', 'hello__.goodbye._')
assert_sanitize('one.two..three....four.', 'one.two.three.four')
def test_infer_shaded_pattern(self):
def assert_inference(from_pattern, prefix, to_pattern):
result = ''.join(Shading.Relocate._infer_shaded_pattern_iter(from_pattern, prefix))
self.assertEqual(to_pattern, result)
assert_inference('com.foo.bar.Main', None, 'com.foo.bar.Main')
assert_inference('com.foo.bar.', None, 'com.foo.bar.')
assert_inference('com.foo.bar.', '__prefix__.', '__prefix__.com.foo.bar.')
assert_inference('com.*.bar.', None, 'com.@1.bar.')
assert_inference('com.*.bar.*.', None, 'com.@1.bar.@2.')
assert_inference('com.*.bar.**', None, 'com.@1.bar.@2')
assert_inference('*', None, '@1')
assert_inference('**', None, '@1')
assert_inference('**', '__prefix__.', '__prefix__.@1')
def test_shading_exclude(self):
def assert_exclude(from_pattern, to_pattern):
self.assertEqual((from_pattern, to_pattern), Shading.Exclude.new(from_pattern).rule())
assert_exclude('com.foo.bar.Main', 'com.foo.bar.Main')
assert_exclude('com.foo.bar.**', 'com.foo.bar.@1')
assert_exclude('com.*.bar.**', 'com.@1.bar.@2')
def test_shading_exclude_package(self):
self.assertEqual(('com.foo.bar.**', 'com.foo.bar.@1'),
Shading.ExcludePackage.new('com.foo.bar').rule())
self.assertEqual(('com.foo.bar.*', 'com.foo.bar.@1'),
Shading.ExcludePackage.new('com.foo.bar', recursive=False).rule())
def test_relocate(self):
self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
Shading.Relocate.new(from_pattern='com.foo.bar.**').rule())
self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format('__my_prefix__.')),
Shading.Relocate.new(from_pattern='com.foo.bar.**',
shade_prefix='__my_prefix__.').rule())
self.assertEqual(('com.foo.bar.**', 'org.biz.baz.@1'.format('__my_prefix__.')),
Shading.Relocate.new(from_pattern='com.foo.bar.**',
shade_prefix='__my_prefix__.',
shade_pattern='org.biz.baz.@1').rule())
def test_relocate_package(self):
self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
Shading.RelocatePackage.new('com.foo.bar').rule())
self.assertEqual(('com.foo.bar.*', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
Shading.RelocatePackage.new('com.foo.bar', recursive=False).rule())
self.assertEqual(('com.foo.bar.**', '__p__.com.foo.bar.@1'),
Shading.RelocatePackage.new('com.foo.bar', shade_prefix='__p__.').rule())
|
Stellarium/stellarium
|
util/skyTile.py
|
Python
|
gpl-2.0
| 4,700 | 0.004681 |
#!/usr/bin/python
#
# Fabien Chereau fchereau@eso.org
#
import gzip
import os
def writePolys(pl, f):
"""Write a list of polygons pl into the file f.
The result is under the form [[[ra1, de1],[ra2, de2],[ra3, de3],[ra4, de4]], [[ra1, de1],[ra2, de2],[ra3, de3]]]"""
f.write('[')
for idx, poly in enumerate(pl):
f.write('[')
for iv, v in enumerate(poly):
f.write('[%.8f, %.8f]' % (v[0], v[1]))
if iv != len(poly) - 1:
f.write(', ')
f.write(']')
if idx != len(pl) - 1:
f.write(', ')
f.write(']')
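A hedged usage sketch (hypothetical coordinates, written to a throwaway file):
with open('polys.json', 'w') as out:
    writePolys([[[10.0, -5.0], [11.0, -5.0], [11.0, -4.0]],
                [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]], out)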
class StructCredits:
def __init__(self):
self.short = None
self.full = None
self.infoUrl = None
return
def outJSON(self, f, levTab):
if self.short != None:
f.write(levTab + '\t\t"short": "' + self.short + '",\n')
if self.full != None:
f.write(levTab + '\t\t"full": "' + self.full + '",\n')
if self.infoUrl != None:
f.write(levTab + '\t\t"infoUrl": "' + self.infoUrl + '",\n')
f.seek(-2, os.SEEK_CUR)
f.write('\n')
class SkyImageTile:
"""Contains all the properties needed to describe a multiresolution image tile"""
def __init__(self):
self.subTiles = []
self.imageCredits = StructCredits()
self.serverCredits = StructCredits()
self.imageInfo = StructCredits()
self.imageUrl = None
self.alphaBlend = None
self.maxBrightness = None
return
def outputJSON(self, prefix='', qCompress=False, maxLevelPerFile=10, outDir=''):
"""Output the tiles tree in the JSON format"""
fName = outDir + prefix + "x%.2d_%.2d_%.2d.json" % (2 ** self.level, self.i, self.j)
# Actually write the file with maxLevelPerFile level
with open(fName, 'w') as f:
self.__subOutJSON(prefix, qCompress, maxLevelPerFile, f, 0, outDir)
if (qCompress):
with open(fName) as ff:
fout = gzip.GzipFile(fName + ".gz", 'w')
fout.write(ff.read())
fout.close()
os.remove(fName)
def __subOutJSON(self, prefix, qCompress, maxLevelPerFile, f, curLev, outDir):
"""Write the tile in the file f"""
levTab = ""
for i in range(0, curLev):
levTab += '\t'
f.write(levTab + '{\n')
if self.imageInfo.short != None or self.imageInfo.full != None or self.imageInfo.infoUrl != None:
f.write(levTab + '\t"imageInfo": {\n')
self.imageInfo.outJSON(f, levTab)
f.write(levTab + '\t},\n')
if self.imageCredits.short != None or self.imageCredits.full != None or self.imageCredits.infoUrl != None:
f.write(levTab + '\t"imageCredits": {\n')
self.imageCredits.outJSON(f, levTab)
f.write(levTab + '\t},\n')
if self.serverCredits.short != None or self.serverCredits.full != None or self.serverCredits.infoUrl != None:
f.write(levTab + '\t"serverCredits": {\n')
self.serverCredits.outJSON(f, levTab)
f.write(levTab + '\t},\n')
if self.imageUrl:
f.write(levTab + '\t"imageUrl": "' + self.imageUrl + '",\n')
f.write(levTab + '\t"worldCoords": ')
writePolys(self.skyConvexPolygons, f)
f.write(',\n')
f.write(levTab + '\t"textureCoords": ')
writePolys(self.textureCoords, f)
f.write(',\n')
if self.maxBrightness:
f.write(levTab + '\t"maxBrightness": %f,\n' % self.maxBrightness)
if self.alphaBlend:
f.write(levTab + '\t"alphaBlend": true,\n')
f.write(levTab + '\t"minResolution": %f' % self.minResolution)
if not self.subTiles:
f.write('\n' + levTab + '}')
return
f.write(',\n')
f.write(levTab + '\t"subTiles": [\n')
if curLev + 1 < maxLevelPerFile:
# Write the tiles in the same file
for st in self.subTiles:
assert isinstance(st, SkyImageTile)
st.__subOutJSON(prefix, qCompress, maxLevelPerFile, f, curLev + 1, outDir)
f.write(',\n')
else:
# Write the tiles in a new file
for st in self.subTiles:
st.outputJSON(prefix, qCompress, maxLevelPerFile, outDir)
f.write(levTab + '\t\t{"$ref": "' + prefix + "x%.2d_%.2d_%.2d.json" % (2 ** st.level, st.i, st.j))
if qCompress:
f.write(".gz")
f.write('"},\n')
f.seek(-2, os.SEEK_CUR)
f.write('\n' + levTab + '\t]\n')
f.write(levTab + '}')
|
poorsquinky/traktor-tools
|
pl2dir.py
|
Python
|
gpl-3.0
| 4,990 | 0.012024 |
#!/usr/bin/python
# pl2dir - Generates a directory structure based on your Traktor playlists.
# Copyright (C) 2015 Erik Stambaugh
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program (see the file LICENSE); if not, see
# http://www.gnu.org/licenses/, or write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
from bs4 import BeautifulSoup
import os,re,sys
def help():
print "Usage: %s COLLECTION PATH1 [PATH2 ...]\n" % sys.argv[0]
print " COLLECTION is a Traktor collection_*.nml file"
print " PATH1, PATH2, etc. are lists of directories to recurse"
print " when searching for audio files"
try:
collection_filename = sys.argv[1]
collection_fh = open(collection_filename)
except (IndexError, IOError), e:
print "ERROR: invalid collection (%s)\n" % e
help()
sys.exit(1)
source_paths = sys.argv[2:]
if len(source_paths) < 1:
print "ERROR: No source paths specified\n"
help()
sys.exit(1)
soup = BeautifulSoup(collection_fh, "xml")
print '# Getting a full list of source files...'
allfiles=[]
pathdict={}
for srcpath in source_paths:
for path,dirs,files in os.walk(srcpath):
for f in files:
fullpath="%s/%s" % (path,
|
f)
allfiles.append(fullpath)
d = pathdict.get(f,[])
d.append(fullpath)
pathdict[f] = d
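For reference, the same filename-to-paths index could be built with a defaultdict; a sketch under the same assumptions (source_paths as defined above):
from collections import defaultdict

pathdict_alt = defaultdict(list)  # hypothetical alternative, not used by the script
for srcpath in source_paths:
    for path, dirs, files in os.walk(srcpath):
        for f in files:
            pathdict_alt[f].append("%s/%s" % (path, f))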
### collection
class Track:
def __str__(self):
if self.artist:
return "%s - %s" % (self.artist.encode('utf8'),self.title.encode('utf8'))
return "%s" % (self.title.encode('utf8'))
def __unicode__(self):
return self.__str__()
def __init__(self,soup):
self.soup = soup
self.artist = soup.attrs.get('ARTIST','')
self.title = soup.attrs.get('TITLE','')
loc = soup.find('LOCATION')
self.drive = loc.attrs.get('VOLUME','')
self.dir = loc.attrs.get('DIR','')
self.filename = loc.attrs.get('FILE','')
self.pk = "%s%s%s" % (self.drive,self.dir,self.filename)
self.located = None
def find(self,pathdict):
if self.located is None:
if pathdict.has_key(self.filename):
self.located = pathdict[self.filename][0]
else:
print "# NOT FOUND: %s" % self.filename
### playlists
class Playlist:
def __str__(self):
return self.name.encode('utf8')
def __unicode__(self):
return self.__str__()
def __init__(self,soup,collection={}):
self.soup = soup
self.name = soup.attrs['NAME']
self.tracklist = []
self.tracks = []
for t in self.soup.find_all('PRIMARYKEY', attrs={'TYPE': 'TRACK'}):
self.tracklist.append(t["KEY"].encode('utf8'))
if collection.has_key(t["KEY"]):
track = collection[t['KEY']]
self.tracks.append(track)
else:
print "# ***NOT FOUND IN COLLECTION: %s" % t["KEY"]
def find_files(self, pathdict):
for t in self.tracks:
t.find(pathdict=pathdict)
collection={}
c = soup.find('COLLECTION')
for e in c.find_all('ENTRY'):
track = Track(e)
collection[track.pk] = track
playlists = []
pl = soup.find_all('NODE', attrs={"TYPE": "PLAYLIST"})
for l in pl:
playlist = Playlist(l, collection=collection)
playlists.append(playlist)
print "# Searching playlists..."
for l in playlists:
if len(l.tracks) > 0:
l.find_files(pathdict)
found = reduce(lambda x,y: x+y, map(lambda z: 0 if z.located is None else 1, l.tracks))
print "# %s - %s found %s not found" % (l, found, len(l.tracks) - found)
for l in playlists:
if len(l.tracks) > 0:
found = reduce(lambda x,y: x+y, map(lambda z: 0 if z.located is None else 1, l.tracks))
if found > 0:
dirname = re.sub(r'[^0-9a-zA-Z-_]','-',str(l))
print "mkdir %s" % dirname
for track in l.tracks:
if track.located is not None:
extension = re.sub('.*\.','',track.located)
target = re.sub('[^0-9a-zA-Z-_]','_',str(track))
target = re.sub('_+$','',target)
target = "%s.%s" % (target,extension)
print 'cp -lvf "%s" "%s/%s"' % (re.sub(r'(["$])',r'\\\1',track.located), dirname, target)
else:
print "# NO TRACKS FOUND: %s" % l
|
twister/twister.github.io
|
lib/TscTelnetLib.py
|
Python
|
apache-2.0
| 17,030 | 0.003288 |
# File: TscTelnetLib.py ; This file is part of Twister.
# version: 2.002
#
# Copyright (C) 2012 , Luxoft
#
# Authors:
# Adrian Toader <adtoader@luxoft.com>
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains Telnet connection functions.
"""
from telnetlib import Telnet
from time import sleep
#from time import time as epochtime
from thread import start_new_thread
#from os import remove, rename
#from os.path import dirname, exists, abspath, join, getsize
#from json import load, dump
#__dir__ = dirname(abspath(__file__))
__all__ = ['TelnetManager', 'TelnetConnection']
#
class TelnetManager(object):
""" Twister Telnet connections manager """
def __init__(self):
""" init """
# connections are TelnetConnection instances
self.connections = {}
# active connection name; is used for all commands as default
# if no name is specified
self.activeConnection = None
def open_connection(self, name, host, port=23, user=None, password=None,
userExpect=None, passwordExpect=None, keepalive=True):
""" open a new TelnetConnection instance and add it to manager list """
if not self.connections.has_key(name):
connection = TelnetConnection(name, host, port, user, password,
userExpect, passwordExpect, keepalive)
self.connections.update([(name, connection), ])
return True
else:
print('telnet open connection error: connection name already in use')
return False
def login(self, name, user=None, password=None,
userExpect=None, passwordExpect=None):
""" login on telnet connection """
try:
return self.connections[name].login(user, password,
userExpect, passwordExpect)
except Exception, e:
print('telnet manager login error: {er}'.format(er=e))
return False
def write(self, command, name=None):
""" write command to telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].write(command)
elif self.activeConnection:
return self.connections[self.activeConnection].write(command)
return False
def read(self, name=None):
""" read from telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].read()
elif self.activeConnection:
return self.connections[self.activeConnection].read()
return False
def read_until(self, expected, name=None):
""" read from telnet connection until expected """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].read_until(expected)
elif self.activeConnection:
return self.connections[self.activeConnection].read_until(expected)
return False
def set_newline(self, newline, name=None):
""" set the new line char for telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].set_newline(newline)
elif self.activeConnection:
return self.connections[self.activeConnection].set_newline(newline)
return False
def set_timeout(self, timeout, name=None):
""" set timeout for operations on telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].set_timeout(timeout)
elif self.activeConnection:
return self.connections[self.activeConnection].set_timeout(timeout)
return False
def get_connection(self, name=None):
""" get the TelnetConnection instance """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name]
elif self.activeConnection:
return self.connections[self.activeConnection]
return False
def set_active_connection(self, name):
""" set the active connection """
if not self.connections.has_key(name):
print 'connection not found'
return False
self.activeConnection = name
return True
def list_connections(self):
""" list all connections """
return [name for name in self.connections.iterkeys()]
def close_connection(self, name=None):
""" close connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if not name and self.activeConnection:
del(self.connections[self.activeConnection])
self.activeConnection = None
return True
try:
del(self.connections[name])
if name == self.activeConnection:
self.activeConnection = None
except Exception, e:
print('telnet manager error while closing connection: {er}'.format(er=e))
return False
return True
def close_all_connections(self):
""" close all connections """
del(self.connections)
self.connections = {}
self.activeConnection = None
print('all connections closed')
return True
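A hedged usage sketch (host, port and credentials are placeholders; open_connection attempts a real telnet session, so this only succeeds against a reachable device):
mgr = TelnetManager()
mgr.open_connection('router', '192.0.2.1', port=23, user='admin', password='secret')
mgr.set_active_connection('router')
mgr.set_timeout(4)
mgr.write('show version')
print(mgr.read())
mgr.close_all_connections()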
class TelnetConnection:
""" tsc telnet connection """
def __init__(self, name, host, port=23, user=None, password=None,
userExpect=None, passwordExpect=None, keepalive=True):
""" init """
self.connection = None
self.host = host
self.port = port
self.loginAccount = {
'user': user,
'password': password
}
self.name = name
self.newline = '\n'
self.timeout = 4
self.keepAliveRetries = 0
self.keepAliveThread = None
self.keepAlive = keepalive
self.loginDriver = {
'userExpect': userExpect,
'passwordExpect': passwordExpect
}
"""
self.loginDrivers = None
self.loginDriversPath = join(__dir__, 'logindrivers.list')
self.loginDriversLockPath = join(__dir__, 'logindrivers.lock')
self.loadLoginDrivers()
"""
try:
self.connection = Telnet(self.host, self.port, self.timeout)
print('telnet connection created!')
self.login()
if self.keepAlive:
self.keepAliveThread = start_new_thread(self.keep_alive, ())
else:
self.keepAliveThread = None
except Exception, e:
self.connect
|
weissercn/learningml
|
learningml/GoF/chi2/gauss/miranda_adaptive_binning_systematics_Gaussian_same_projection_evaluation_of_optimised_classifiers.py
|
Python
|
mit
| 3,491 | 0.030077 |
import adaptive_binning_chisquared_2sam
import os
systematics_fraction = 0.01
dim_list = [1,2,3,4,5,6,7,8,9,10]
adaptive_binning=True
CPV = True
PLOT = True
if CPV:
orig_name="chi2_gauss_0_95__0_95_CPV_not_redefined_syst_"+str(systematics_fraction).replace(".","_")+"_"
orig_title="Gauss 0.95 0.95 syst{} adaptbin".format(systematics_fraction)
#orig_name="chi2_gauss_0_95__0_95_CPV_not_redefined_syst_"+str(systematics_fraction).replace(".","_")+"_euclidean_plot_"
#orig_title="Gauss 1.0 0.95 syst{} euclidean adaptbin".format(systematics_fraction)
else:
orig_name="chi2_gauss__1_0__1_0_noCPV_not_redefined_syst_"+str(systematics_fraction).replace(".","_")+"_"
orig_title="Gauss 1.0 1.0 syst{} adaptbin".format(systematics_fraction)
#orig_name="chi2_gauss__1_0__1_0_noCPV_not_redefined_syst_"+str(systematics_fraction).replace(".","_")+"_euclidean_"
#orig_title="Gauss 1.0 1.0 syst{} euclidean adaptbin".format(systematics_fraction)
if PLOT:
	dim_list = [2,6,10]
orig_name="plot_"+orig_name
orig_title= "Plot "+orig_title
sample_list_typical= [79, 74, 22]
sample_list= [[item,item+1] for item in sample_list_typical]
else:
sample_list = [range(100)]*len(dim_list)
comp_file_list_list = []
for dim_data_index, dim_data in enumerate(dim_list):
comp_file_list=[]
	for i in sample_list[dim_data_index]:
if CPV:
comp_file_list.append((os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_1.0_1.0_{0}.txt".format(i,dim_data),os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_0.95_0.95_{0}.txt".format(i,dim_data)))
#comp_file_list.append((os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_1.0_1.0_{0}_euclidean.txt".format(i,dim_data),os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_0.95_0.95_{0}_euclidean.txt".format(i,dim_data)))
else:
comp_file_list.append((os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_1.0_1.0_{0}.txt".format(i,dim_data),os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_1.0_1.0_1{0}.txt".format(str(i).zfill(2),dim_data)))
#comp_file_list.append((os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_1.0_1.0_{0}_euclidean.txt".format(i,dim_data),os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{1}D_10000_0.0_1.0_1.0_1{0}_euclidean.txt".format(str(i).zfill(2),dim_data)))
comp_file_list_list.append(comp_file_list)
if adaptive_binning==True:
if PLOT: number_of_splits_list = [3]
else: number_of_splits_list = [1,2,3,4,5,6,7,8,9,10]
adaptive_binning_chisquared_2sam.chi2_adaptive_binning_wrapper(orig_title, orig_name, dim_list, comp_file_list_list,number_of_splits_list,systematics_fraction)
else:
single_no_bins_list=[2,3,5]
adaptive_binning_chisquared_2sam.chi2_regular_binning_wrapper(orig_title, orig_name, dim_list, comp_file_list_list,single_no_bins_list,systematics_fraction)
|
wfxiang08/sqlalchemy
|
examples/generic_associations/__init__.py
|
Python
|
mit
| 748 | 0.005348 |
"""
Illustrates various methods of associating multiple types of
parents with a particular child object.
The examples all use the declarative extension along with
declarative mixins. Each one presents the identical use
case at the end - two classes, ``Customer`` and ``Supplier``, both
subclassing the ``HasAddresses`` mixin, which ensures that the
parent class is provided with an ``addresses`` collection
which contains ``Address`` objects.
The :viewsource:`.discriminator_on_association` and :viewsource:`.generic_fk` scripts
are modernized versions of recipes presented in the
2007 blog post
`Polymorphic Associations with SQLAlchemy <http://techspot.zzzeek.org/2007/05/29/polymorphic-associations-with-sqlalchemy/>`_.
.. autosource::
"""
|
googleapis/storage-testbench
|
tests/test_error.py
|
Python
|
apache-2.0
| 3,099 | 0.001291 |
#!/usr/bin/env python3
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for error handling helpers."""
import unittest
from unittest.mock import ANY, Mock
import grpc
from testbench import error
class TestError(unittest.TestCase):
def test_csek(self):
with self.assertRaises(error.RestException) as rest:
error.csek(None)
self.assertEqual(rest.exception.code, 400)
context = Mock()
error.csek(context)
context.abort.assert_called_once_with(grpc.StatusCode.INVALID_ARGUMENT, ANY)
def test_invalid(self):
with self.assertRaises(error.RestException) as rest:
error.invalid("bad bucket name", None)
self.assertEqual(rest.exception.code, 400)
context = Mock()
error.invalid("bad bucket name", context)
context.abort.assert_called_once_with(grpc.StatusCode.INVALID_ARGUMENT, ANY)
def test_missing(self):
with self.assertRaises(error.RestException) as rest:
error.missing("object name", None)
self.assertEqual(rest.exception.code, 400)
context = Mock()
error.missing("object name", context)
context.abort.assert_called_once_with(grpc.StatusCode.INVALID_ARGUMENT, ANY)
def test_mismatch(self):
with self.assertRaises(error.RestException) as rest:
error.mismatch("ifGenerationMatch", "0", "123", None)
self.assertEqual(rest.exception.code, 412)
context = Mock()
error.mismatch("ifGenerationMatch", "0", "123", context)
context.abort.assert_called_once_with(grpc.StatusCode.FAILED_PRECONDITION, ANY)
def test_notchanged(self):
with self.assertRaises(error.RestException) as rest:
error.notchanged("ifGenerationNotMatch:7", None)
self.assertEqual(rest.exception.code, 304)
context = Mock()
error.notchanged("ifGenerationNotMatch:7", context)
        context.abort.assert_called_once_with(grpc.StatusCode.ABORTED, ANY)
def test_notfound(self):
with self.assertRaises(error.RestException) as rest:
error.notfound("test-object", None)
self.assertEqual(rest.exception.code, 404)
context = Mock()
        error.notfound("test-object", context)
context.abort.assert_called_once_with(grpc.StatusCode.NOT_FOUND, ANY)
def test_not_allowed(self):
with self.assertRaises(error.RestException) as rest:
error.notallowed(None)
self.assertEqual(rest.exception.code, 405)
if __name__ == "__main__":
unittest.main()
|
EmilienDupont/cs229project
|
convertToSparseMatrix.py
|
Python
|
mit
| 3,343 | 0.007777 |
"""
Script used to convert data into sparse matrix format that
can easily be imported into MATLAB.
Use like this
python convertToSparseMatrix.py ../../../../../data/train_triplets.txt 1000 ../../../../../data/eval/year1_test_triplets_visible.txt ../../../../../data/eval/year1_test_triplets_hidden.txt 100
"""
import sys
import time
# Analysing command line arguments
if len(sys.argv) < 6:
print 'Usage:'
    print '  python %s <triplets training file> <number of triplets> <triplets visible history file> <triplets hidden history file> <number of triplets>' % sys.argv[0]
exit()
inputTrainingFile = sys.argv[1]
numTriplets = int(sys.argv[2])
inputTestFile = sys.argv[3]
inputHiddenTestFile = sys.argv[4]
numTripletsTest = int(sys.argv[5])
start = time.time()
userIdToIndex = {} # Key: userid, Value: Row in matrix
songIdToIndex = {} # Key: songid, Value: Column in matrix
userIndex = 0
songIndex = 0
rows = []
columns = []
entries = []
linesRead = 0
maxLines = numTriplets
for inputFile in [inputTrainingFile, inputTestFile, inputHiddenTestFile]:
linesRead = 0
f = open(inputFile)
for line in f:
userid, song, songCount = line.strip().split('\t')
# Fill in indices
if song not in songIdToIndex:
songIdToIndex[song] = songIndex
songIndex += 1
if userid not in userIdToIndex:
userIdToIndex[userid] = userIndex
userIndex += 1
# Fill in rows, columns and entries
rows.append(userIdToIndex[userid])
columns.append(songIdToIndex[song])
entries.append(int(songCount))
linesRead += 1
if linesRead >= maxLines:
break
if inputFile == inputTrainingFile:
numUsersInTraining = userIndex
maxLines = numTripletsTest
if inputFile == inputTestFile:
numSongs = songIndex
numUsers = userIndex
numNonZeros = len(entries)
rows = rows
columns = columns
entries = entries
# Write to a sparse matrix file that can be read with MATLAB
matrix_file = open('UserSongSparseMatrix' + str(numTriplets) + '_' + str(numTripletsTest) + '.txt', 'w')
for i in range(len(entries)):
matrix_file.write(str(rows[i]+1) + "\t" + str(columns[i]+1) + "\t" + str(entries[i]) + "\n")
#matrix_file.write(str(numUsers-1) + "\t" + str(numSongs-1) + "\t" + str(0.000000) + "\n")
matrix_file.close()
# reset everything to zero to read in the hidden matrix
rows = []
columns = []
entries = []
if inputFile == inputHiddenTestFile:
# Write to a sparse matrix file that can be read with MATLAB
matrix_file_test = open('UserSongSparseMatrixTest' + str(numTriplets) + '_' + str(numTripletsTest) + '.txt', 'w')
for i in range(len(entries)):
matrix_file_test.write(str(rows[i]+1) + "\t" + str(columns[i]+1) + "\t" + str(entries[i]) + "\n")
#matrix_file_test.write(str(userIndex-1) + "\t" + str(songIndex-1) + "\t" + str(0.000000) + "\n")
matrix_file_test.close()
f.close()
print "Done loading %d triplets!" % (numTriplets + numTripletsTest)
end = time.time()
print "Took %s seconds" % (end - start)
print "Number of users", numUsers
print "Number of songs", numSongs
print "You need to predict for the last %s users" % (numUsers - numUsersInTraining)
|
google/starthinker
|
dags/itp_audit_dag.py
|
Python
|
apache-2.0
| 35,487 | 0.026968 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
    1. Ensure a RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
    1. Ensure a RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
DV360 / CM360 Privacy Audit
Dashboard that shows performance metrics across browser to see the impact of privacy changes.
- Follow the instructions from 1-this document.
1-this document: https://docs.google.com/document/d/1HaRCMaBBEo0tSKwnofWNtaPjlW0ORcVHVwIRabct4fY/
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'recipe_timezone':'America/Los_Angeles', # Timezone for report dates.
'auth_sheets':'user', # Credentials used for Sheets.
'auth_bq':'service', # Credentials used for BigQuery.
'auth_dv':'user', # Credentials used for DV360.
'auth_cm':'user', # Credentials used for CM.
'cm_account_id':'', # Campaign Manager Account Id.
'floodlight_configuration_ids':[], # Comma delimited list of floodlight configuration ids for the Campaign Manager floodlight report.
'date_range':'LAST_365_DAYS', # Timeframe to run the ITP report for.
'cm_advertiser_ids':[], # Optional: Comma delimited list of CM advertiser ids.
'dv360_partner_id':'', # DV360 Partner id
'dv360_advertiser_ids':[], # Optional: Comma delimited list of DV360 Advertiser ids.
'recipe_name':'', # Name of report in DBM, should be unique.
'recipe_slug':'ITP_Audit_Dashboard', # BigQuery dataset for store dashboard tables.
}
RECIPE = {
'setup':{
'hour':[
3
],
'day':[
'Mon'
]
},
'tasks':[
{
'drive':{
'auth':{'field':{'name':'auth_sheets','kind':'authentication','order':1,'default':'user','description':'Credentials used for Sheets.'}},
'hour':[
],
'copy':{
'source':'https://docs.google.com/spreadsheets/d/1rH_PGXOYW2mVdmAYnKbv6kcaB6lQihAyMsGtFfinnqg/',
        'destination':{'field':{'name':'recipe_name','prefix':'Privacy Audit ','kind':'string','order':1,'description':'Name of document to deploy to.','default':''}}
}
}
},
{
'dataset':{
'auth':{'field':{'name':'auth_bq','kind':'authentication','order':1,'default':'service','description':'Credentials used for BigQuery.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':1,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}}
}
},
{
'dbm':{
'auth':{'field':{'name':'auth_dv','kind':'authentication','order':1,'default':'user','description':'Credentials used for DV360.'}},
'report':{
'name':{'field':{'name':'recipe_name','kind':'string','prefix':'ITP_Audit_Browser_','default':'ITP_Audit_Browser_','order':1,'description':'Name of report in DV360, should be unique.'}},
'timeout':90,
'filters':{
'FILTER_ADVERTISER':{
'values':{'field':{'name':'dv360_advertiser_ids','kind':'integer_list','order':6,'default':[],'description':'Optional: Comma delimited list of DV360 Advertiser ids.'}}
},
'FILTER_PARTNER':{
'values':{'field':{'name':'dv360_partner_id','kind':'integer','order':5,'default':'','description':'DV360 Partner id'}}
}
},
'body':{
'timezoneCode':{'field':{'name':'recipe_timezone','kind':'timezone','description':'Timezone for report dates.','default':'America/Los_Angeles'}},
'metadata':{
'title':{'field':{'name':'recipe_name','default':'ITP_Audit_Browser_','kind':'string','prefix':'ITP_Audit_Browser_','order':1,'description':'Name of report in DV360, should be unique.'}},
'dataRange':{'field':{'name':'date_range','kind':'choice','order':3,'default':'LAST_365_DAYS','choices':['LAST_7_DAYS','LAST_14_DAYS','LAST_30_DAYS','LAST_365_DAYS','LAST_60_DAYS','LAST_7_DAYS','LAST_90_DAYS','MONTH_TO_DATE','PREVIOUS_MONTH','PREVIOUS_QUARTER','PREVIOUS_WEEK','PREVIOUS_YEAR','QUARTER_TO_DATE','WEEK_TO_DATE','YEAR_TO_DATE'],'description':'Timeframe to run the ITP report for.'}},
'format':'CSV'
},
'params':{
'type':'TYPE_GENERAL',
'groupBys':[
'FILTER_ADVERTISER',
'FILTER_ADVERTISER_NAME',
'FILTER_ADVERTISER_CURRENCY',
'FILTER_MEDIA_PLAN',
'FILTER_MEDIA_PLAN_NAME',
'FILTER_CAMPAIGN_DAILY_FREQUENCY',
'FILTER_INSERTION_ORDER',
'FILTER_INSERTION_ORDER_NAME',
'FILTER_LINE_ITEM',
'FILTER_LINE_ITEM_NAME',
'FILTER_PAGE_LAYOUT',
'FILTER_WEEK',
'FILTER_MONTH',
'FILTER_YEAR',
'FILTER_PARTNER',
'FILTER_PARTNER_NAME',
'FILTER_LINE_ITEM_TYPE',
'FILTER_DEVICE_TYPE',
'FILTER_BROWSER',
'FILTER_ANONYMOUS_INVENTORY_MODELING',
'FILTER_OS'
],
'metrics':[
'METRIC_MEDIA_COST_ADVERTISER',
'METRIC_IMPRESSIONS',
'METRIC_CLICKS',
'METRIC_TOTAL_CONVERSIONS',
'METRIC_LAST_CLICKS',
'METRIC_LAST_IMPRESSIONS',
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_sds_imperial_1.py
|
Python
|
mit
| 482 | 0.045643 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/reactor/shared_rct_sds_imperial_1.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","rct_sds_imperial_1_n")
	#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
vascotenner/holoviews
|
holoviews/plotting/util.py
|
Python
|
bsd-3-clause
| 10,673 | 0.002342 |
from __future__ import unicode_literals
import numpy as np
import param
from ..core import (HoloMap, DynamicMap, CompositeOverlay, Layout,
GridSpace, NdLayout, Store)
from ..core.util import (match_spec, is_number, wrap_tuple, basestring,
get_overlay_spec, unique_iterator, safe_unicode)
def displayable(obj):
"""
Predicate that returns whether the object is displayable or not
    (i.e. whether the object obeys the nesting hierarchy).
"""
if isinstance(obj, HoloMap):
return not (obj.type in [Layout, GridSpace, NdLayout])
if isinstance(obj, (GridSpace, Layout, NdLayout)):
for el in obj.values():
if not displayable(el):
return False
return True
return True
class Warning(param.Parameterized): pass
display_warning = Warning(name='Warning')
def collate(obj):
if isinstance(obj, HoloMap):
display_warning.warning("Nesting %ss within a HoloMap makes it difficult "
"to access your data or control how it appears; "
"we recommend calling .collate() on the HoloMap "
"in order to follow the recommended nesting "
"structure shown in the Composing Data tutorial"
"(http://git.io/vtIQh)" % obj.type.__name__)
return obj.collate()
elif isinstance(obj, (Layout, NdLayout)):
try:
display_warning.warning(
"Layout contains HoloMaps which are not nested in the "
"recommended format for accessing your data; calling "
".collate() on these objects will resolve any violations "
"of the recommended nesting presented in the Composing Data "
"tutorial (http://git.io/vqs03)")
expanded = []
for el in obj.values():
if isinstance(el, HoloMap) and not displayable(el):
collated_layout = Layout.from_values(el.collate())
expanded.extend(collated_layout.values())
return Layout(expanded)
except:
raise Exception(undisplayable_info(obj))
else:
raise Exception(undisplayable_info(obj))
def undisplayable_info(obj, html=False):
"Generate helpful message regarding an undisplayable object"
collate = '<tt>collate</tt>' if html else 'collate'
info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)"
if isinstance(obj, HoloMap):
error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__
remedy = "Please call the %s method to generate a displayable object" % collate
elif isinstance(obj, Layout):
error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
elif isinstance(obj, GridSpace):
error = "GridSpace containing HoloMaps of Layouts cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
if not html:
return '\n'.join([error, remedy, info])
else:
return "<center>{msg}</center>".format(msg=('<br>'.join(
['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
def compute_sizes(sizes, size_fn, scaling_factor, scaling_method, base_size):
"""
Scales point sizes according to a scaling factor,
base size and size_fn, which will be applied before
scaling.
"""
if scaling_method == 'area':
pass
elif scaling_method == 'width':
scaling_factor = scaling_factor**2
else:
raise ValueError(
'Invalid value for argument "scaling_method": "{}". '
            'Valid values are: "width", "area".'.format(scaling_method))
sizes = size_fn(sizes)
return (base_size*scaling_factor*sizes)
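# Usage sketch (added for illustration; not part of the original module).
# With scaling_method='width' the scaling factor is squared so that doubling
# the nominal width quadruples the drawn area.
def _compute_sizes_example():
    sizes = np.array([1.0, 4.0, 9.0])
    # size_fn=np.sqrt maps [1, 4, 9] -> [1, 2, 3]; 10 * 2**2 * [1, 2, 3]
    # yields [40., 80., 120.]
    return compute_sizes(sizes, np.sqrt, 2, 'width', 10)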
def get_sideplot_ranges(plot, element, main, ranges):
"""
Utility to find the range for an adjoined
plot given the plot, the element, the
Element the plot is adjoined to and the
dictionary of ranges.
""
|
"
key = plot.current_key
dims = element.dimensions(label=True)
dim = dims[1] if dims[1] != 'Frequency' else dims[0]
range_item = main
if isinstance(main, HoloMap):
if issubclass(main.type, CompositeOverlay):
range_item = [hm for hm in main.split_overlays()[1]
if dim in hm.dimensions('all', label=True)][0]
else:
range_item = HoloMap({0: main}, kdims=['Frame'])
ranges = match_spec(range_item.last, ranges)
if dim in ranges:
main_range = ranges[dim]
else:
framewise = plot.lookup_options(range_item.last, 'norm').options.get('framewise')
if framewise and range_item.get(key, False):
main_range = range_item[key].range(dim)
else:
main_range = range_item.range(dim)
# If .main is an NdOverlay or a HoloMap of Overlays get the correct style
if isinstance(range_item, HoloMap):
range_item = range_item.last
if isinstance(range_item, CompositeOverlay):
range_item = [ov for ov in range_item
if dim in ov.dimensions('all', label=True)][0]
return range_item, main_range, dim
def within_range(range1, range2):
"""Checks whether range1 is within the range specified by range2."""
return ((range1[0] is None or range2[0] is None or range1[0] >= range2[0]) and
(range1[1] is None or range2[1] is None or range1[1] <= range2[1]))
def validate_sampled_mode(holomaps, dynmaps):
composite = HoloMap(enumerate(holomaps), kdims=['testing_kdim'])
holomap_kdims = set(unique_iterator([kd.name for dm in holomaps for kd in dm.kdims]))
hmranges = {d: composite.range(d) for d in holomap_kdims}
if any(not set(d.name for d in dm.kdims) <= holomap_kdims
for dm in dynmaps):
raise Exception('In sampled mode DynamicMap key dimensions must be a '
'subset of dimensions of the HoloMap(s) defining the sampling.')
elif not all(within_range(hmrange, dm.range(d)) for dm in dynmaps
for d, hmrange in hmranges.items() if d in dm.kdims):
raise Exception('HoloMap(s) have keys outside the ranges specified on '
'the DynamicMap(s).')
def get_dynamic_mode(composite):
"Returns the common mode of the dynamic maps in given composite object"
dynmaps = composite.traverse(lambda x: x, [DynamicMap])
holomaps = composite.traverse(lambda x: x, ['HoloMap'])
dynamic_modes = [m.call_mode for m in dynmaps]
dynamic_sampled = any(m.sampled for m in dynmaps)
if holomaps:
validate_sampled_mode(holomaps, dynmaps)
elif dynamic_sampled and not holomaps:
raise Exception("DynamicMaps in sampled mode must be displayed alongside "
"a HoloMap to define the sampling.")
if len(set(dynamic_modes)) > 1:
raise Exception("Cannot display composites of DynamicMap objects "
"with different interval modes (i.e open or bounded mode).")
elif dynamic_modes and not holomaps:
return 'bounded' if dynamic_modes[0] == 'key' else 'open', dynamic_sampled
else:
return None, dynamic_sampled
def initialize_sampled(obj, dimensions, key):
"""
Initializes any DynamicMaps in sampled mode.
"""
select = dict(zip([d.name for d in dimensions], key))
try:
obj.select([DynamicMap], **select)
except KeyError:
pass
def save_frames(obj, filename, fmt=None, backend=None, options=None):
"""
Utility to export object to files frame by frame, numbered individually.
Will use default backend and figure format by default.
"""
backend = Store.current_backend if backend is None else backend
renderer = Store.renderers[backend]
fmt = renderer.params('fig').objects[0] if fmt is None else fmt
plot = renderer.get_plot(obj)
for i in range(len(plot)):
plot.update(i)
|
kuiche/chromium
|
tools/grit/grit/node/message.py
|
Python
|
bsd-3-clause
| 9,031 | 0.009412 |
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Handling of the <message> element.
'''
import re
import types
from grit.node import base
import grit.format.rc_header
import grit.format.rc
from grit import clique
from grit import exception
from grit import tclib
from grit import util
# Finds whitespace at the start and end of a string which can be multiline.
_WHITESPACE = re.compile('(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z',
re.DOTALL | re.MULTILINE)
class MessageNode(base.ContentNode):
'''A <message> element.'''
# For splitting a list of things that can be separated by commas or
# whitespace
_SPLIT_RE = re.compile('\s*,\s*|\s+')
def __init__(self):
super(type(self), self).__init__()
# Valid after EndParsing, this is the MessageClique that contains the
# source message and any translations of it that have been loaded.
self.clique = None
# We don't send leading and trailing whitespace into the translation
# console, but rather tack it onto the source message and any
# translations when formatting them into RC files or what have you.
self.ws_at_start = '' # Any whitespace characters at the start of the text
self.ws_at_end = '' # --"-- at the end of the text
# A list of "shortcut groups" this message is in. We check to make sure
# that shortcut keys (e.g. &J) within each shortcut group are unique.
self.shortcut_groups_ = []
def _IsValidChild(self, child):
return isinstance(child, (PhNode))
def _IsValidAttribute(self, name, value):
if name not in ['name', 'offset', 'translateable', 'desc', 'meaning',
'internal_comment', 'shortcut_groups', 'custom_type',
'validation_expr', 'use_name_for_id']:
return False
if name == 'translateable' and value not in ['true', 'false']:
return False
return True
def MandatoryAttributes(self):
return ['name|offset']
def DefaultAttributes(self):
return {
'translateable' : 'true',
'desc' : '',
'meaning' : '',
'internal_comment' : '',
'shortcut_groups' : '',
'custom_type' : '',
'validation_expr' : '',
'use_name_for_id' : 'false',
}
def GetTextualIds(self):
'''
Returns the concatenation of the parent's node first_id and
this node's offset if it has one, otherwise just call the
superclass' implementation
'''
if 'offset' in self.attrs:
# we search for the first grouping node in the parents' list
# to take care of the case where the first parent is an <if> node
grouping_parent = self.parent
import grit.node.empty
while grouping_parent and not isinstance(grouping_parent,
grit.node.empty.GroupingNode):
grouping_parent = grouping_parent.parent
assert 'first_id' in grouping_parent.attrs
return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']]
else:
return super(type(self), self).GetTextualIds()
def IsTranslateable(self):
return self.attrs['translateable'] == 'true'
def ItemFormatter(self, t):
if t == 'rc_header':
return grit.format.rc_header.Item()
elif (t in ['rc_all', 'rc_translateable', 'rc_nontranslateable'] and
self.SatisfiesOutputCondition()):
return grit.format.rc.Message()
else:
return super(type(self), self).ItemFormatter(t)
def EndParsing(self):
super(type(self), self).EndParsing()
# Make the text (including placeholder references) and list of placeholders,
# then strip and store leading and trailing whitespace and create the
# tclib.Message() and a clique to contain it.
text = ''
placeholders = []
for item in self.mixed_content:
if isinstance(item, types.StringTypes):
text += item
else:
        presentation = item.attrs['name'].upper()
text += presentation
ex = ' '
if len(item.children):
ex = item.children[0].GetCdata()
original = item.GetCdata()
placeholders.append(tclib.Placeholder(presentation, original, ex))
m = _WHITESPACE.match(text)
if m:
self.ws_at_start = m.group('start')
self.ws_at_end = m.group('end')
text = m.group('body')
self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups'])
self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != '']
description_or_id = self.attrs['desc']
if description_or_id == '' and 'name' in self.attrs:
description_or_id = 'ID: %s' % self.attrs['name']
assigned_id = None
if self.attrs['use_name_for_id'] == 'true':
assigned_id = self.attrs['name']
message = tclib.Message(text=text, placeholders=placeholders,
description=description_or_id,
meaning=self.attrs['meaning'],
assigned_id=assigned_id)
self.clique = self.UberClique().MakeClique(message, self.IsTranslateable())
for group in self.shortcut_groups_:
self.clique.AddToShortcutGroup(group)
if self.attrs['custom_type'] != '':
self.clique.SetCustomType(util.NewClassInstance(self.attrs['custom_type'],
clique.CustomType))
elif self.attrs['validation_expr'] != '':
self.clique.SetCustomType(
clique.OneOffCustomType(self.attrs['validation_expr']))
def GetCliques(self):
if self.clique:
return [self.clique]
else:
return []
def Translate(self, lang):
'''Returns a translated version of this message.
'''
assert self.clique
msg = self.clique.MessageForLanguage(lang,
self.PseudoIsAllowed(),
self.ShouldFallbackToEnglish()
).GetRealContent()
return msg.replace('[GRITLANGCODE]', lang)
def NameOrOffset(self):
if 'name' in self.attrs:
return self.attrs['name']
else:
return self.attrs['offset']
def GetDataPackPair(self, output_dir, lang):
'''Returns a (id, string) pair that represents the string id and the string
in utf8. This is used to generate the data pack data file.
'''
from grit.format import rc_header
id_map = rc_header.Item.tids_
id = id_map[self.GetTextualIds()[0]]
message = self.ws_at_start + self.Translate(lang) + self.ws_at_end
# |message| is a python unicode string, so convert to a utf16 byte stream
# because that's the format of datapacks. We skip the first 2 bytes
# because it is the BOM.
return id, message.encode('utf16')[2:]
# static method
def Construct(parent, message, name, desc='', meaning='', translateable=True):
'''Constructs a new message node that is a child of 'parent', with the
name, desc, meaning and translateable attributes set using the same-named
parameters and the text of the message and any placeholders taken from
'message', which must be a tclib.Message() object.'''
# Convert type to appropriate string
if translateable:
translateable = 'true'
else:
translateable = 'false'
node = MessageNode()
node.StartParsing('message', parent)
node.HandleAttribute('name', name)
node.HandleAttribute('desc', desc)
node.HandleAttribute('meaning', meaning)
node.HandleAttribute('translateable', translateable)
items = message.GetContent()
for ix in range(len(items)):
if isinstance(items[ix], types.StringTypes):
text = items[ix]
# Ensure whitespace at front and back of message is correctly handled.
if ix == 0:
text = "'''" + text
if ix == len(items) - 1:
text = text + "'''"
node.AppendContent(text)
else:
phnode = PhNode()
phnode.StartParsing('ph', node)
phnode.HandleAttribute('name', items[ix].GetPresentation())
phnode.AppendContent(items[ix].GetOriginal
|
kytos/kytos
|
kytos/core/exceptions.py
|
Python
|
mit
| 3,168 | 0 |
"""Kytos Core-Defined Exceptions."""
class KytosCoreException(Exception):
"""Exception thrown when KytosCore is broken."""
def __str__(self):
"""Return message of KytosCoreException."""
return 'KytosCore exception: ' + super().__str__()
class KytosSwitchOfflineException(Exception):
"""Exception thrown when a switch is offline."""
def __init__(self, switch):
"""Require a switch.
Args:
            switch (:class:`~kytos.core.switch.Switch`): A switch offline.
"""
super().__init__()
self.switch = switch
def __str__(self):
"""Return message of KytosSwitchOfflineException."""
msg = 'The switch {} is not reachable. Please check the connection '
msg += 'between the switch and the controller.'
return msg.format(self.switch.dpid)
class KytosEventException(Exception):
"""Exception thrown when a KytosEvent have an illegal use."""
def __init__(self, message="KytosEvent exception", event=None):
"""Assign parameters to instance variables.
Args:
message (string): message from KytosEventException.
event (:class:`~kytos.core.events.KytosEvent`): Event malformed.
"""
super().__init__()
self.message = message
self.event = event
def __str__(self):
"""Return the full message from KytosEventException."""
message = self.message
if self.event:
message += ". EventType: " + type(self.event)
return message
class KytosWrongEventType(KytosEventException):
"""Exception related to EventType.
When related to buffers, it means that the EventType is not allowed on
that buffer.
"""
class KytosNoTagAvailableError(Exception):
"""Exception raised when a link has no vlan available."""
def __init__(self, link):
"""Require a link.
Args:
link (:class:`~kytos.core.link.Link`): A link with no vlan
available.
"""
super().__init__()
self.link = link
def __str__(self):
"""Full message."""
msg = f'Link {self.link.id} has no vlan available.'
return msg
class KytosLinkCreationError(Exception):
"""Exception thrown when the link has an empty endpoint."""
# Exceptions related to NApps
class KytosNAppException(Exception):
"""Exception raised on a KytosNApp."""
def __init__(self, message="KytosNApp exception"):
"""Assign the parameters to instance variables.
Args:
message (string): message from KytosNAppException.
"""
super().__init__()
self.message = message
def __str__(self):
"""Return the message from KytosNAppException."""
return self.message
class KytosNAppMissingInitArgument(KytosNAppException):
"""Exception thrown when NApp have a missing init argument."""
def __init__(self, message="KytosNAppMissingInitArgument"):
"""Assing parameters to instance variables.
Args:
message (str): Name of the missed argument.
"""
super().__init__(message=message)
|
Psirus/altay
|
altai/lib/vented_box.py
|
Python
|
bsd-3-clause
| 1,632 | 0 |
# -*- coding: utf-8 -*-
""" Vented box enclosure """
import numpy as np
from . import air
class VentedBox(object):
""" Model a vented box loudspeaker enclosure """
def __init__(self, Vab, fb, Ql):
self._Vab = Vab
#: Acoustic compliance of box :math:`C_{ab}`
#:
|
#: .. note:: Do not set this directly, use :meth:`Vab`
self.Cab = Vab / (air.RHO*air.C**2)
self._fb = fb
#: Angular frequency :math:`\omega_b = 2 \pi f_b`
|
#:
#: .. note:: Do not set this directly, use :meth:`fb`
self.wb = 2.0*np.pi*fb
#: Time constant of the box :math:`T_b = \frac{1}{\omega_b}`; not to
#: be confused with a period
#: :math:`t = \frac{1}{f} = \frac{2\pi}{\omega}`
#:
#: .. note:: Do not set this directly, use :meth:`fb`
self.Tb = 1.0 / self.wb
#: Enclosure leakage losses
self.Ql = Ql
@property
def Vab(self):
""" Box Volume in m³
The box volume in m³. Setting this attribute also sets :attr:`Cab`.
"""
return self._Vab
@Vab.setter
def Vab(self, Vab):
""" Sets Vab, as well as Cab """
self._Vab = Vab
self.Cab = Vab / (air.RHO*air.C**2)
@property
def fb(self):
""" Box Tuning Frequency in Hz
The tuning frequency of the box. Setting this attribute also sets
:attr:`wb` and :attr:`Tb`.
"""
return self._fb
@fb.setter
def fb(self, fb):
""" Sets fb, as well as wb and Tb """
self._fb = fb
self.wb = 2*np.pi*fb
self.Tb = 1 / self.wb
|
wraithan/reciblog
|
urls.py
|
Python
|
bsd-2-clause
| 617 | 0.003241 |
from django.conf.urls.defaults import patterns, url, include
from django.contrib import admin
from django.conf import settings
from django.views.generic.simple import direct_to_template
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^', include('reciblog.blog.urls')),
    url(r'^about$', direct_to_template, {'template': 'about.html'}, name='about'),
    url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG == True:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
Rudloff/youtube-dl
|
youtube_dl/extractor/generic.py
|
Python
|
unlicense
| 99,048 | 0.002343 |
# encoding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
orderedSet,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
url_basename,
xpath_text,
)
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxEmbedIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .vimeo import VimeoIE
from .dailymotion import (
DailymotionIE,
DailymotionCloudIE,
)
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .screenwavemedia import ScreenwaveMediaIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .vessel import VesselIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudIE
from .vbox7 import Vbox7IE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
        # XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
                'thumbnail': 're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.app
|
smendez-hi/SUMO-hib
|
tools/xml/rebuildSchemata.py
|
Python
|
gpl-3.0
| 848 | 0.005896 |
#!/usr/bin/env python
"""
@file rebuildSchemata.py
@author Michael Behrisch
@date 2011-07-11
@version $Id: rebuildSchemata.py 11671 2012-01-07 20:14:30Z behrisch $
Let all SUMO binaries write the schema for their config
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2011-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, sys, subprocess
homeDir = os.environ.get("SUMO_HOME", os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
binDir = os.environ.get("SUMO_BINDIR", os.path.join(homeDir, "bin"))
for exe in "activitygen dfrouter duarouter jtrrouter netconvert netgen od2trips polyconvert sumo".split():
    subprocess.call([os.path.join(binDir, exe), "--save-schema", os.path.join(homeDir, "docs", "internet", "xsd", exe+"Configuration.xsd")])
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_fstring.py
|
Python
|
apache-2.0
| 47,266 | 0.000783 |
# -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's a code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.
import ast
import types
import decimal
import unittest
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
def test__format__lookup(self):
# Make sure __format__ is looked up on the type, not the instance.
class X:
def __format__(self, spec):
return 'class'
x = X()
# Add a bound __format__ method to the 'y' instance, but not
# the 'x' instance.
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
# __format__ is not called this way, but still make sure it
# returns what we expect (so we can make sure we're bypassing
# it).
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
# This is how __format__ is actually called.
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
# Inspired by http://bugs.python.org/issue24975
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
# Make sure x was not called.
self.assertFalse(x.called)
# Actually run the code.
exec(c)
# Make sure x was called.
self.assertTrue(x.called)
def test_ast_line_numbers(self):
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiple_formattedvalues(self):
expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `f'no formatted value'`
self.assertEqual(type(t.body[0]), ast.Expr)
self.assertEqual(type(t.body[0].value), ast.JoinedStr)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 4)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
# check the first binop location
binop1 = t.body[1].value.values[1].value
self.assertEqual(type(binop1), ast.BinOp)
self.assertEqual(type(binop1.left), ast.Name)
self.assertEqual(type(binop1.op), ast.Mult)
self.assertEqual(type(binop1.right), ast.Call)
self.assertEqual(binop1.lineno, 3)
self.assertEqual(binop1.left.lineno, 3)
self.assertEqual(binop1.right.lineno, 3)
self.assertEqual(binop1.col_offset, 8)
self.assertEqual(binop1.left.col_offset, 8)
self.assertEqual(binop1.right.col_offset, 12)
# check the second binop location
binop2 = t.body[1].value.values[3].value
self.assertEqual(type(binop2), ast.BinOp)
self.assertEqual(type(binop2.left), ast.Name)
self.assertEqual(type(binop2.op), ast.Add)
self.assertEqual(type(binop2.right), ast.Call)
self.assertEqual(binop2.lineno, 3)
self.assertEqual(binop2.left.lineno, 3)
self.assertEqual(binop2.right.lineno, 3)
self.assertEqual(binop2.col_offset, 23)
self.assertEqual(binop2.left.col_offset, 23)
self.assertEqual(binop2.right.col_offset, 27)
def test_ast_line_numbers_nested(self):
expr = """
a = 10
f'{a * f"-{x()}-"}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.JoinedStr)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the nested call location
self.assertEqual(len(binop.right.valu
|
kusamau/cedaMarkup
|
ceda_markup/markup.py
|
Python
|
bsd-3-clause
| 2,962 | 0.009453 |
'''
BSD Licence
Copyright (c) 2012, Science & Technology Facilities Council (STFC)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Science & Technology Facilities Council (STFC)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Created on 29 Jun 2012
@author: mnagni
'''
from xml.etree.ElementTree import _ElementInterface, Element
def createMarkup(tagName, tagPrefix, tagNamespace, root = None):
'''
Returns an ElementTree.Element instance
@param tagName: the tag name
@param tagPrefix: the prefix to use for this tag
@param tagNamespace: the tag's namespace
@param root: the root Element of the document containing this element
@return: a new instance
'''
    #attach gml namespace to the document root element
_tag = tagName
if root is not None:
if isinstance(root, _ElementInterface):
if root.get('xmlns') == tagNamespace:
_tag = tagName
else:
root.set("xmlns:%s" % (tagPrefix), tagNamespace)
if tagName is not None and tagPrefix is not None:
_tag = "%s:%s" % (tagPrefix, tagName)
markup = Element(_tag)
if root is None:
markup.set("xmlns", tagNamespace)
return markup
def createSimpleMarkup(text, root, tagName, ns, prefix):
"""
Returns a new markup filling only its 'text' attribute
"""
markup = createMarkup(tagName, prefix, ns, root)
markup.text = text
return markup
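# Usage sketch (added for illustration; the tag names and namespace URI below
# are examples, not part of the library). A root element is created in a
# namespace and a simple child is attached to it.
if __name__ == "__main__":
    from xml.etree.ElementTree import tostring
    root = createMarkup('feed', 'atom', 'http://www.w3.org/2005/Atom')
    title = createSimpleMarkup('Example feed', root, 'title',
                               'http://www.w3.org/2005/Atom', 'atom')
    root.append(title)
    # -> <feed xmlns="http://www.w3.org/2005/Atom"><title>Example feed</title></feed>
    print(tostring(root))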
|
mupi/tecsaladeaula
|
core/templatetags/usergroup.py
|
Python
|
agpl-3.0
| 1,920 | 0.001563 |
# https://djangosnippets.org/snippets/2566/
from django import template
from django.template import resolve_variable, NodeList
from django.contrib.auth.models import Group
register = template.Library()
@register.tag()
def ifusergroup(parser, token):
""" Check to see if the currently logged in user belongs to a specific
    group. Requires the Django authentication contrib app and middleware.
Usage:
{% ifusergroup Admins %} ... {% endifusergroup %}, or
{% ifusergroup Admins|Group1|"Group 2" %} ... {% endifusergroup %}, or
{% ifusergroup Admins %} ... {% else %} ... {% endifusergroup %}
"""
try:
_, group = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("Tag 'ifusergroup' requires 1 argument.")
nodelist_true = parser.parse(('else', 'endifusergroup'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifusergroup',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return GroupCheckNode(group, nodelist_true, nodelist_false)
class GroupCheckNode(template.Node):
def __init__(self, group, nodelist_true, nodelist_false):
self.group = group
self.nodelist_true = nodelist_true
self.nodelist_false = nodelist_false
def render(self, context):
user = resolve_variable('user', context)
if not user.is_authenticated():
return self.nodelist_false.render(context)
for group in self.group.split("|"):
group = group[1:-1] if group.startswith('"') and group.endswith('"') else group
try:
if Group.objects.get(name=group) in user.groups.all():
return self.nodelist_true.render(context)
except Group.DoesNotExist:
pass
return self.nodelist_false.render(context)
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_jira/migrations/0018_project_runtime_state.py
|
Python
|
mit
| 465 | 0 |
# Generated by Django 1.11.7 on 2018-05-21 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waldur_jira', '0017_project_action'),
]
operations = [
        migrations.AddField(
model_name='project',
name='runtime_state',
field=models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
]
|
AndrewHanes/Python-Webnews
|
webnews/api.py
|
Python
|
mit
| 2,609 | 0.004983 |
import json
import enum
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib import request
class APINonSingle:
def __init__(self, api_key, agent = "webnews-python", webnews_base = "https://webnews.csh.rit.edu/"):
self.agent = agent
self.api_key = api_key
self.webnews_base = webnews_base
class Actions(enum.Enum):
user = "user"
unread_counts = "unread_counts"
newsgroups = "newsgroups"
search = "search"
compose = "compose"
def POST(self, action, args={}):
if type(action) == API.Actions:
action = action.value
args['api_key'] = self.api_key
args['api_agent'] = self.agent
args = urlencode(args).encode('utf-8')
req = request.Request(self.webnews_base+ action)
req.add_header('Accept', 'application/json')
resp = urlopen(req, args).read().decode('utf-8')
return json.loads(resp)
def GET(self, action, args={}):
if type(action) == API.Actions:
action = action.value
args['api_key'] = self.api_key
args['api_agent'] = self.agent
args = urlencode(args)
req = request.Request(self.webnews_base + action + '?' + args)
req.add_header('Accept', 'application/json')
resp = urlopen(req).read().decode('utf-8')
return json.loads(resp)
def user(self):
return self.GET(API.Actions.user)
def unread_counts(self):
return self.GET(API.Actions.unread_counts)
def newsgroups(self):
return self.GET(API.Actions.newsgroups)
def newsgroups_search(self, newsgroup):
return self.GET("newsgroups/" + newsgroup)
def newsgroup_posts(self, newsgroup, params={}):
return self.GET(newsgroup + '/index', params)
    def search(self, params={}):
|
return self.GET(API.Actions.search, params)
def post_specifics(self, newsgroup, index, params={}):
return self.GET(str(newsgroup)+"/"+str(index), params)
def compose(self, newsgroup, subject, body, params={}):
params['subject'] = subject
params['body'] = body
params['newsgroup'] = newsgroup
return self.POST(API.Actions.compose, params)
"""
Wrap the APINonSingle object so that
only a single object for each key will exist.
An optimization of the object implementation.
|
"""
class API(APINonSingle):
_instance = {}
def __new__(cls, *args, **kwargs):
if not args[0] in cls._instance:
cls._instance[args[0]] = APINonSingle(*args, **kwargs)
return cls._instance[args[0]]
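# Added usage sketch: the API key, newsgroup name, and 'limit' parameter below
# are illustrative placeholders, not real credentials or documented options.
if __name__ == '__main__':
    api = API('your-api-key')
    assert api is API('your-api-key')  # one cached instance per key
    print(api.user())
    print(api.newsgroup_posts('csh.test', {'limit': 5}))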
|
UnrememberMe/pants
|
src/python/pants/backend/docgen/register.py
|
Python
|
apache-2.0
| 1,019 | 0.004907 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.docgen.targets.doc import Page, Wiki, WikiArtifact
from pants.backend.docgen.tasks.generate_pants_reference import GeneratePantsReference
from pants.backend.docgen.tasks.markdown_to_html import MarkdownToHtml
from pants.build_graph.build_file_aliases import BuildFileAliases
|
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
return BuildFileAliases(
targets={
'page': Page,
},
objects={
'wiki_artifact': WikiArtifact,
# TODO: Why is this capitalized?
'Wiki': Wiki,
},
)
def register_goals():
  task(name='markdown', action=MarkdownToHtml).install(),
|
task(name='reference', action=GeneratePantsReference).install()
|
BobbyJacobs/cs3240-demo
|
hello.py
|
Python
|
mit
| 30 | 0.033333 |
def main():
|
print(
|
"Hello!")
|
mesonbuild/meson
|
mesonbuild/interpreter/primitives/string.py
|
Python
|
apache-2.0
| 6,549 | 0.003207 |
# Copyright 2021 The Meson development team
# SPDX-license-identifier: Apache-2.0
from __future__ import annotations
import re
import os
import typing as T
from ...mesonlib import version_compare
from ...interpreterbase import (
ObjectHolder,
MesonOperator,
FeatureNew,
typed_operator,
noArgsFlattening,
noKwargs,
noPosargs,
typed_pos_args,
InvalidArguments,
)
if T.TYPE_CHECKING:
# Object holders need the actual interpreter
from ...interpreter import Interpreter
from ...interpreterbase import TYPE_var, TYPE_kwargs
class StringHolder(ObjectHolder[str]):
def __init__(self, obj: str, interpreter: 'Interpreter') -> None:
super().__init__(obj, interpreter)
self.methods.update({
'contains': self.contains_method,
'startswith': self.startswith_method,
'endswith': self.endswith_method,
'format': self.format_method,
'join': self.join_method,
'replace': self.replace_method,
'split': self.split_method,
'strip': self.strip_method,
'substring': self.substring_method,
'to_int': self.to_int_method,
'to_lower': self.to_lower_method,
'to_upper': self.to_upper_method,
'underscorify': self.underscorify_method,
'version_compare': self.version_compare_method,
})
self.trivial_operators.update({
# Arithmetic
MesonOperator.PLUS: (str, lambda x: self.held_object + x),
|
# Comparison
MesonOperator.EQUALS: (str, lambda x: self.held_object == x),
MesonOperator.NOT_EQUALS: (str, lambda x: self.held_object != x),
MesonOperator.GREATER: (str, lambda x: self.held_object > x),
MesonOperator.LESS: (str, lambda x: self.held_object < x),
MesonOperator.GREATER_EQUALS: (str, lambda x: self.held_object >= x),
|
MesonOperator.LESS_EQUALS: (str, lambda x: self.held_object <= x),
})
# Use actual methods for functions that require additional checks
self.operators.update({
MesonOperator.DIV: self.op_div,
MesonOperator.INDEX: self.op_index,
})
def display_name(self) -> str:
return 'str'
@noKwargs
@typed_pos_args('str.contains', str)
def contains_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
return self.held_object.find(args[0]) >= 0
@noKwargs
@typed_pos_args('str.startswith', str)
def startswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
return self.held_object.startswith(args[0])
@noKwargs
@typed_pos_args('str.endswith', str)
def endswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
return self.held_object.endswith(args[0])
@noArgsFlattening
@noKwargs
@typed_pos_args('str.format', varargs=object)
def format_method(self, args: T.Tuple[T.List[object]], kwargs: TYPE_kwargs) -> str:
arg_strings: T.List[str] = []
for arg in args[0]:
if isinstance(arg, bool): # Python boolean is upper case.
arg = str(arg).lower()
arg_strings.append(str(arg))
def arg_replace(match: T.Match[str]) -> str:
idx = int(match.group(1))
if idx >= len(arg_strings):
raise InvalidArguments(f'Format placeholder @{idx}@ out of range.')
return arg_strings[idx]
return re.sub(r'@(\d+)@', arg_replace, self.held_object)
@noKwargs
@typed_pos_args('str.join', varargs=str)
def join_method(self, args: T.Tuple[T.List[str]], kwargs: TYPE_kwargs) -> str:
return self.held_object.join(args[0])
@noKwargs
@typed_pos_args('str.replace', str, str)
def replace_method(self, args: T.Tuple[str, str], kwargs: TYPE_kwargs) -> str:
return self.held_object.replace(args[0], args[1])
@noKwargs
@typed_pos_args('str.split', optargs=[str])
def split_method(self, args: T.Tuple[T.Optional[str]], kwargs: TYPE_kwargs) -> T.List[str]:
return self.held_object.split(args[0])
@noKwargs
@typed_pos_args('str.strip', optargs=[str])
def strip_method(self, args: T.Tuple[T.Optional[str]], kwargs: TYPE_kwargs) -> str:
return self.held_object.strip(args[0])
@noKwargs
@typed_pos_args('str.substring', optargs=[int, int])
def substring_method(self, args: T.Tuple[T.Optional[int], T.Optional[int]], kwargs: TYPE_kwargs) -> str:
start = args[0] if args[0] is not None else 0
end = args[1] if args[1] is not None else len(self.held_object)
return self.held_object[start:end]
@noKwargs
@noPosargs
def to_int_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> int:
try:
return int(self.held_object)
except ValueError:
raise InvalidArguments(f'String {self.held_object!r} cannot be converted to int')
@noKwargs
@noPosargs
def to_lower_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:
return self.held_object.lower()
@noKwargs
@noPosargs
def to_upper_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:
return self.held_object.upper()
@noKwargs
@noPosargs
def underscorify_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:
return re.sub(r'[^a-zA-Z0-9]', '_', self.held_object)
@noKwargs
@typed_pos_args('str.version_compare', str)
def version_compare_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
return version_compare(self.held_object, args[0])
@FeatureNew('/ with string arguments', '0.49.0')
@typed_operator(MesonOperator.DIV, str)
def op_div(self, other: str) -> str:
return os.path.join(self.held_object, other).replace('\\', '/')
@typed_operator(MesonOperator.INDEX, int)
def op_index(self, other: int) -> str:
try:
return self.held_object[other]
except IndexError:
raise InvalidArguments(f'Index {other} out of bounds of string of size {len(self.held_object)}.')
class MesonVersionString(str):
pass
class MesonVersionStringHolder(StringHolder):
@noKwargs
@typed_pos_args('str.version_compare', str)
def version_compare_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
self.interpreter.tmp_meson_version = args[0]
return version_compare(self.held_object, args[0])
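# Added illustration (plain Python, outside the interpreter machinery): the
# '@N@' placeholder substitution implemented by format_method above behaves
# roughly like this standalone helper.
def _format_sketch(template: str, *args: object) -> str:
    strings = [str(a).lower() if isinstance(a, bool) else str(a) for a in args]
    def _replace(match: T.Match[str]) -> str:
        idx = int(match.group(1))
        if idx >= len(strings):
            raise IndexError(f'Format placeholder @{idx}@ out of range.')
        return strings[idx]
    return re.sub(r'@(\d+)@', _replace, template)
assert _format_sketch('@0@ uses @1@', 'project', True) == 'project uses true'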
|
nesterione/problem-solving-and-algorithms
|
problems/Checkio/TheFlatDictionary.py
|
Python
|
apache-2.0
| 1,262 | 0.003962 |
def flatten(dictionary):
stack = [((), dictionary)]
result = {}
while stack:
|
path, current = stack.pop()
for k, v in current.items():
if isinstance(v, dict) and bool(v):
stack.append((path + (k,), v))
else:
                whatadd = "" if isinstance(v, dict) else v
result["/".join((path + (k,)))] = whatadd
return result
if __name__ == '__main__':
assert flatten({"key": "value"}) == {"key": "value"}, "Simple"
|
assert flatten(
{"key": {"deeper": {"more": {"enough": "value"}}}}
) == {"key/deeper/more/enough": "value"}, "Nested"
assert flatten({"empty": {}}) == {"empty": ""}, "Empty value"
assert flatten({"name": {
"first": "One",
"last": "Drone"},
"job": "scout",
"recent": {},
"additional": {
"place": {
"zone": "1",
"cell": "2"}}}
) == {"name/first": "One",
"name/last": "Drone",
"job": "scout",
"recent": "",
"additional/place/zone": "1",
"additional/place/cell": "2"}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.