repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
magcius/sweettooth
|
sweettooth/extensions/migrations/0008_new_icon_default.py
|
Python
|
agpl-3.0
| 6,118 | 0.007192 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
new_default = orm.Extension._meta.get_field_by_name('icon')[0].default
for ext in orm.Extension.objects.filter(icon=""):
ext.icon = new_default
ext.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
|
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'extensions.extension': {
'Meta': {'object_name': 'Extension'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': "'/static/images/plugin.png'", 'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'screenshot': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'extensions.extensionversion': {
'Meta': {'unique_together': "(('extension', 'version'),)", 'object_name': 'ExtensionVersion'},
'extension': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['extensions.Extension']"}),
'extra_json_fields': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shell_versions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['extensions.ShellVersion']", 'symmetrical': 'False'}),
'source': ('django.db.models.fields.files.FileField', [], {'max_length': '223'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'extensions.shellversion': {
'Meta': {'object_name': 'ShellVersion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major': ('django.db.models.fields.PositiveIntegerField', [], {}),
'minor': ('django.db.models.fields.PositiveIntegerField', [], {}),
'point': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['extensions']
|
tmetsch/graph_stitcher
|
stitcher/vis.py
|
Python
|
mit
| 5,618 | 0 |
"""
Visualize possible stitches with the outcome of the validator.
"""
import math
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import stitcher
SPACE = 25
TYPE_FORMAT = {'a': '^', 'b': 's', 'c': 'v'}
def show(graphs, request, titles, prog='neato', size=None,
type_format=None, filename=None):
"""
Display the results using matplotlib.
"""
if not size:
size = _get_size(len(graphs))
fig, axarr = plt.subplots(size[0], size[1], figsize=(18, 10))
fig.set_facecolor('white')
x_val = 0
y_val = 0
index = 0
if size[0] == 1:
axarr = np.array(axarr).reshape((1, size[1]))
for candidate in graphs:
# axarr[x_val, y_val].axis('off')
axarr[x_val, y_val].xaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].yaxis.set_major_formatter(plt.NullFormatter())
axarr[x_val, y_val].xaxis.set_ticks([])
axarr[x_val, y_val].yaxis.set_ticks([])
axarr[x_val, y_val].set_title(titles[index])
# axarr[x_val, y_val].set_axis_bgcolor("white")
if not type_format:
type_format = TYPE_FORMAT
_plot_subplot(candidate, request.nodes(), prog, type_format,
axarr[x_val, y_val])
y_val += 1
if y_val > size[1] - 1:
y_val = 0
x_val += 1
index += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_subplot(graph, new_nodes, prog, type_format, axes):
"""
Plot a single candidate graph.
"""
pos = nx.nx_agraph.graphviz_layout(graph, prog=prog)
# draw the nodes
for node, values in graph.nodes(data=True):
shape = 'o'
if values[stitcher.TYPE_ATTR] in type_format:
shape = type_format[values[stitcher.TYPE_ATTR]]
color = 'g'
alpha = 0.8
if node in new_nodes:
color = 'b'
alpha = 0.2
elif 'rank' in values and values['rank'] > 7:
color = 'r'
elif 'rank' in values and values['rank'] < 7 and values['rank'] > 3:
color = 'y'
nx.draw_networkx_nodes(graph, pos, nodelist=[node], node_color=color,
node_shape=shape, alpha=alpha, ax=axes)
# draw the edges
dotted_line = []
normal_line = []
for src, trg in graph.edges():
if src in new_nodes and trg not in new_nodes:
|
dotted_line.append((src, trg))
else:
normal_line.append((src, trg))
nx.draw_networkx_edges(graph, pos, edgelist=dotted_line, style='dotted',
ax=axes)
|
nx.draw_networkx_edges(graph, pos, edgelist=normal_line, ax=axes)
# draw labels
nx.draw_networkx_labels(graph, pos, ax=axes)
def show_3d(graphs, request, titles, prog='neato', filename=None):
"""
Show the candidates in 3d - the request elevated above the container.
"""
fig = plt.figure(figsize=(18, 10))
fig.set_facecolor('white')
i = 0
size = _get_size(len(graphs))
for graph in graphs:
axes = fig.add_subplot(size[0], size[1], i+1,
projection=Axes3D.name)
axes.set_title(titles[i])
axes._axis3don = False
_plot_3d_subplot(graph, request, prog, axes)
i += 1
fig.tight_layout()
if filename is not None:
plt.savefig(filename)
else:
plt.show()
plt.close()
def _plot_3d_subplot(graph, request, prog, axes):
"""
Plot a single candidate graph in 3d.
"""
cache = {}
tmp = graph.copy()
for node in request.nodes():
tmp.remove_node(node)
pos = nx.nx_agraph.graphviz_layout(tmp, prog=prog)
# the container
for item in tmp.nodes():
axes.plot([pos[item][0]], [pos[item][1]], [0], linestyle="None",
marker="o", color='gray')
axes.text(pos[item][0], pos[item][1], 0, item)
for src, trg in tmp.edges():
axes.plot([pos[src][0], pos[trg][0]],
[pos[src][1], pos[trg][1]],
[0, 0], color='gray')
# the new nodes
for item in graph.nodes():
if item in request.nodes():
for nghb in graph.neighbors(item):
if nghb in tmp.nodes():
x_val = pos[nghb][0]
y_val = pos[nghb][1]
if (x_val, y_val) in list(cache.values()):
x_val = pos[nghb][0] + random.randint(10, SPACE)
y_val = pos[nghb][0] + random.randint(10, SPACE)
cache[item] = (x_val, y_val)
# edge
axes.plot([x_val, pos[nghb][0]],
[y_val, pos[nghb][1]],
[SPACE, 0], color='blue')
axes.plot([x_val], [y_val], [SPACE], linestyle="None", marker="o",
color='blue')
axes.text(x_val, y_val, SPACE, item)
for src, trg in request.edges():
if trg in cache and src in cache:
axes.plot([cache[src][0], cache[trg][0]],
[cache[src][1], cache[trg][1]],
[SPACE, SPACE], color='blue')
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
|
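The _get_size helper above is what show() and show_3d() use to lay candidate graphs out on a near-square grid of subplots. A minimal standalone sketch of that layout rule, reimplemented here purely for illustration:
import math

def grid_size(n_items):
    # Near-square grid: ceil(sqrt(n)) columns by floor(sqrt(n)) rows,
    # plus one extra column when that grid is still too small.
    n_cols = math.ceil(math.sqrt(n_items))
    n_rows = math.floor(math.sqrt(n_items))
    if n_cols * n_rows < n_items:
        n_cols += 1
    return int(n_rows), int(n_cols)

# 5 graphs -> (2, 3), 9 graphs -> (3, 3), 10 graphs -> (3, 4)
assert grid_size(5) == (2, 3)
assert grid_size(9) == (3, 3)
assert grid_size(10) == (3, 4)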
waveform80/gpio-zero
|
docs/examples/button_camera_1.py
|
Python
|
bsd-3-clause
| 327 | 0.006116 |
from gpiozero import Button
from picamera import PiCamera
from datetime import datetime
from signal import pause
button = Button(2)
camera = PiCamera()
def capture():
timestamp = datetime.now().isoformat()
camera.capture('/home/pi/{timest
|
amp}.jpg'.format(timestamp=timestamp))
button.when_pressed = capture
pause()
|
|
earthreader/web
|
earthreader/web/exceptions.py
|
Python
|
agpl-3.0
| 1,387 | 0 |
""":mod:`earthreader.web.exceptions` --- Exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from flask import jsonify
from werkzeug.exceptions import HTTPException
class IteratorNotFound(ValueError):
"""Raised when the iterator does not exist"""
class JsonException(HTTPException):
"""Base exception to return json response when raised.
Exceptions that inherit from this class must declare `error` and `message`.
"""
def get_response(self, environ=None):
r = jsonify(error=self.error, message=self.message)
r.status_code = 404
|
return r
class InvalidCategoryID(ValueError, JsonException):
"""Raised when the category ID is not valid."""
error = 'category-id-invalid'
message = 'Given category id is not valid'
class FeedNotFound(ValueError, JsonException):
"""Raised when the feed is not reachable."""
error = 'feed-not-found'
message = 'The feed you request does not exist'
|
class EntryNotFound(ValueError, JsonException):
"""Raised when the entry is not reachable."""
error = 'entry-not-found'
message = 'The entry you request does not exist'
class WorkerNotRunning(ValueError, JsonException):
"""Raised when the worker thread is not running."""
error = 'worker-not-running'
message = 'The worker thread that crawls feeds in the background is not ' \
'running.'
|
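As the JsonException docstring above notes, a subclass only needs to declare error and message; get_response() then serialises them as a JSON body. A hypothetical subclass following the same pattern (the class name and strings below are illustrative, not part of the Earth Reader API):
class CategoryNotFound(ValueError, JsonException):
    """Raised when the requested category does not exist."""
    error = 'category-not-found'
    message = 'The category you request does not exist'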
daj0ker/BinPy
|
BinPy/examples/source/ic/Series_7400/IC7433.py
|
Python
|
bsd-3-clause
| 1,247 | 0.001604 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Usage of IC 7433
# <codecell>
from __future__ import print_function
from BinPy import *
# <codecell>
# Usage of IC 7433:
ic = IC_7433()
print(ic.__doc__)
# <codecell>
# The Pin configuration is:
inp = {2: 0, 3: 0, 5: 0, 6: 0, 7: 0, 8: 1, 9: 1, 11: 1, 12: 1, 14: 1}
# Pin initialization
# Powering up the IC - using -- ic.setIC({14: 1, 7: 0})
ic.setIC({14: 1, 7: 0})
# Setting the inputs of the ic
ic
|
.setIC(inp)
# Draw the IC with the current configuration
ic.drawIC()
# <codecell>
# Run the IC with the current configuration using -- print ic.run() --
# Note that ic.run() returns a dict of pin configurations similar to the input dict
print (ic.run())
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC(
|
)
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# Run the IC
print (ic.run())
# <codecell>
# Connector Outputs
c = Connector()
# Set the output connector to a particular pin of the ic
ic.setOutput(1, c)
print(c)
|
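For reference, the 7433 is a quad 2-input NOR buffer, so the result of ic.run() for the inputs above can be sketched in plain Python. The gate wiring used below (inputs (2,3) -> pin 1, (5,6) -> 4, (8,9) -> 10, (11,12) -> 13) is the conventional 7402-style pinout and is an assumption here, not taken from the BinPy source:
inp = {2: 0, 3: 0, 5: 0, 6: 0, 7: 0, 8: 1, 9: 1, 11: 1, 12: 1, 14: 1}  # pins 14/7 are power

def nor(a, b):
    # NOR truth table: output is 1 only when both inputs are 0.
    return int(not (a or b))

# Assumed wiring: (input pin, input pin) -> output pin.
gates = {(2, 3): 1, (5, 6): 4, (8, 9): 10, (11, 12): 13}
outputs = {out: nor(inp[a], inp[b]) for (a, b), out in gates.items()}
print(outputs)  # {1: 1, 4: 1, 10: 0, 13: 0}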
City-of-Helsinki/kore
|
kore/settings.py
|
Python
|
agpl-3.0
| 4,485 | 0.000892 |
"""
Django settings for kore project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9j++(0=dc&6w&113d4bofcjy1xy-pe$frla&=s*8w94=0ym0@&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'grappelli',
'nested_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'raven.contrib.django.raven_compat',
'django_extensions',
'rest_framework',
'corsheaders',
'modeltranslation',
'leaflet',
'munigeo',
'schools',
'django_filters'
]
if DEBUG:
# INSTALLED_APPS.insert(0, 'devserver')
# INSTALLED_APPS.insert(0, 'debug_toolbar')
pass
MIDDLEWARE_CLASSES = (
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'kore.urls'
WSGI_APPLICATION = 'kore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'kore',
}
}
# Munigeo
# https://github.com/City-of-Helsinki/munigeo
PROJECTION_SRID = 3067
# If no country specified (for example through a REST API call), use this
# as default.
DEFAULT_COUNTRY = 'fi'
# The word used for municipality in the OCD identifiers in the default country.
DEFAULT_OCD_MUNICIPALITY = 'kunta'
BOUNDING_BOX = [-548576, 6291456, 1548576, 8388608]
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
gettext = lambda s: s
LANGUAGES = (
('fi', gettext('Finnish')),
('sv', gettext('Swedish')),
('en', gettext('English')),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "var", "static")
LOCALE_PATH = os.path.join(BASE_DIR, "schools", "locale")
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 20,
'MAX_PAGINATE_BY': 1000, # Maximum limit allowed when using `?page_size=xxx`.
'DEFAULT_FILTER_BACKENDS':
('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
)
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_
|
processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CORS_ORIGIN_ALLOW_ALL = True
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings
|
import *
except ImportError:
pass
|
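The try/except block above imports an optional local_settings module, as the comment explains, so environment-specific values can override the defaults. A minimal sketch of what a development local_settings.py might contain (all values below are placeholders, not part of the kore repository):
# local_settings.py (placed next to settings.py / on the Python path)
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'kore_dev',
        'USER': 'kore',
        'HOST': 'localhost',
    }
}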
plotly/python-api
|
packages/python/plotly/plotly/validators/funnelarea/_textfont.py
|
Python
|
mit
| 1,867 | 0.000536 |
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="textfont", parent_name="funnelarea", **kwargs):
super(TextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Textfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference
|
in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only
|
a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
|
shakamunyi/neutron-dvr
|
neutron/plugins/bigswitch/plugin.py
|
Python
|
apache-2.0
| 51,022 | 0.000098 |
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_rpc_base
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class RestProxyCallbacks(n_rpc.RpcCallback,
sg_rpc_base.SecurityGroupServerRpcCallbackMixin,
dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def get_port_from_device(self, device):
port_id = re.sub(r"^tap", "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
|
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGr
|
oupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
routerrule_db.RouterRule_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
|
gotropo/gotropo
|
create/efs.py
|
Python
|
gpl-3.0
| 2,807 | 0.006769 |
from troposphere import Tags,FindInMap, Ref, Template, Parameter,ImportValue, Ref, Output
from troposphere.efs import FileSystem, MountTarget
from troposphere.ec2 import SecurityGroup, SecurityGroupRule, Instance, Subnet
from create import export_ref, import_ref
from create.network import AclFactory, assoc_nacl_subnet
def efs_setup(template, ops, app_cfn_options, stack_name, stack_setup):
# Variable Declarations
vpc_id=ops.get('vpc_id')
efs_sg = app_cfn_options.network_names['tcpstacks'][stack_name]['sg_name']
efs_acl = app_cfn_options.network_names['tcpstacks'][stack_name]['nacl_name']
# Create EFS FIleSystem
efs_fs=FileSystem(
title='{}{}'.format(ops.app_name, stack_name),
FileSystemTags=Tags(Name='{}-{}'.format(ops.app_name, stack_name))
)
template.add_resource(efs_fs)
export_ref(template, '{}{}{}'.format(ops.app_name,stack_name,"Endpoint"), value=Ref(efs_fs), desc="Endpoint for EFS FileSystem")
# EFS FS Security Groups
efs_security_group=SecurityGroup(
title=efs_sg,
GroupDescription='Allow Access',
VpcId=vpc_id,
Tags=Tags(Name=efs_sg)
)
template.add_resource(efs_security_group)
export_ref(template, efs_sg, value=Ref(efs_sg), desc="Export for EFS Security Group")
# Create Network ACL for EFS Stack
efs_nacl = AclFactory(
template,
name=efs_acl,
vpc_id=ops.vpc_id,
in_networks=[val for key, val in sorted(ops.app_networks.items())],
in_ports=stack_
|
setup['ports'],
out_ports=ops.out_ports,
|
out_networks=[val for key, val in sorted(ops.app_networks.items())],
ssh_hosts=ops.get("deploy_hosts"),
)
export_ref(
template,
export_name=efs_acl,
value=Ref(efs_acl),
desc="{}{} stack".format("NetACL for", stack_name)
)
# Create Subnets for Mount Targets
for k, v in ops['tcpstacks']['EFS']['networks'].items():
efs_subnet=Subnet(
title='{}{}{}{}'.format(ops.app_name, stack_name, "MountTargetSubnet", k.split("-")[-1]),
AvailabilityZone=k,
CidrBlock=v,
VpcId=vpc_id,
Tags=Tags(Name='{}-{}-{}-{}'.format(ops.app_name, stack_name, "MountTargetSubnet", k.split("-")[-1]))
)
template.add_resource(efs_subnet)
assoc_name = '{}{}{}'.format(stack_name,"AclAssoc",k.split("-")[-1])
assoc_nacl_subnet(template, assoc_name, Ref(efs_acl), Ref(efs_subnet))
efs_mount_target=MountTarget(
title='{}{}{}'.format(ops.app_name, "EFSMountTarget", k.split("-")[-1]),
FileSystemId=Ref(efs_fs),
SecurityGroups=[Ref(efs_security_group)],
SubnetId=Ref(efs_subnet)
)
template.add_resource(efs_mount_target)
|
msdogan/pyvin
|
calvin/calvin.py
|
Python
|
mit
| 16,224 | 0.013745 |
import os
import sys
import logging
from pyomo.environ import *
from pyomo.opt import TerminationCondition
import numpy as np
import pandas as pd
class CALVIN():
def __init__(self, linksfile, ic=None, log_name="calvin"):
"""
Initialize CALVIN model object.
:param linksfile: (string) CSV file containing network link information
:param ic: (dict) Initial storage conditions for surface reservoirs
only used for annual optimizatio
|
n
:param log_name: A name for a logger - will be used to keep logs from different model runs separate in files.
Defaults to "calvin", which results in a log file in the current working directory named "calvin.log".
You can change this each time you instantiate the CALVIN class if you want to output separate lo
|
gs
for different runs. Otherwise, all results will be appended to the log file (not overwritten). If you
run multiple copies of CALVIN simultaneously, make sure to change this, or you could get errors writing
to the log file.
Do not provide a full path to a log file here because this value is also used in a way that is *not* a
file path. If being able to specify a full path is important for your workflow, please raise a GitHub
issue. It could be supported, but there is no need at this moment.
:returns: CALVIN model object
"""
# set up logging code
self.log = logging.getLogger(log_name)
if not self.log.hasHandlers(): # hasHandlers will only be True if someone already called CALVIN with the same log_name in the same session
self.log.setLevel("DEBUG")
screen_handler = logging.StreamHandler(sys.stdout)
screen_handler.setLevel(logging.INFO)
screen_formatter = logging.Formatter('%(levelname)s - %(message)s')
screen_handler.setFormatter(screen_formatter)
self.log.addHandler(screen_handler)
file_handler = logging.FileHandler("{}.log".format(log_name))
file_handler.setLevel(logging.DEBUG)
file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(file_formatter)
self.log.addHandler(file_handler)
df = pd.read_csv(linksfile)
df['link'] = df.i.map(str) + '_' + df.j.map(str) + '_' + df.k.map(str)
df.set_index('link', inplace=True)
self.df = df
self.linksfile = os.path.splitext(linksfile)[0] # filename w/o extension
# self.T = len(self.df)
SR_stats = pd.read_csv('calvin/data/SR_stats.csv', index_col=0).to_dict()
self.min_storage = SR_stats['min']
self.max_storage = SR_stats['max']
if ic:
self.apply_ic(ic)
# a few network fixes to make things work
self.add_ag_region_sinks()
self.fix_hydropower_lbs()
self.nodes = pd.unique(df[['i','j']].values.ravel()).tolist()
self.links = list(zip(df.i,df.j,df.k))
self.networkcheck() # make sure things aren't broken
def apply_ic(self, ic):
"""
Set initial storage conditions.
:param ic: (dict) initial storage values
:returns: nothing, but modifies the model object
"""
for k in ic:
ix = (self.df.i.str.contains('INITIAL') &
self.df.j.str.contains(k))
self.df.loc[ix, ['lower_bound','upper_bound']] = ic[k]
def inflow_multiplier(self, x):
"""
Multiply all network inflows by a constant.
:param x: (float) value to multiply inflows
:returns: nothing, but modifies the model object
"""
ix = self.df.i.str.contains('INFLOW')
self.df.loc[ix, ['lower_bound','upper_bound']] *= x
def eop_constraint_multiplier(self, x):
"""
Set end-of-period storage constraints as a fraction of maximum
available storage. Needed for limited foresight (annual) optimization.
:param x: (float) fraction of maximum storage to set lower bound
:returns: nothing, but modifies the model object
"""
for k in self.max_storage:
ix = (self.df.i.str.contains(k) &
self.df.j.str.contains('FINAL'))
lb = self.min_storage[k] + (self.max_storage[k]-self.min_storage[k])*x
self.df.loc[ix,'lower_bound'] = lb
self.df.loc[ix,'upper_bound'] = self.max_storage[k]
def no_gw_overdraft(self):
"""
Impose constraints to prevent groundwater overdraft
(not currently implemented)
"""
pass
def networkcheck(self):
"""
Confirm constraint feasibility for the model object.
(No inputs or outputs)
:raises: ValueError when infeasibilities are identified.
"""
nodes = self.nodes
links = self.df.values
num_in = {n: 0 for n in nodes}
num_out = {n: 0 for n in nodes}
lb_in = {n: 0 for n in nodes}
lb_out = {n: 0 for n in nodes}
ub_in = {n: 0 for n in nodes}
ub_out = {n: 0 for n in nodes}
# loop over links
for l in links:
lb = float(l[5])
ub = float(l[6])
num_in[l[1]] += 1
lb_in[l[1]] += lb
ub_in[l[1]] += ub
num_out[l[0]] += 1
lb_out[l[0]] += lb
ub_out[l[0]] += ub
if lb > ub:
raise ValueError('lb > ub for link %s' % (l[0]+'-'+l[1]))
for n in nodes:
if num_in[n] == 0 and n not in ['SOURCE','SINK']:
raise ValueError('no incoming link for ' + n)
if num_out[n] == 0 and n not in ['SOURCE','SINK']:
raise ValueError('no outgoing link for ' + n)
if ub_in[n] < lb_out[n]:
raise ValueError('ub_in < lb_out for %s (%d < %d)' % (n, ub_in[n], lb_out[n]))
if lb_in[n] > ub_out[n]:
raise ValueError('lb_in > ub_out for %s (%d > %d)' % (n, lb_in[n], ub_out[n]))
def add_ag_region_sinks(self):
"""
Hack to get rid of surplus water at no cost from agricultural regions.
Called internally when model is initialized.
:returns: nothing, but modifies the model object
"""
df = self.df
links = df[df.i.str.contains('HSU') & ~df.j.str.contains('DBUG')].copy(deep=True)
if not links.empty:
maxub = links.upper_bound.max()
links.j = links.apply(lambda l: 'SINK.'+l.i.split('.')[1], axis=1)
links.cost = 0.0
links.amplitude = 1.0
links.lower_bound = 0.0
links.upper_bound = maxub
links['link'] = links.i.map(str) + '_' + links.j.map(str) + '_' + links.k.map(str)
links.set_index('link', inplace=True)
self.df = self.df.append(links.drop_duplicates())
def fix_hydropower_lbs(self):
"""
Hack to fix lower bound constraints on piecewise hydropower links.
Storage piecewise links > 0 should have 0.0 lower bound, and
the k=0 pieces should always have lb = dead pool.
:returns: nothing, but modifies the model object
"""
def get_lb(link):
if link.i.split('.')[0] == link.j.split('.')[0]:
if link.k > 0:
return 0.0
elif link.i.split('.')[0] in self.min_storage:
return min(self.min_storage[link.i.split('.')[0]], link.lower_bound)
return link.lower_bound
ix = (self.df.i.str.contains('SR_') & self.df.j.str.contains('SR_'))
self.df.loc[ix, 'lower_bound'] = self.df.loc[ix].apply(get_lb, axis=1)
def remove_debug_links(self):
"""
Remove debug links from model object.
:returns: dataframe of links, excluding debug links.
"""
df = self.df
ix = df.index[df.index.str.contains('DBUG')]
df.drop(ix, inplace=True, axis=0)
self.nodes = pd.unique(df[['i','j']].values.ravel()).tolist()
self.links = list(zip(df.i,df.j,df.k))
return df
def create_pyomo_model(self, debug_mode=False, debug_cost=2e7):
"""
Use link data to create Pyomo model (constraints and objective function)
But do not solve yet.
:param debug_mode: (boolean) Whether to run in debug mode.
Use when there may be infeasibilities in the network.
:param debug_cost: When in debug mode, assign this cost ($/AF) to flow on debug links.
This should be an arbitrarily high number.
:returns: nothing, but creates the model object (self.model)
"""
# work on a local copy of the dataframe
if not debug_mode and self.df.index.str.contains('DBUG').any():
# pre
|
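A minimal usage sketch of the CALVIN constructor and the helper methods documented above. The links CSV path, the reservoir key and the import path are placeholders/assumptions here; the real network data files are not shown in this excerpt:
from calvin.calvin import CALVIN  # assumes the calvin/calvin.py package layout above

# Build the model from a links CSV; optionally pin initial reservoir storage.
model = CALVIN('my-links.csv', ic={'SR_EXAMPLE': 1000.0}, log_name='calvin_run1')

# Scale all network inflows (e.g. a dry scenario) and require end-of-period
# storage to be at least 20% of the usable range, per the methods above.
model.inflow_multiplier(0.8)
model.eop_constraint_multiplier(0.2)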
wangjohn/wallace
|
wallace/categorical_variable_encoder.py
|
Python
|
mit
| 1,836 | 0.003268 |
from collections import defaultdict
class CategoricalVariableEncoder(object):
def convert_categorical_variables(self, data_matrix, category_indices, category_value_mapping=None):
if len(category_indices) == 0:
return data_matrix
if category_value_mapping == None:
category_value_mapping = self.get_category_value_mapping(data_matrix, category_indices)
for i in xrange(len(data_matrix)):
for category_index in category_indices:
current_data_value = data_matrix[i][category_index]
updated_data_value = category_value_mapping[category_index][current_data_value]
data_matrix[i][category_index] = updated_data_value
return data_matrix
def get_category_value_mapping(self, data_matrix, category_indices):
categories = self.get_category_values(data_matrix, category_indices)
category_value_mapping = {}
for category_index, values_set in categories.iteritems():
category_value_mapping[category_
|
index] = self.create_value_map(values_set)
return category_value_mapping
def get_category_values(self, data_matrix, category_indices):
categories = {}
for category_index in category_indices:
categories[catego
|
ry_index] = set()
for i in xrange(len(data_matrix)):
for category_index in category_indices:
category_value = data_matrix[i][category_index]
categories[category_index].add(category_value)
return categories
@classmethod
def create_value_map(self, values_set):
sorted_values_set = sorted(values_set)
value_mapping = {}
for i in xrange(len(sorted_values_set)):
value_mapping[sorted_values_set[i]] = i
return value_mapping
|
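A small usage sketch of the encoder above (Python 2, matching the xrange/iteritems calls in the source): each categorical column is replaced by integer codes assigned in sorted order of that column's distinct values.
data = [
    ['red',   10],
    ['green', 20],
    ['red',   30],
]
encoder = CategoricalVariableEncoder()
encoded = encoder.convert_categorical_variables(data, category_indices=[0])
# sorted distinct values ('green', 'red') -> {'green': 0, 'red': 1}
print(encoded)  # [[1, 10], [0, 20], [1, 30]]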
JoshuaSkelly/lunch-break-rl
|
ai/actions/moveaction.py
|
Python
|
mit
| 385 | 0 |
from ai import action
class MoveAction(action.Action):
def __init__(self, performer, direction):
|
super().__init__(performer)
self.direction = direction
def prerequisite(self):
if not self.direction:
return False
return self.performer.can_move(*self.di
|
rection)
def perform(self):
self.performer.move(*self.direction)
|
peplin/astral
|
astral/api/handlers/ticket.py
|
Python
|
mit
| 4,822 | 0.002489 |
from tornado.web import HTTPError
import datetime
import threading
from astral.api.client import TicketsAPI
from astral.api.handlers.base import BaseHandler
from astral.api.handlers.tickets import TicketsHandler
from astral.models import Ticket, Node, Stream, session
import logging
log = logging.getLogger(__name__)
class TicketHandler(BaseHandler):
def _load_ticket(self, stream_slug, destination_uuid):
stream = Str
|
eam.get_by(slug=stream_slug)
if not destination_uuid:
return Ticket.get_by(stream=stream, destination=Node.me())
node = Node.get_by(uuid=destination_uuid)
return Ticket.query.filter_by(stream=stream, destination=node).first()
def delete(self, stream_slug, destination_uuid=None):
"""Stop forwardin
|
g the stream to the requesting node."""
ticket = self._load_ticket(stream_slug, destination_uuid)
if not ticket:
raise HTTPError(404)
ticket.delete()
session.commit()
TicketDeletionPropagationThread(ticket, self.request).start()
def get(self, stream_slug, destination_uuid=None):
ticket = self._load_ticket(stream_slug, destination_uuid)
if ticket:
# TODO this block is somewhat duplicated from TicketsHandler.post,
# where we refresh an existing ticket.
if not ticket.source == Node.me():
log.info("Refreshing %s with the source", ticket)
ticket = TicketsHandler._request_stream_from_node(ticket.stream,
ticket.source, ticket.destination,
existing_ticket=ticket)
if ticket:
ticket.refreshed = datetime.datetime.now()
# In case we lost the tunnel, just make sure it exists
ticket.queue_tunnel_creation()
session.commit()
# TODO this is unideal, but we need to get the new port if it
# changed. combination of sleep and db flush seems to do it
# somewhat reliably, but it's still a race condition.
import time
time.sleep(0.5)
ticket = self._load_ticket(stream_slug, destination_uuid)
self.write({'ticket': ticket.to_dict()})
def put(self, stream_slug, destination_uuid=None):
"""Edit tickets, most likely just confirming them."""
ticket = self._load_ticket(stream_slug, destination_uuid)
if ticket:
ticket.confirmed = self.get_json_argument('confirmed')
if ticket.confirmed:
log.info("Confirmed %s", ticket)
session.commit()
class TicketDeletionPropagationThread(threading.Thread):
"""When a ticket is deleted, we may need to inform other nodes or find a
replacement for ourselves. We don't want to do this in-band with the delete
request because it can cause dead locking of API requests between nodes.
"""
def __init__(self, ticket, request):
super(TicketDeletionPropagationThread, self).__init__()
self.ticket = ticket
self.request = request
def run(self):
if self.ticket.confirmed and not self.ticket.source == Node.me():
if self.ticket.destination == Node.me():
if self.request.remote_ip == '127.0.0.1':
log.info("User is canceling %s -- must inform sender",
self.ticket)
TicketsAPI(self.ticket.source.uri()).cancel(
self.ticket.absolute_url())
else:
log.info("%s is being deleted, we need to find another for "
"ourselves", self.ticket)
try:
TicketsHandler.handle_ticket_request(self.ticket.stream,
self.ticket.destination)
except HTTPError, e:
log.warning("We lost %s and couldn't find a "
"replacement to failover -- our stream is "
"dead: %s", self.ticket, e)
elif self.request.remote_ip == self.ticket.source.ip_address:
log.info("%s is being deleted by the source, must inform the "
"target %s", self.ticket, self.ticket.destination)
TicketsAPI(self.ticket.destination.uri()).cancel(
self.ticket.absolute_url())
elif self.request.remote_ip == self.ticket.destination.ip_address:
log.info("%s is being deleted by the destination, must inform "
"the source %s", self.ticket, self.ticket.source)
TicketsAPI(self.ticket.source.uri()).cancel(
self.ticket.absolute_url())
|
YU6326/YU6326.github.io
|
code/photogrammetry/inner_orientation.py
|
Python
|
mit
| 3,899 | 0.020569 |
import tkinter.filedialog as tkFileDialog
import numpy as np
from numpy import sin,cos
import os
def InnerOrientation(mat1,mat2):
"""
mat1 is the pixel-coordinate matrix (4*2), mat2 is the theoretical-coordinate matrix (4*2).
The six parameters h0,h1,h2,k0,k1,k2 are defined by the following matrices:
[x]=[h0]+[h1 h2] [i]
[y]=[k0]+[k1 k2] [j]
Returns the homogeneous matrix of the six orientation parameters, the unit-weight variance in x, and the unit-weight variance in y:
[h1 h2 h0]
[k1 k2 k0]
[0 0 1 ]
"""
# mat1=np.matrix(mat1)
# mat2=np.matrix(mat2)
y=mat2.ravel()
y=y.T
xlist=[]
for i in range(int(y.size/2)):
x0=np.matrix([[1,mat1[i,0],mat1[i,1],0,0,0],[0,0,0,1,mat1[i,0],mat1[i,1]]])
xlist.append(x0)
x=np.vstack(xlist)
# print(x)
N=np.linalg.inv(x.T @ x)
beta=N @ x.T @ y
# print(beta)
r=(np.size(y)-6)
e=y-x@beta
ex=e[0::2]
ey=e[1::2]
sigmax=(np.linalg.norm(ex)/r)
sigmay=(np.linalg.norm(ey)/r)
# print(sigmax)
# print(sigmay)
return(np.matrix([[beta[1,0],beta[2,0],beta[0,0]],[beta[4,0],beta[5,0],beta[3,0]],[0,0,1]]),sigmax,sigmay)
def openkbfile():
#default_dir = r"C:\Users\lenovo\Desktop" # set the default directory to open
fname = tkFileDialog.askopenfilename(title=u"选择文件",filetypes=[("kb file", "*.kb"), ("all", "*.*")],initialdir=r"D:\学习\摄影测量\摄影测量实验数据-后方交会、前方交会")
f=open(fname,mode='r')
lines=f.readlines()
f.close()
mat=[]
for line in lines:
t=line.split()
mat.append([float(t[0]),float(t[1])])
#initialdir=(os.path.expanduser(default_dir))
# print(fname) # returns the full file path
mat1=mat[0::2]
mat2=mat[1::2]
mat,sigmax2,sigmay2=InnerOrientation(np.matrix(mat1),np.matrix(mat2))
print(mat,sigmax2,sigmay2)
# def transform(mat,coormat):
# """
# mat: homogeneous matrix, obtained from InnerOrientation
# coormat:
|
homogeneous coordinates: the third element of every column is 1 and each coordinate is a column vector. Any number of columns is allowed.
# Returns: the transformed coordinates
# """
# return mat@coormat
# def openaofile():
# fname = tkFileDialog.askopenfilename(title=u"选择文件",filetypes=[("ao.txt file", "*.txt"), ("all", "*.*")],initialdir=r"D:\学习\摄影测量\摄影测量实验数据-后方交会、前方交会")
# f=open(fname,mode='r')
# lines=f.readlines()
# f.close()
# matimage=[]
# matg
|
round=[]
# for line in lines[1:]:
# t=line.split()
# matimage.append([float(t[0]),float(t[1])])
# matground.append([float(t[2]),float(t[3]),float(t[4])])
# return(np.matrix(matimage),np.matrix(matground))
# def resection():
# matimage,matground=openaofile()
# dist=np.linalg.norm(matimage[1]-matimage[0])
# Dist=np.linalg.norm(matground[1]-matground[0])
# matimage=matimage.T
# matground=matground.T
# n=dist.shape[0]
# assert n==5
# m=Dist/dist
# x0,y0,f=0,0,210.681 # all values in millimetres
# Xs0,Ys0,H=np.average(matground,axis=0)
# H+=m*f
# phi,omega,kappa=0,0,0
# R=np.zeros((3,3))
# R[0,0]=cos(phi)*cos(kappa)-sin(phi)*sin(omega)*sin(kappa)
# R[0,1]=-cos(phi)*sin(kappa)-sin(phi)*sin(omega)*cos(kappa)
# R[0,2]=-sin(phi)*cos(omega)
# R[1,0]=cos(omega)*sin(kappa)
# R[1,1]=cos(omega)*cos(kappa)
# R[1,2]=-sin(omega)
# R[2,0]=sin(phi)*cos(kappa)+cos(phi)*sin(omega)*sin(kappa)
# R[2,1]=-sin(phi)*sin(kappa)+cos(phi)*sin(omega)*cos(kappa)
# R[2,2]=cos(phi)*cos(omega)
# matimage1=np.zeros((2,5))
# S=np.matrix([Xs0,Ys0,H]).T
# Alist=[]
# Vlist=[]
# Llist=[]
# for i in range(5):
# u=matground[:,i]-S
# matimage1[0,i]=-f*np.dot(R[0],u)/np.dot(R[2],u)
# matimage1[1,i]=-f*np.dot(R[1],u)/np.dot(R[2],u)
# zba=np.dot(R[2],u)
# A=np.zeros(2,6)
# # A[0,0]=(R[0,0]*f+R[0,2]*matimage[])[]
if __name__=="__main__":
openkbfile()
|
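A quick synthetic check of the affine interior-orientation model described in the InnerOrientation docstring: generate four fiducial points from known parameters and confirm the function recovers them. The parameter values below are made up for the test.
import numpy as np

# x = h0 + h1*i + h2*j,  y = k0 + k1*i + k2*j
h0, h1, h2 = 5.0, 0.02, 0.001
k0, k1, k2 = -3.0, -0.001, 0.02

pixel = np.matrix([[0, 0], [100, 0], [0, 100], [100, 100]], dtype=float)  # (i, j)
theory = np.matrix([[h0 + h1*i + h2*j, k0 + k1*i + k2*j] for i, j in pixel.tolist()])

M, sx, sy = InnerOrientation(pixel, theory)
print(M)  # approximately [[0.02, 0.001, 5.0], [-0.001, 0.02, -3.0], [0, 0, 1]]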
ta2-1/pootle
|
pootle/apps/accounts/__init__.py
|
Python
|
gpl-3.0
| 328 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This fi
|
le is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
default_app_config = 'accounts.apps.AccountsConfig
|
'
|
BilalDev/HolyScrap
|
src/hsimage.py
|
Python
|
apache-2.0
| 1,247 | 0.000802 |
import sys
from PIL import Image
img = Image.open(sys.argv[1])
width, height = img.size
xblock = 5
yblock = 5
w_width = width / xblock
w_height = height / yblock
blockmap = [(xb*w_width, yb*w_height, (xb+1)*w_width, (yb+1)*w_height)
for xb in xrange(xblock) for yb in xrange(yblock)]
newblockmap = list(blockmap)
newblockmap[0] = blockmap[14]
newblockmap[1] = blockmap[13]
newblockmap[2] = blockmap[12]
newblockmap[3] = blockmap[11]
newblockmap[4] = blockmap[10]
newblockmap[5] = blockmap[24]
newblockmap[6] = blockmap[23]
newblockmap[7] = blockmap[22]
newblockmap[8] = blockmap[21]
newblockmap[9] = block
|
map[20]
newblockmap[10] = blockmap[4]
newblockmap[11] = blockmap[3]
newblockmap[12] = blockmap[2]
newblockmap[13] = blockmap[1]
newblockmap[14] = blockmap[0]
newblockmap[15] = blockmap[19]
newblockmap[16] = blockmap[18]
newblockmap[17] = blockmap[17]
newblockmap[18] = blockmap[16]
newblockmap[19] = blockmap[15]
newblockmap[20] = blockmap[9]
newblockmap[21] = blockmap[8]
newblockmap[22] = blockmap[7]
newblockmap[23]
|
= blockmap[6]
newblockmap[24] = blockmap[5]
result = Image.new(img.mode, (width, height))
for box, sbox in zip(blockmap, newblockmap):
c = img.crop(sbox)
result.paste(c, box)
result.save(sys.argv[1])
|
LilithWittmann/froide
|
froide/foisite/models.py
|
Python
|
mit
| 1,944 | 0.001029 |
from django.db import models
from django.conf import settings
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class FoiSite(models.Model):
country_code = models.CharField(_('Country Code'), max_length=5)
country_name = models.CharField(_('Country Name'), max_length=255)
name = models.CharField(_('Name'), max_length=255)
url = models.CharField(_('URL'), max_length=255)
text = models.TextField(_('Text'), blank=True)
enabled = models.BooleanField(_('Enabled'), default=True)
class Meta:
verbose_name = _('FOI Site')
verbose_name_plural = _('FOI Sites')
def __str__(self):
return u'%s (%s)' % (self.name, self.country_name)
def save(self, *args, **kwargs):
self.country
|
_code = self.country_code.upper()
super(FoiSite, self).save(*
|
args, **kwargs)
try:
from django.contrib.gis.geoip import GeoIP
except ImportError:
GeoIP = None # noqa
class SiteAdivsor(object):
def __init__(self):
self.geoip = GeoIP()
self.sites = None
def update(self):
sites = FoiSite.objects.filter(enabled=True)
self.sites = dict([(f.country_code, f) for f in sites])
def refresh(self):
self.sites = None
def get_site(self, ip):
if self.sites is None:
self.update()
result = self.geoip.country(ip)
return self.sites.get(result['country_code'], None)
class DummyAdvisor(object):
def refresh(self):
pass
def get_site(self, ip):
pass
if GeoIP and getattr(settings, 'GEOIP_PATH', False):
advisor = SiteAdivsor()
else:
advisor = DummyAdvisor()
@receiver(models.signals.post_save, sender=FoiSite,
dispatch_uid="foisite_saved")
def foisite_saved(instance=None, created=False, **kwargs):
advisor.refresh()
|
matt77hias/FileUtils
|
src/name.py
|
Python
|
gpl-3.0
| 249 | 0.02008 |
import os
def remove_fname_extension(fname
|
):
return os.path.splitext(fname)[0]
def change_fname_extension(fname, extension):
return remove_fname_extension(fname) + '.' + ex
|
tension
def concat(path, fname):
return path + '/' + fname
|
jamesnw/HelpfulAppStoreBot
|
kill_bot.py
|
Python
|
gpl-2.0
| 277 | 0.00361 |
#!/usr/bin/python
import psutil
import signal
|
#From https://github.com/getchar/rbb_article
target = "HelpfulAppStore"
# scan through processes
for proc
|
in psutil.process_iter():
if proc.name() == target:
print(" match")
proc.send_signal(signal.SIGUSR1)
|
amenonsen/ansible
|
test/lib/ansible_test/_internal/provider/layout/ansible.py
|
Python
|
gpl-3.0
| 1,396 | 0.002149 |
"""Layout provider for Ansible source."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ... import types as t
from . import (
ContentLayout,
LayoutProvider,
)
class AnsibleLayout(LayoutProvider):
"""Layout provider for Ansible source."""
@staticmethod
def is_content_root(path): # type: (str) -> bool
"""Return True if the given path is a content root for this provider."""
return os.path.exists(os.path.join(path, 'setup.py')) and os.path.exists(os.path.join(path, 'bin/ansible-test'))
def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
"""Create a Layout using the given root and paths."""
plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in self.PLUGIN_TYPES)
plugin_paths.update(dict(
modules='lib/ansible/modules',
module_utils='lib/ansible/module_utils',
))
return ContentLayout(root,
|
paths,
plugin_paths=plugin_paths,
integration_path='test/integration',
unit_path='test/units',
unit_module_path='test/units/modules',
unit_module_utils_path='test/units/module_utils',
|
)
|
bswartz/cinder
|
cinder/volume/rpcapi.py
|
Python
|
apache-2.0
| 15,156 | 0 |
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume RPC API.
"""
from oslo_config import cfg
from oslo_serialization import jsonutils
from cinder import quota
from cinder import rpc
from cinder.volume import utils
CONF = cfg.CONF
QUOTAS = quota.QUOTAS
class VolumeAPI(rpc.RPCAPI):
"""Client side of the volume rpc API.
API version history:
.. code-block:: none
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
1.5 - Add accept_transfer.
1.6 - Add extend_volume.
1.7 - Adds host_name parameter to attach_volume()
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
1.11 - Adds mode parameter to attach_volume()
to support volume read-only attaching.
1.12 - Adds retype.
1.13 - Adds create_export.
1.14 - Adds reservation parameter to extend_volume().
1.15 - Adds manage_existing and unmanage_only flag to delete_volume.
1.16 - Removes create_export.
1.17 - Add replica option to create_volume, promote_replica and
sync_replica.
1.18 - Adds create_consistencygroup, delete_consistencygroup,
create_cgsnapshot, and delete_cgsnapshot. Also adds
the consistencygroup_id parameter in create_volume.
1.19 - Adds update_migrated_volume
1.20 - Adds support for sending objects over RPC in create_snapshot()
and delete_snapshot()
1.21 - Adds update_consistencygroup.
1.22 - Adds create_consistencygroup_from_src.
1.23 - Adds attachment_id to detach_volume.
1.24 - Removed duplicated parameters: snapshot_id, image_id,
source_volid, source_replicaid, consistencygroup_id and
cgsnapshot_id from create_volume. All off them are already
passed either in request_spec or available in the DB.
1.25 - Add source_cg to create_consistencygroup_from_src.
1.26 - Adds support for sending objects over RPC in
create_consistencygroup(), create_consistencygroup_from_src(),
update_consistencygroup() and delete_consistencygroup().
1.27 - Adds support for replication V2
1.28 - Adds manage_existing_snapshot
1.29 - Adds get_capabilities.
1.30 - Adds remove_export
1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot()
and delete_cgsnapshot() to cast method only with necessary
args. Forwarding CGSnapshot object instead of CGSnapshot_id.
1.32 - Adds support for sending objects over RPC in create_volume().
1.33 - Adds support for sending objects over RPC in delete_volume().
1.34 - Adds support for sending objects over RPC in retype().
1.35 - Adds support for sending objects over RPC in extend_volume().
1.36 - Adds support for sending objects over RPC in migrate_volume(),
migrate_volume_completion(), and update_migrated_volume().
1.37 - Adds old_reservations parameter to retype to support quota
checks in the API.
1.38 - Scaling backup service, add get_backup_device() and
secure_file_operations_enabled()
1.39 - Update replication methods to reflect new backend rep strategy
1.40 - Add cascade option to delete_volume().
... Mitaka supports messaging version 1.40. Any changes to existing
methods in 1.x after that point should be done so that they can handle
the version_cap being set to 1.40.
2.0 - Remove 1.x compatibility
2.1 - Add get_manageable_volumes() and get_manageable_snapshots().
2.2 - Adds support for sending objects over RPC in manage_existing().
2.3 - Adds support for sending objects over RPC in
initialize_connection().
"""
RPC_API_VERSION = '2.3'
TOPIC = CONF.volume_topic
BINARY = 'cinder-volume'
def _compat_ver(self, current, *legacy):
versions = (current,) + legacy
for version in versions[:-1]:
if self.client.can_send_version(version):
return version
return versions[-1]
def _get_cctxt(self, host, version):
new_host = utils.get_volume_rpc_host(host)
return self.client.prepare(server=new_host, version=version)
def create_consistencygroup(self, ctxt, group, host):
cctxt = self._get_cctxt(host, '2.0')
cctxt.cast(ctxt, 'create_consistencygroup',
group=group)
def delete_consistencygroup(self, ctxt, group):
cctxt = self._get_cctxt(group.host, '2.0')
cctxt.cast(ctxt, 'delete_consistencygroup',
group=group)
def update_consistencygroup(self, ctxt, group, add_volumes=None,
remove_volumes=None):
cctxt = self._get_cctxt(group.host, '2.0')
cctxt.cast(ctxt, 'update_consistencygroup',
|
group=group,
add_volumes=add_volumes,
remov
|
e_volumes=remove_volumes)
def create_consistencygroup_from_src(self, ctxt, group, cgsnapshot=None,
source_cg=None):
cctxt = self._get_cctxt(group.host, '2.0')
cctxt.cast(ctxt, 'create_consistencygroup_from_src',
group=group,
cgsnapshot=cgsnapshot,
source_cg=source_cg)
def create_cgsnapshot(self, ctxt, cgsnapshot):
cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, '2.0')
cctxt.cast(ctxt, 'create_cgsnapshot', cgsnapshot=cgsnapshot)
def delete_cgsnapshot(self, ctxt, cgsnapshot):
cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, '2.0')
cctxt.cast(ctxt, 'delete_cgsnapshot', cgsnapshot=cgsnapshot)
def create_volume(self, ctxt, volume, host, request_spec,
filter_properties, allow_reschedule=True):
request_spec_p = jsonutils.to_primitive(request_spec)
cctxt = self._get_cctxt(host, '2.0')
cctxt.cast(ctxt, 'create_volume', volume_id=volume.id,
request_spec=request_spec_p,
filter_properties=filter_properties,
allow_reschedule=allow_reschedule, volume=volume)
def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False):
cctxt = self._get_cctxt(volume.host, '2.0')
cctxt.cast(ctxt, 'delete_volume', volume_id=volume.id,
unmanage_only=unmanage_only, volume=volume, cascade=cascade)
def create_snapshot(self, ctxt, volume, snapshot):
cctxt = self._get_cctxt(volume['host'], '2.0')
cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'],
snapshot=snapshot)
def delete_snapshot(self, ctxt, snapshot, host, unmanage_only=False):
cctxt = self._get_cctxt(host, '2.0')
cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot,
unmanage_only=unmanage_only)
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
cctxt =
|
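The _compat_ver helper above walks the candidate versions from newest to oldest and returns the first one the RPC client is allowed to send, falling back to the last (oldest) entry. A standalone sketch of that selection logic with a stubbed client; the stub below is illustrative only, the real check is oslo.messaging's can_send_version():
class StubClient(object):
    def __init__(self, version_cap):
        self.version_cap = version_cap

    def can_send_version(self, version):
        # naive "major.minor" comparison, good enough for this sketch
        parse = lambda v: tuple(int(p) for p in v.split('.'))
        return parse(version) <= parse(self.version_cap)

def compat_ver(client, current, *legacy):
    versions = (current,) + legacy
    for version in versions[:-1]:
        if client.can_send_version(version):
            return version
    return versions[-1]

print(compat_ver(StubClient('2.3'), '2.3', '2.0'))   # '2.3'
print(compat_ver(StubClient('1.40'), '2.3', '2.0'))  # '2.0' (cap too old, fall back)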
ossifrage/dynamicforms
|
dynamicforms/settings.py
|
Python
|
bsd-3-clause
| 1,384 | 0.002168 |
# -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
SECRET_KEY = os_env.get('DYNAMICFORMS_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example' # TODO: Change me
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
# Put the db file in project root
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
    DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 1 # For faster tests
WTF_CSRF_ENABLED = False # Allows form testing
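# Illustrative usage sketch (assumes an application factory such as
# create_app(config_object=...) exists elsewhere in this project):
#   app = create_app(config_object=DevConfig)   # local development
#   app = create_app(config_object=ProdConfig)  # production deployment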
|
nagyistoce/devide
|
external/wxPyPlot.py
|
Python
|
bsd-3-clause
| 57,752 | 0.015844 |
#!/usr/bin/env python2.2
#-----------------------------------------------------------------------------
# Name: wxPyPlot.py
# Purpose:
#
# Author: Gordon Williams
#
# Created: 2003/11/03
# RCS-ID: $Id$
# Copyright: (c) 2002
# Licence: Use as you wish.
#-----------------------------------------------------------------------------
"""
This is a simple light weight plotting module that can be used with Boa or
easily integrated into your own wxPython application. The emphasis is on small
size and fast plotting for large data sets. It has a reasonable number of
features to do line and scatter graphs easily. It is not as sophisticated or as
powerful as SciPy Plt or Chaco. Both of these are great packages but consume
huge amounts of computer resources for simple plots. They can be found at
http://scipy.com
This file contains two parts; first the re-usable library stuff, then, after
a "if __name__=='__main__'" test, a simple frame and a few default plots
for examples and testing.
Based on wxPlotCanvas
Written by K.Hinsen, R. Srinivasan;
Ported to wxPython Harm van der Heijden, feb 1999
Major Additions Gordon Williams Feb. 2003 (g_will@cyberus.ca)
-More style options
-Zooming using mouse "rubber band"
-Scroll left, right
-Grid(graticule)
-Printing, preview, and page set up (margins)
-Axis and title labels
-Cursor xy axis values
-Doc strings and lots of comments
-Optimizations for large number of points
-Legends
Did a lot of work here to speed markers up. Only a factor of 4 improvement
though. Lines are much faster than markers, especially filled markers. Stay
away from circles and triangles unless you only have a few thousand points.
Times for 25,000 points
Line - 0.078 sec
Markers
Square - 0.22 sec
dot - 0.10
circle - 0.87
cross,plus - 0.28
triangle, triangle_down - 0.90
Thanks to Chris Barker for getting this version working on Linux.
Zooming controls with mouse (when enabled):
Left mouse drag - Zoom box.
Left mouse double click - reset zoom.
Right mouse click - zoom out centred on click location.
"""
import wx
import time, string
# Needs Numeric
try:
import Numeric
except:
try:
import numarray as Numeric #if numarray is used it is renamed Numeric
except:
msg= """
This module requires the Numeric or numarray module,
which could not be imported. It probably is not installed
(it's not part of the standard Python distribution). See the
Python site (http://www.python.org) for information on
downloading source or binaries."""
raise ImportError, "Numeric or numarray not found. \n" + msg
try:
True
except NameError:
True = 1==1
False = 1==0
#
# Plotting classes...
#
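# Illustrative sketch (not from the original module; shows the typical
# PolyLine / PlotGraphics / PlotCanvas.Draw flow this library is used with):
#   data = [(x, x * x) for x in range(10)]
#   line = PolyLine(data, colour='blue', width=2, legend='y = x**2')
#   canvas.Draw(PlotGraphics([line], 'Parabola', 'X axis', 'Y axis'))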
class PolyPoints:
"""Base Class for lines and markers
- All methods are private.
"""
def __init__(self, points, attr):
self.points = Numeric.array(points)
self.currentScale= (1,1)
self.currentShift= (0,0)
self.scaled = self.points
self.attributes = {}
self.attributes.update(self._attributes)
for name, value in attr.items():
if name not in self._attributes.keys():
raise KeyError, "Style attribute incorrect. Should be one of %s" %self._attributes.keys()
self.attributes[name] = value
def boundingBox(self):
if len(self.points) == 0:
#no curves to draw
#defaults to (-1,-1) and (1,1) but axis can be set in Draw
minXY= Numeric.array([-1,-1])
maxXY= Numeric.array([ 1, 1])
else:
minXY= Numeric.minimum.reduce(self.points)
maxXY= Numeric.maximum.reduce(self.points)
return minXY, maxXY
def scaleAndShift(self, scale=(1,1), shift=(0,0)):
if len(self.points) == 0:
#no curves to draw
return
if (scale is not self.currentScale) or (shift is not self.currentShift):
#update point scaling
self.scaled = scale*self.points+shift
self.currentScale= scale
self.currentShift= shift
#else unchanged use the current scaling
def getLegend(self):
return self.attributes['legend']
class PolyLine(PolyPoints):
"""Class to define line type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.SOLID,
'legend': ''}
def __init__(self, points, **attr):
"""Creates PolyLine object
points - sequence (array, tuple or list) of (x,y) points making up line
**attr - key word attributes
Defaults:
'colour'= 'black', - wxPen Colour any wxNamedColour
'width'= 1, - Pen width
'style'= wxSOLID, - wxPen style
'legend'= '' - Line Legend to display
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale
style= self.attributes['style']
dc.SetPen(wx.Pen(wx.NamedColour(colour), int(width), style))
if coord == None:
dc.DrawLines(self.scaled)
else:
dc.DrawLines(coord) #draw legend line
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
h= self.attributes['width'] * printerScale
w= 5 * h
return (w,h)
class PolyMarker(PolyPoints):
"""Class to define marker type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'size': 2,
'fillcolour': None,
'fillstyle': wx.SOLID,
'marker': 'circle',
'legend': ''}
def __init__(self, points, **attr):
"""Creates PolyMarker object
points - sequence (array, tuple or list) of (x,y) points
**attr - key word attributes
Defaults:
'colour'= 'black', - wxPen Colour any wxNamedColour
            'width'= 1, - Pen width
'size'= 2, - Marker size
            'fillcolour'= same as colour, - wxBrush Colour any wxNamedColour
'fillstyle'= wx.SOLID, - wxBrush fill style (use wxTRANSPARENT for no fill)
'marker'= 'circle' - Marker shape
'legend'= '' - Marker Legend to display
Marker Shapes:
- 'circle'
- 'dot'
- 'square'
- 'triangle'
- 'triangle_down'
- 'cross'
- 'plus'
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale
size = self.attributes['size'] * printerScale
fillcolour = self.attributes['fillcolour']
fillstyle = self.attributes['fillstyle']
marker = self.attributes['marker']
dc.SetPen(wx.Pen(wx.NamedColour(colour),int(width)))
if fillcolour:
dc.SetBrush(wx.Brush(wx.NamedColour(fillcolour),fillstyle))
else:
dc.SetBrush(wx.Brush(wx.NamedColour(colour), fillstyle))
if coord == None:
self._drawmarkers(dc, self.scaled, marker, size)
else:
self._drawmarkers(dc, coord, marker, size) #draw legend marker
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
s= 5*self.attributes['size'] * printerScale
return (s,s)
def _drawmarkers(self, dc, coords, marker,size=1):
f = eval('self.
|
CTSRD-SOAAP/chromium-42.0.2311.135
|
v8/tools/testrunner/local/pool_unittest.py
|
Python
|
bsd-3-clause
| 1,222 | 0.00982 |
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pool import Pool
def Run(x):
if x == 10:
raise Exception("Expected exception triggered by test.")
return x
class PoolTest(unittest.TestCase):
def testNormal(self):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
results.add(result)
self.assertEquals(set(range(0, 10)), results)
def testException(self):
results = set()
    pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
# Item 10 will not appear in results due to an internal exception.
results.add(result)
expect = set(range(0, 12))
expect.remove(10)
    self.assertEquals(expect, results)
def testAdd(self):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
results.add(result)
if result < 30:
pool.add([result + 20])
self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
results)
|
andersk/zulip
|
zerver/lib/remote_server.py
|
Python
|
apache-2.0
| 7,292 | 0.00192 |
import logging
import urllib
from typing import Any, Dict, List, Mapping, Tuple, Union
import orjson
import requests
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.translation import gettext as _
from analytics.models import InstallationCount, RealmCount
from version import ZULIP_VERSION
from zerver.lib.exceptions import JsonableError
from zerver.lib.export import floatify_datetime_fields
from zerver.lib.outgoing_http import OutgoingSession
from zerver.models import RealmAuditLog
class PushBouncerSession(OutgoingSession):
def __init__(self) -> None:
super().__init__(role="push_bouncer", timeout=30)
class PushNotificationBouncerException(Exception):
pass
class PushNotificationBouncerRetryLaterError(JsonableError):
http_status_code = 502
def send_to_push_bouncer(
method: str,
endpoint: str,
post_data: Union[bytes, Mapping[str, Union[str, int, None, bytes]]],
extra_headers: Mapping[str, str] = {},
) -> Dict[str, object]:
"""While it does actually send the notice, this function has a lot of
code and comments around error handling for the push notifications
bouncer. There are several classes of failures, each with its own
potential solution:
* Network errors with requests.request. We raise an exception to signal
it to the callers.
* 500 errors from the push bouncer or other unexpected responses;
we don't try to parse the response, but do make clear the cause.
* 400 errors from the push bouncer. Here there are 2 categories:
Our server failed to connect to the push bouncer (should throw)
vs. client-side errors like an invalid token.
"""
assert settings.PUSH_NOTIFICATION_BOUNCER_URL is not None
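    # Illustrative call (endpoint and payload here are hypothetical examples,
    # not taken from this file):
    #   send_to_push_bouncer("POST", "push/notify", {"user_id": 42})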
url = urllib.parse.urljoin(
settings.PUSH_NOTIFICATION_BOUNCER_URL, "/api/v1/remotes/" + endpoint
)
api_auth = requests.auth.HTTPBasicAuth(settings.ZULIP_ORG_ID, settings.ZULIP_ORG_KEY)
headers = {"User-agent": f"ZulipServer/{ZULIP_VERSION}"}
headers.update(extra_headers)
try:
res = PushBouncerSession().request(
method, url, data=post_data, auth=api_auth, verify=True, headers=headers
)
except (
requests.exceptions.Timeout,
requests.exceptions.SSLError,
requests.exceptions.ConnectionError,
) as e:
raise PushNotificationBouncerRetryLaterError(
f"{e.__class__.__name__} while trying to connect to push notification bouncer"
)
if res.status_code >= 500:
# 500s should be resolved by the people who run the push
# notification bouncer service, and they'll get an appropriate
# error notification from the server. We raise an exception to signal
# to the callers that the attempt failed and they can retry.
error_msg = "Received 500 from push notification bouncer"
logging.warning(error_msg)
raise PushNotificationBouncerRetryLaterError(error_msg)
elif res.status_code >= 400:
# If JSON parsing errors, just let that exception happen
result_dict = orjson.loads(res.content)
msg = result_dict["msg"]
if "code" in result_dict and result_dict["code"] == "INVALID_ZULIP_SERVER":
# Invalid Zulip server credentials should email this server's admins
raise PushNotificationBouncerException(
_("Push notifications bouncer error: {}").format(msg)
)
else:
# But most other errors coming from the push bouncer
# server are client errors (e.g. never-registered token)
# and should be handled as such.
raise JsonableError(msg)
elif res.status_code != 200:
# Anything else is unexpected and likely suggests a bug in
# this version of Zulip, so we throw an exception that will
# email the server admins.
raise PushNotificationBouncerException(
f"Push notification bouncer returned unexpected status code {res.status_code}"
)
# If we don't throw an exception, it's a successful bounce!
    return orjson.loads(res.content)
def send_json_to_push_bouncer(
method: str, endpoint: str, post_data: Mapping[str, object]
) -> Dict[str, object]:
return send_to_push_bouncer(
method,
endpoint,
orjson.dumps(post_data),
extra_headers={"Content-type": "application/json"},
)
REALMAUDITLOG_PUSHED_FIELDS = [
"id",
"realm",
"event_time",
"backfilled",
"extra_data",
"event_type",
]
def build_analytics_data(
realm_count_query: Any, installation_count_query: Any, realmauditlog_query: Any
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]:
    # We limit the batch size on the client side to avoid OOM kills, timeouts, etc.
MAX_CLIENT_BATCH_SIZE = 10000
data = {}
data["analytics_realmcount"] = [
model_to_dict(row) for row in realm_count_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
data["analytics_installationcount"] = [
model_to_dict(row)
for row in installation_count_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
data["zerver_realmauditlog"] = [
model_to_dict(row, fields=REALMAUDITLOG_PUSHED_FIELDS)
for row in realmauditlog_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
floatify_datetime_fields(data, "analytics_realmcount")
floatify_datetime_fields(data, "analytics_installationcount")
floatify_datetime_fields(data, "zerver_realmauditlog")
return (
data["analytics_realmcount"],
data["analytics_installationcount"],
data["zerver_realmauditlog"],
)
def send_analytics_to_remote_server() -> None:
# first, check what's latest
try:
result = send_to_push_bouncer("GET", "server/analytics/status", {})
except PushNotificationBouncerRetryLaterError as e:
logging.warning(e.msg)
return
last_acked_realm_count_id = result["last_realm_count_id"]
last_acked_installation_count_id = result["last_installation_count_id"]
last_acked_realmauditlog_id = result["last_realmauditlog_id"]
(realm_count_data, installation_count_data, realmauditlog_data) = build_analytics_data(
realm_count_query=RealmCount.objects.filter(id__gt=last_acked_realm_count_id),
installation_count_query=InstallationCount.objects.filter(
id__gt=last_acked_installation_count_id
),
realmauditlog_query=RealmAuditLog.objects.filter(
event_type__in=RealmAuditLog.SYNCED_BILLING_EVENTS, id__gt=last_acked_realmauditlog_id
),
)
if len(realm_count_data) + len(installation_count_data) + len(realmauditlog_data) == 0:
return
request = {
"realm_counts": orjson.dumps(realm_count_data).decode(),
"installation_counts": orjson.dumps(installation_count_data).decode(),
"realmauditlog_rows": orjson.dumps(realmauditlog_data).decode(),
"version": orjson.dumps(ZULIP_VERSION).decode(),
}
# Gather only entries with an ID greater than last_realm_count_id
try:
send_to_push_bouncer("POST", "server/analytics", request)
except JsonableError as e:
logging.warning(e.msg)
|
QuentinJi/pyuiautomation
|
initial_work.py
|
Python
|
mit
| 193 | 0 |
import subprocess
import os
def start_service():
    subprocess.Popen("ipy start_srv.py", stdout=subprocess.PIPE)
return 0
def close_service():
    os.system("taskkill /im ipy.exe /f")
|
facetothefate/contrail-controller
|
src/discovery/disc_cassdb.py
|
Python
|
apache-2.0
| 14,755 | 0.009353 |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import re
from cfgm_common import jsonutils as json
import time
import gevent
import disc_consts
import disc_exceptions
from datetime import datetime
from gevent.coros import BoundedSemaphore
import pycassa
import pycassa.util
from pycassa.system_manager import *
from pycassa.util import *
from pycassa.types import *
from sandesh_common.vns.constants import DISCOVERY_SERVER_KEYSPACE_NAME, \
CASSANDRA_DEFAULT_GC_GRACE_SECONDS
class DiscoveryCassandraClient(object):
_DISCOVERY_KEYSPACE_NAME = DISCOVERY_SERVER_KEYSPACE_NAME
_DISCOVERY_CF_NAME = 'discovery'
@classmethod
def get_db_info(cls):
db_info = [(cls._DISCOVERY_KEYSPACE_NAME, [cls._DISCOVERY_CF_NAME])]
return db_info
# end get_db_info
def __init__(self, module, cass_srv_list, reset_config=False,
max_retries=5, timeout=5, cass_credential=None):
self._disco_cf_name = 'discovery'
self._keyspace_name = 'DISCOVERY_SERVER'
self._reset_config = reset_config
self._credential = cass_credential
self._cassandra_init(cass_srv_list, max_retries, timeout)
self._debug = {
}
#end __init__
# Helper routines for cassandra
def _cassandra_init(self, server_list, max_retries, timeout):
# column name <table-name>, <id1>, <id2>
disco_cf_info = (self._disco_cf_name,
CompositeType(AsciiType(), UTF8Type(), UTF8Type()), AsciiType())
# 1. Ensure keyspace and schema/CFs exist
self._cassandra_ensure_keyspace(server_list, self._keyspace_name,
[disco_cf_info])
pool = pycassa.ConnectionPool(self._keyspace_name,
server_list, max_overflow=-1,
use_threadlocal=True, prefill=True,
pool_size=100, pool_timeout=120,
max_retries=max_retries, timeout=timeout,
credentials=self._credential)
rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
self._disco_cf = pycassa.ColumnFamily(pool, self._disco_cf_name,
read_consistency_level = rd_consistency,
write_consistency_level = wr_consistency)
#end _cassandra_init
def _cassandra_ensure_keyspace(self, server_list,
keyspace_name, cf_info_list):
# Retry till cassandra is up
server_idx = 0
num_dbnodes = len(server_list)
connected = False
while not connected:
try:
cass_server = server_list[server_idx]
sys_mgr = SystemManager(cass_server,credentials=self._credential)
connected = True
except Exception as e:
# TODO do only for thrift.transport.TTransport.TTransportException
server_idx = (server_idx + 1) % num_dbnodes
time.sleep(3)
if self._reset_config:
try:
sys_mgr.drop_keyspace(keyspace_name)
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
try:
# TODO replication_factor adjust?
sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY,
{'replication_factor': str(num_dbnodes)})
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
for cf_info in cf_info_list:
try:
(cf_name, comparator_type, validator_type) = cf_info
sys_mgr.create_column_family(keyspace_name, cf_name,
comparator_type = comparator_type, default_validation_class = validator_type)
sys_mgr.alter_column_family(keyspace_name, cf_name,
gc_grace_seconds=CASSANDRA_DEFAULT_GC_GRACE_SECONDS)
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
#end _cassandra_ensure_keyspace
def get_debug_stats(self):
return self._debug
# end
"""
various column names
('client', client_id, 'client-entry')
('subscriber', service_id, client_id)
('subscription', client_id, service_id)
('service', service_id, 'service-entry')
"""
# decorator to catch connectivity error
def cass_error_handler(func):
def error_handler(*args, **kwargs):
try:
return func(*args,**kwargs)
except (pycassa.pool.AllServersUnavailable,
pycassa.pool.MaximumRetryException):
raise disc_exceptions.ServiceUnavailable()
            except Exception as e:
raise
return error_handler
# return all publisher entries
@cass_error_handler
def service_entries(self, service_type = None):
col_name = ('service',)
try:
            data = self._disco_cf.get_range(column_start = col_name, column_finish = col_name)
for service_type, services in data:
for col_name in services:
col_value = services[col_name]
entry = json.loads(col_value)
col_name = ('subscriber', entry['service_id'],)
entry['in_use'] = self._disco_cf.get_count(service_type,
column_start = col_name, column_finish = col_name)
yield(entry)
except pycassa.pool.AllServersUnavailable:
raise disc_exceptions.ServiceUnavailable()
#raise StopIteration
# return all clients
def subscriber_entries(self):
col_name = ('client',)
data = self._disco_cf.get_range(column_start = col_name, column_finish = col_name)
for service_type, clients in data:
for col_name in clients:
(foo, client_id, service_id) = col_name
# skip pure client entry
if service_id == disc_consts.CLIENT_TAG:
continue
yield((client_id, service_type))
# end
# return all subscriptions
@cass_error_handler
def get_all_clients(self, service_type=None, service_id=None):
r = []
entry_format_subscriber = False
if service_type and service_id:
# ('subscriber', service_id, client_id)
col_name = ('subscriber', service_id,)
try:
clients = self._disco_cf.get(service_type, column_start = col_name,
column_finish = col_name, column_count = disc_consts.MAX_COL)
except pycassa.NotFoundException:
return None
data = [(service_type, dict(clients))]
entry_format_subscriber = True
elif service_type:
col_name = ('client', )
try:
clients = self._disco_cf.get(service_type, column_start = col_name,
column_finish = col_name, column_count = disc_consts.MAX_COL)
except pycassa.NotFoundException:
return None
data = [(service_type, dict(clients))]
else:
col_name = ('client', )
try:
data = self._disco_cf.get_range(column_start=col_name, column_finish=col_name)
except pycassa.NotFoundException:
return None
for service_type, clients in data:
rr = []
for col_name in clients:
if entry_format_subscriber:
(foo, service_id, client_id) = col_name
else:
(foo, client_id, service_id) = col_name
# skip pure client entry
                if service_id == disc_consts.CLIENT_TAG:
                    continue
|
ricardogsilva/PyWPS
|
pywps/inout/formats/lists.py
|
Python
|
mit
| 1,471 | 0.00068 |
"""List of supported formats
"""
from collections import namedtuple
_FORMAT = namedtuple('FormatDefinition', 'mime_type,'
'extension, schema')
_FORMATS = namedtuple('FORMATS', 'GEOJSON, JSON, SHP, GML, GEOTIFF, WCS,'
'WCS100, WCS110, WCS20, WFS, WFS100,'
'WFS110, WFS20, WMS, WMS130, WMS110,'
'WMS100')
FORMATS = _FORMATS(
_FORMAT('application/vnd.geo+json', '.geojson', None),
_FORMAT('application/json', '.json', None),
_FORMAT('application/x-zipped-shp', '.zip', None),
_FORMAT('application/gml+xml', '.gml', None),
_FORMAT('image/tiff; subtype=geotiff', '.tiff', None),
    _FORMAT('application/x-ogc-wcs', '.xml', None),
    _FORMAT('application/x-ogc-wcs; version=1.0.0', '.xml', None),
_FORMAT('application/x-ogc-wcs; version=1.1.0', '.xml', None),
_FORMAT('application/x-ogc-wcs; version=2.0', '.xml', None),
_FORMAT('application/x-ogc-wfs', '.xml', None),
_FORMAT('application/x-ogc-wfs; version=1.0.0', '.xml', None),
    _FORMAT('application/x-ogc-wfs; version=1.1.0', '.xml', None),
_FORMAT('application/x-ogc-wfs; version=2.0', '.xml', None),
_FORMAT('application/x-ogc-wms', '.xml', None),
_FORMAT('application/x-ogc-wms; version=1.3.0', '.xml', None),
_FORMAT('application/x-ogc-wms; version=1.1.0', '.xml', None),
_FORMAT('application/x-ogc-wms; version=1.0.0', '.xml', None)
)
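# Illustrative lookup (based on the definitions above): the fields are plain
# namedtuple attributes, e.g.
#   FORMATS.GEOJSON.mime_type  # -> 'application/vnd.geo+json'
#   FORMATS.SHP.extension      # -> '.zip'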
|
mariofg92/ivmario
|
web2.py
|
Python
|
gpl-3.0
| 286 | 0.013986 |
from flask import Flask,request, jsonify
import json
app = Flask(__name__)
@app.route("/")
def rutaStatus():
return jsonify(status='OK')
@app.route("/status")
def rutaStatusDocker():
return jsonify(status='OK')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
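    # Illustrative check (assumes the service is reachable on port 80):
    #   curl http://localhost/status   -> {"status": "OK"}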
|
supermari0/ironic
|
ironic/dhcp/none.py
|
Python
|
apache-2.0
| 1,015 | 0 |
# Copyright 2014 Rackspace, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.dhcp import base
class NoneDHCPApi(base.BaseDHCP):
"""No-op DHCP API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
pass
def update_dhcp_opts(self, task, options, vifs=None):
pass
def update_port_address(self, port_id, address, token=None):
pass
def get_ip_addresses(self, task):
return []
|
kevinadda/dmn-chatbot
|
dmn_plus.py
|
Python
|
mit
| 15,738 | 0.004384 |
import sys
import time
import numpy as np
from copy import deepcopy
import tensorflow as tf
import babi_input
class Config(object):
"""Holds model hyperparams and data information."""
batch_size = 100
embed_size = 80
hidden_size = 80
max_epochs = 256
early_stopping = 20
dropout = 0.9
lr = 0.001
l2 = 0.001
cap_grads = False
max_grad_val = 10
noisy_grads = False
word2vec_init = False
embedding_init = 1.7320508 # root 3
# set to zero with strong supervision to only train gates
strong_supervision = False
beta = 1
drop_grus = False
anneal_threshold = 1000
anneal_by = 1.5
num_hops = 3
num_attention_features = 4
max_allowed_inputs = 130
num_train = 9000
floatX = np.float32
babi_id = "1"
babi_test_id = ""
train_mode = True
def _add_gradient_noise(t, stddev=1e-3, name=None):
"""Adds gradient noise as described in http://arxiv.org/abs/1511.06807
The input Tensor `t` should be a gradient.
The output will be `t` + gaussian noise.
0.001 was said to be a good fixed value for memory networks."""
with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
t = tf.convert_to_tensor(t, name="t")
gn = tf.random_normal(tf.shape(t), stddev=stddev)
return tf.add(t, gn, name=name)
# from https://github.com/domluna/memn2n
def _position_encoding(sentence_size, embedding_size):
"""Position encoding described in section 4.1 in "End to End Memory Networks" (http://arxiv.org/pdf/1503.08895v5.pdf)"""
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size+1
le = embedding_size+1
for i in range(1, le):
for j in range(1, ls):
encoding[i-1, j-1] = (i - (le-1)/2) * (j - (ls-1)/2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
return np.transpose(encoding)
# TODO fix positional encoding so that it varies according to sentence lengths
def _xavier_weight_init():
    """Xavier initializer for all variables except embeddings as described in [1]"""
def _xavier_initializer(shape, **kwargs):
eps = np.sqrt(6) / np.sqrt(np.sum(shape))
out = tf.random_uniform(shape, minval=-eps, maxval=eps)
return out
return _xavier_initializer
# from https://danijar.com/variable-sequence-lengths-in-tensorflow/
# used only for custom attention GRU as TF handles this with the sequence length param for normal RNNs
def _last_relevant(output, length):
"""Finds the output at the end of each input"""
batch_size = int(output.get_shape()[0])
max_length = int(output.get_shape()[1])
out_size = int(output.get_shape()[2])
index = tf.range(0, batch_size) * max_length + (length - 1)
flat = tf.reshape(output, [-1, out_size])
relevant = tf.gather(flat, index)
return relevant
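# Illustrative trace (values chosen for clarity, not from this file): with
# batch_size=2, max_length=3 and length=[2, 3], index becomes [1, 5], so the
# gathered rows correspond to output[0, 1] and output[1, 2] -- the last valid
# timestep of each sequence.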
class DMN_PLUS(object):
def load_data(self, debug=False):
"""Loads train/valid/test data and sentence encoding"""
if self.config.train_mode:
self.train, self.valid, self.word_embedding, self.max_q_len, self.max_input_len, self.max_sen_len, self.num_supporting_facts, self.vocab_size = babi_input.load_babi(self.config, split_sentences=True)
else:
self.test, self.word_embedding, self.max_q_len, self.max_input_len, self.max_sen_len, self.num_supporting_facts, self.vocab_size = babi_input.load_babi(self.config, split_sentences=True)
self.encoding = _position_encoding(self.max_sen_len, self.config.embed_size)
def add_placeholders(self):
"""add data placeholder to graph"""
self.question_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.max_q_len))
self.input_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.max_input_len, self.max_sen_len))
self.question_len_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size,))
self.input_len_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size,))
self.answer_placeholder = tf.placeholder(tf.int64, shape=(self.config.batch_size,))
self.rel_label_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.num_supporting_facts))
self.dropout_placeholder = tf.placeholder(tf.float32)
def add_reused_variables(self):
"""Adds trainable variables which are later reused"""
gru_cell = tf.nn.rnn_cell.GRUCell(self.config.hidden_size)
# apply droput to grus if flag set
if self.config.drop_grus:
self.gru_cell = tf.nn.rnn_cell.DropoutWrapper(gru_cell, input_keep_prob=self.dropout_placeholder, output_keep_prob=self.dropout_placeholder)
else:
self.gru_cell = gru_cell
with tf.variable_scope("memory/attention", initializer=_xavier_weight_init()):
b_1 = tf.get_variable("bias_1", (self.config.embed_size,))
W_1 = tf.get_variable("W_1", (self.config.embed_size*self.config.num_attention_features, self.config.embed_size))
W_2 = tf.get_variable("W_2", (self.config.embed_size, 1))
b_2 = tf.get_variable("bias_2", 1)
with tf.variable_scope("memory/attention_gru", initializer=_xavier_weight_init()):
Wr = tf.get_variable("Wr", (self.config.embed_size, self.config.hidden_size))
Ur = tf.get_variable("Ur", (self.config.hidden_size, self.config.hidden_size))
br = tf.get_variable("bias_r", (1, self.config.hidden_size))
W = tf.get_variable("W", (self.config.embed_size, self.config.hidden_size))
U = tf.get_variable("U", (self.config.hidden_size, self.config.hidden_size))
bh = tf.get_variable("bias_h", (1, self.config.hidden_size))
def get_predictions(self, output):
"""Get answer predictions from output"""
preds = tf.nn.softmax(output)
pred = tf.argmax(preds, 1)
return pred
def add_loss_op(self, output):
"""Calculate loss"""
# optional strong supervision of attention with supporting facts
gate_loss = 0
if self.config.strong_supervision:
for i, att in enumerate(self.attentions):
labels = tf.gather(tf.transpose(self.rel_label_placeholder), 0)
gate_loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(att, labels))
loss = self.config.beta*tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(output, self.answer_placeholder)) + gate_loss
# add l2 regularization for all variables except biases
for v in tf.trainable_variables():
if not 'bias' in v.name.lower():
loss += self.config.l2*tf.nn.l2_loss(v)
tf.summary.scalar('loss', loss)
return loss
def add_training_op(self, loss):
"""Calculate and apply gradients"""
opt = tf.train.AdamOptimizer(learning_rate=self.config.lr)
gvs = opt.compute_gradients(loss)
# optionally cap and noise gradients to regularize
if self.config.cap_grads:
gvs = [(tf.clip_by_norm(grad, self.config.max_grad_val), var) for grad, var in gvs]
if self.config.noisy_grads:
gvs = [(_add_gradient_noise(grad), var) for grad, var in gvs]
train_op = opt.apply_gradients(gvs)
return train_op
def get_question_representation(self, embeddings):
"""Get question vectors via embedding and GRU"""
        questions = tf.nn.embedding_lookup(embeddings, self.question_placeholder)
questions = tf.split(1, self.max_q_len, questions)
questions = [tf.squeeze(q, squeeze_dims=[1]) for q in questions]
_, q_vec = tf.nn.rnn(self.gru_cell, questions, dtype=np.float32, sequence_length=self.question_len_placeholder)
return q_vec
def get_input_representation(self, embeddings):
"""Get fact (sentence) vectors via embedding, positional encoding and bi-directional GRU"""
# get word vectors from embedding
inputs = tf.nn.embedding_lookup(embeddings, self.input_placeholder)
        # use encoding to get sentence representations
|
baxeico/pyworkingdays
|
workingdays/__init__.py
|
Python
|
mit
| 947 | 0.006336 |
from datetime import timedelta
from math import copysign
def is_workingday(input_date):
return input_date.isoweekday() < 6
def add(datestart, days):
sign = lambda x: int(copysign(1, x))
dateend = datestart
while days:
dateend = dateend + timedelta(days=sign(days))
if is_workingday(dateend):
days -= sign(days)
return dateend
def diff(date1, date2):
if date1 == date2:
return 0
if date1 > date2:
min_date = date2
        max_date = date1
else:
min_date = date1
max_date = date2
diff = 0
current_date = min_date
while current_date != max_date:
current_date = current_date + timedelta(days=1)
if is_workingday(current_date):
diff += 1
return diff
def next(datestart):
while True:
datestart = datestart + timedelta(days=1)
if is_workingday(datestart):
break
return datestart
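# Illustrative usage (dates chosen arbitrarily; 2015-07-03 is a Friday):
#   from datetime import date
#   add(date(2015, 7, 3), 2)                  # -> date(2015, 7, 7), Tuesday
#   diff(date(2015, 7, 3), date(2015, 7, 7))  # -> 2 working days
#   next(date(2015, 7, 3))                    # -> date(2015, 7, 6), Monday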
|
mogproject/mog-commons-python
|
tests/mog_commons/test_unittest.py
|
Python
|
apache-2.0
| 3,948 | 0.003214 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import os
import six
from mog_commons import unittest
class TestUnitTest(unittest.TestCase):
def test_assert_output(self):
def f():
print('abc')
print('123')
sys.stderr.writelines(['def\n', '456\n'])
self.assertOutput('abc\n123\n', 'def\n456\n', f)
def test_assert_output_fail(self):
def f():
print('abc')
print('123')
sys.stderr.writelines(['def\n', '456\n'])
self.assertRaisesRegexp(AssertionError, 'abc.+ != ', self.assertOutput, '', 'def\n456\n', f)
self.assertRaisesRegexp(AssertionError, 'def.+ != ', self.assertOutput, 'abc\n123\n', '', f)
self.assertRaisesRegexp(AssertionError, 'def.+ != .+def', self.assertOutput, 'abc\n123\n', 'def\n456\n\n', f)
def test_assert_system_exit(self):
self.assertSystemExit(123, lambda: sys.exit(123))
self.assertSystemExit(234, lambda x: sys.exit(x), 234)
def test_with_bytes_output(self):
        with self.withBytesOutput() as (out, err):
out.write(b'\xff\xfe')
out.write('あいうえお'.encode('utf-8'))
err.write(b'\xfd\xfc')
self.assertEqual(out.getvalue(), b'\xff\xfe' + 'あいうえお'.encode('utf-8'))
self.assertEqual(err.getvalue(), b'\xfd\xfc')
    def test_with_bytes_output_types(self):
# accepts unicode
def f(data, expected):
with self.withBytesOutput() as (out, err):
for d in data:
out.write(d)
self.assertEqual(out.getvalue(), expected)
f(['あいうえお'], 'あいうえお'.encode('utf-8'))
f([b'\xff', 'あいうえお'], b'\xff' + 'あいうえお'.encode('utf-8'))
# accepts only string-like types
self.assertRaises(TypeError, f, [[]])
self.assertRaises(TypeError, f, [{'a': 20}])
self.assertRaises(TypeError, f, [1.23])
def test_with_assert_output_file(self):
def f(text):
with self.withAssertOutputFile(os.path.join('tests', 'resources', 'utf8_ja.txt')) as out:
out.write(text.encode('utf-8'))
def g(text):
with self.withAssertOutputFile(
os.path.join('tests', 'resources', 'utf8_ja_template.txt'), {'text': 'かきくけこ'}
) as out:
out.write(text.encode('utf-8'))
f('あいうえお\n')
self.assertRaisesRegexp(AssertionError, 'あいうえお', f, 'あいうえお')
g('かきくけこ\n')
self.assertRaisesRegexp(AssertionError, 'かきくけこ', g, 'あいうえお\n')
def test_assert_raises_message(self):
class MyException(Exception):
pass
def f(msg):
raise MyException(msg)
self.assertRaisesMessage(MyException, 'あいうえお', f, 'あいうえお')
self.assertRaisesMessage(AssertionError, 'MyException not raised',
self.assertRaisesMessage, MyException, 'あいうえお', lambda: None)
if six.PY2:
expected = ("u'\\u3042\\u3044\\u3046\\u3048' != u'\\u3042\\u3044\\u3046\\u3048\\u304a'\n" +
"- \u3042\u3044\u3046\u3048\n+ \u3042\u3044\u3046\u3048\u304a\n? +\n")
else:
expected = "'あいうえ' != 'あいうえお'\n- あいうえ\n+ あいうえお\n? +\n"
self.assertRaisesMessage(AssertionError, expected,
self.assertRaisesMessage, MyException, 'あいうえお', f, 'あいうえ')
def test_assert_system_exit_fail(self):
self.assertRaisesRegexp(AssertionError, 'SystemExit not raised', self.assertSystemExit, 0, lambda: 0)
self.assertRaisesRegexp(AssertionError, '1 != 0', self.assertSystemExit, 0, lambda: sys.exit(1))
|
niavlys/kivy
|
kivy/uix/carousel.py
|
Python
|
mit
| 21,776 | 0.000092 |
'''
Carousel
========
.. versionadded:: 1.4.0
The :class:`Carousel` widget provides the classic mobile-friendly carousel view
where you can swipe between slides.
You can add any content to the carousel and use it horizontally or vertically.
The carousel can display pages in a loop or not.
Example::
class Example1(App):
def build(self):
carousel = Carousel(direction='right')
for i in range(10):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = Factory.AsyncImage(source=src, allow_stretch=True)
carousel.add_widget(image)
return carousel
Example1().run()
.. versionchanged:: 1.5.0
The carousel now supports active children, like the
:class:`~kivy.uix.scrollview.ScrollView`. It will detect a swipe gesture
according to :attr:`Carousel.scroll_timeout` and
:attr:`Carousel.scroll_distance`.
In addition, the container used for adding a slide is now hidden in
the API. We made a mistake by exposing it to the user. The impacted
properties are:
:attr:`Carousel.slides`, :attr:`Carousel.current_slide`,
:attr:`Carousel.previous_slide` and :attr:`Carousel.next_slide`.
'''
__all__ = ('Carousel', )
from functools import partial
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.animation import Animation
from kivy.uix.stencilview import StencilView
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import BooleanProperty, OptionProperty, AliasProperty, \
NumericProperty, ListProperty, ObjectProperty, StringProperty
class Carousel(StencilView):
'''Carousel class. See module documentation for more information.
'''
slides = ListProperty([])
'''List of slides inside the Carousel. The slides are added when a
widget is added to Carousel using add_widget().
:attr:`slides` is a :class:`~kivy.properties.ListProperty` and is
read-only.
'''
def _get_slides_container(self):
return [x.parent for x in self.slides]
slides_container = AliasProperty(_get_slides_container, None,
bind=('slides', ))
direction = OptionProperty('right',
options=('right', 'left', 'top', 'bottom'))
'''Specifies the direction in which the slides are ordered i.e. the
direction from which the user swipes to go from one slide to the next.
Can be `right`, `left`, 'top', or `bottom`. For example, with
the default value of `right`, the second slide is to the right
of the first and the user would swipe fr
|
om the right towards the
left to get to the second slide.
:attr:`direction` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'right'.
'''
min_move = NumericProperty(0.2)
'''Defines the minimal distance from the edge where the movement is
considered a swipe gesture and the Carousel will change its content.
This is a percentage of the Carousel width.
If the movement doesn't reach this minimal va
|
lue, then the movement is
cancelled and the content is restored to its original position.
:attr:`min_move` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.2.
'''
anim_move_duration = NumericProperty(0.5)
'''Defines the duration of the Carousel animation between pages.
:attr:`anim_move_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.5.
'''
anim_cancel_duration = NumericProperty(0.3)
'''Defines the duration of the animation when a swipe movement is not
    accepted. This is generally when the user doesn't swipe enough.
See :attr:`min_move`.
:attr:`anim_cancel_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.3.
'''
loop = BooleanProperty(False)
    '''Allow the Carousel to swipe infinitely. When the user reaches the last
    page, they will return to the first page when trying to swipe to the next.
:attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
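    # Illustrative instantiation (mirrors the documentation above, not taken
    # from the original source):
    #   Carousel(direction='right', loop=True)  # wraps from the last slide to the first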
def _get_index(self):
if self.slides:
return self._index % len(self.slides)
return float('nan')
def _set_index(self, value):
if self.slides:
self._index = value % len(self.slides)
else:
self._index = float('nan')
index = AliasProperty(_get_index, _set_index, bind=('_index', 'slides'))
'''Get/Set the current visible slide based on the index.
:attr:`index` is a :class:`~kivy.properties.AliasProperty` and defaults
to 0 (the first item).
'''
def _prev_slide(self):
slides = self.slides
len_slides = len(slides)
index = self.index
if len_slides < 2: # None, or 1 slide
return None
if len_slides == 2:
if index == 0:
return None
if index == 1:
return slides[0]
if self.loop and index == 0:
return slides[-1]
if index > 0:
return slides[index - 1]
previous_slide = AliasProperty(_prev_slide, None, bind=('slides', 'index'))
'''The previous slide in the Carousel. It is None if the current slide is
the first slide in the Carousel. If :attr:`orientation` is 'horizontal',
the previous slide is to the left. If :attr:`orientation` is 'vertical',
the previous slide towards the bottom.
:attr:`previous_slide` is a :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
This property doesn't expose the container used for storing the slide.
It returns the widget you have added.
'''
def _curr_slide(self):
if len(self.slides):
return self.slides[self.index]
current_slide = AliasProperty(_curr_slide, None, bind=('slides', 'index'))
'''The currently shown slide.
:attr:`current_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property doesn't expose the container used for storing the slide.
It returns widget you have added.
'''
def _next_slide(self):
if len(self.slides) < 2: # None, or 1 slide
return None
if len(self.slides) == 2:
if self.index == 0:
return self.slides[1]
if self.index == 1:
return None
if self.loop and self.index == len(self.slides) - 1:
return self.slides[0]
if self.index < len(self.slides) - 1:
return self.slides[self.index + 1]
next_slide = AliasProperty(_next_slide, None, bind=('slides', 'index'))
'''The next slide in the Carousel. It is None if the current slide is
the last slide in the Carousel. If :attr:`orientation` is 'horizontal',
the next slide is to the right. If :attr:`orientation` is 'vertical',
the next slide is towards the bottom.
:attr:`next_slide` is a :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property doesn't expose the container used for storing the slide.
It returns the widget you have added.
'''
scroll_timeout = NumericProperty(200)
'''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.
If the user has not moved :attr:`scroll_distance` within the timeout,
the scrolling will be disabled and the touch event will go to the children.
:attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and
defaults to 200 (milliseconds)
.. versionadded:: 1.5.0
'''
scroll_distance = NumericProperty('20dp')
'''Distance to move before scrolling the :class:`Carousel` in pixels. As
soon as the distance has been traveled, the :class:`Carousel` will start
to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and
defaults to 20dp.
.. versionadded:: 1.5.0
'''
anim_type = StringProperty('out_quad')
'''Type of animation to
|
fin/froide
|
froide/bounce/signals.py
|
Python
|
mit
| 236 | 0 |
from django.dispatch import Signal
user_email_bounced = Signal() # args: ['bounce', 'should_deactivate']
email_bounced = Signal() # args: ['bounce', 'should_deactivate']
email_unsubscribed = Signal() # args: ['email', 'reference']
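# Illustrative receiver sketch (handler name is hypothetical; requires
# `from django.dispatch import receiver`):
#   @receiver(email_bounced)
#   def handle_email_bounced(sender, bounce, should_deactivate, **kwargs):
#       ...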
|
tzuria/Shift-It-Easy
|
webApp/shift-it-easy-2015/web/pages/MainManager.py
|
Python
|
mit
| 87,797 | 0.038361 |
#!/usr/bin/env python
#
# Copyright 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext.webapp import template
from models.employee import Employee
from models.constrain import Constrain
from models.preparingSchdule import PreparingSchedule
from models.submittedShifts import SubmittedShifts
import json
import time
from datetime import date
from datetime import timedelta
from Dates import Dates
import webapp2
class MainHandler(webapp2.RequestHandler):
def get(self):
userName = None
if self.request.cookies.get('our_token'): #the cookie that should contain the access token!
userName = Employee.checkToken(self.request.cookies.get('our_token'))
template_variables = {}
if userName:
template_variables['userName'] = userName.userName
dates = Dates(template_variables)
template_variables = dates.nextTwoWeeks()
#### First week ####
sunday0date = dates.createDateObject(0,1)
monday0date = dates.createDateObject(0,2)
tuesday0date = dates.createDateObject(0,3)
wednesday0date = dates.createDateObject(0,4)
thursday0date = dates.createDateObject(0,5)
friday0date = dates.createDateObject(0,6)
saturday0date = dates.createDateObject(0,7)
sunday1date = dates.createDateObject(1,1)
monday1date = dates.createDateObject(1,2)
tuesday1date = dates.createDateObject(1,3)
wednesday1date = dates.createDateObject(1,4)
thursday1date = dates.createDateObject(1,5)
friday1date = dates.createDateObject(1,6)
saturday1date = dates.createDateObject(1,7)
# Add default "white" constrains to employees who hasn't added constrains on their side.
employees = Employee.query().fetch()
if employees:
for e in employees:
constrains = Constrain.query(Constrain.employeeUN == e.userName).fetch()
if not constrains:
Constrain.addConstrains(e.userName,sunday0date)
# Sunday0 night info:
head_nurse_want = Constrain.getShiftHeads(sunday0date, 0, 1)
head_nurse_dont_care = Constrain.getShiftHeads(sunday0date, 0, 0)
head_nurse_prefer_not = Constrain.getShiftHeads(sunday0date, 0, 2)
head_nurse_cant = Constrain.getShiftHeads(sunday0date, 0, 3)
want = Constrain.getCrew(sunday0date, 0, 1)
dont_care = Constrain.getCrew(sunday0date, 0, 0)
prefer_not = Constrain.getCrew(sunday0date, 0, 2)
cant = Constrain.getCrew(sunday0date, 0, 3)
assignBeforeHead = PreparingSchedule.checkIfAssignAlready(sunday0date, 0, 0)
assignBeforeSecond = PreparingSchedule.checkIfAssignAlready(sunday0date, 0, 1)
assignBeforeStandBy = PreparingSchedule.checkIfAssignAlready(sunday0date, 0, 3)
if assignBeforeHead:
template_variables['Sunday0NightAssignBeforeHead'] = assignBeforeHead
if assignBeforeSecond:
template_variables['Sunday0NightAssignBeforeSecond'] = assignBeforeSecond
if assignBeforeStandBy:
template_variables['Sunday0NightAssignBeforeStandBy'] = assignBeforeStandBy
if head_nurse_want:
template_variables['HeadNurseWhoWantSunday0Night'] = head_nurse_want
if head_nurse_dont_care:
template_variables['HeadNurseWhoDontCareSunday0Night'] = head_nurse_dont_care
if head_nurse_prefer_not:
template_variables['HeadNurseWhoPreferNotSunday0Night'] = head_nurse_prefer_not
if head_nurse_cant:
template_variables['HeadNurseWhoCantSunday0Night'] = head_nurse_cant
if want:
            template_variables['NurseWhoWantSunday0Night'] = want
        if dont_care:
            template_variables['NurseWhoDontCareSunday0Night'] = dont_care
if prefer_not:
template_variables['NurseWhoPreferNotSunday0Night'] = prefer_not
if cant:
template_variables['NurseWhoCantSunday0Night'] = cant
# Sunday0 morning info:
head_nurse_want = Constrain.getShiftHeads(sunday0date, 1, 1)
head_nurse_dont_care = Constrain.getShiftHeads(sunday0date, 1, 0)
head_nurse_prefer_not = Constrain.getShiftHeads(sunday0date, 1, 2)
head_nurse_cant = Constrain.getShiftHeads(sunday0date, 1, 3)
want = Constrain.getCrew(sunday0date, 1, 1)
dont_care = Constrain.getCrew(sunday0date, 1, 0)
prefer_not = Constrain.getCrew(sunday0date, 1, 2)
cant = Constrain.getCrew(sunday0date, 1, 3)
assignBeforeHead = PreparingSchedule.checkIfAssignAlready(sunday0date, 1, 0)
assignBeforeSecond = PreparingSchedule.checkIfAssignAlready(sunday0date, 1, 1)
assignBeforeStandBy = PreparingSchedule.checkIfAssignAlready(sunday0date, 1, 3)
if assignBeforeHead:
template_variables['Sunday0MorningAssignBeforeHead'] = assignBeforeHead
if assignBeforeSecond:
template_variables['Sunday0MorningAssignBeforeSecond'] = assignBeforeSecond
if assignBeforeStandBy:
template_variables['Sunday0MorningAssignBeforeStandBy'] = assignBeforeStandBy
if head_nurse_want:
template_variables['HeadNurseWhoWantSunday0Morning'] = head_nurse_want
if head_nurse_dont_care:
template_variables['HeadNurseWhoDontCareSunday0Morning'] = head_nurse_dont_care
if head_nurse_prefer_not:
template_variables['HeadNurseWhoPreferNotSunday0Morning'] = head_nurse_prefer_not
if head_nurse_cant:
template_variables['HeadNurseWhoCantSunday0Morning'] = head_nurse_cant
if want:
template_variables['NurseWhoWantSunday0Morning'] = want
if dont_care:
template_variables['NurseWhoDontCareSunday0Morning'] = dont_care
if prefer_not:
template_variables['NurseWhoPreferNotSunday0Morning'] = prefer_not
if cant:
template_variables['NurseWhoCantSunday0Morning'] = cant
# Sunday0 noon info:
head_nurse_want = Constrain.getShiftHeads(sunday0date, 2, 1)
head_nurse_dont_care = Constrain.getShiftHeads(sunday0date, 2, 0)
head_nurse_prefer_not = Constrain.getShiftHeads(sunday0date, 2, 2)
head_nurse_cant = Constrain.getShiftHeads(sunday0date, 2, 3)
want = Constrain.getCrew(sunday0date, 2, 1)
dont_care = Constrain.getCrew(sunday0date, 2, 0)
prefer_not = Constrain.getCrew(sunday0date, 2, 2)
cant = Constrain.getCrew(sunday0date, 2, 3)
assignBeforeHead = PreparingSchedule.checkIfAssignAlready(sunday0date, 2, 0)
assignBeforeSecond = PreparingSchedule.checkIfAssignAlready(sunday0date, 2, 1)
assignBeforeStandBy = PreparingSchedule.checkIfAssignAlready(sunday0date, 2, 3)
if assignBeforeHead:
template_variables['Sunday0NoonAssignBeforeHead'] = assignBeforeHead
if assignBeforeSecond:
template_variables['Sunday0NoonAssignBeforeSecond'] = assignBeforeSecond
if assignBeforeStandBy:
template_variables['Sunday0NoonAssignBeforeStandBy'] = assignBeforeStandBy
if head_nurse_want:
template_variables['HeadNurseWhoWantSunday0Noon'] = head_nurse_want
if head_nurse_dont_care:
template_variables['HeadNurseWhoDontCareSunday0Noon'] = head_nurse_dont_care
if head_nurse_prefer_not:
template_variables['HeadNurseWhoPreferNotSunday0Noon'] = head_nurse_prefer_not
if head_nurse_cant:
template_variables['HeadNurseWhoCantSunday0Noon'] = head_nurse_cant
if want:
template_variables['NurseWhoWantSunday0Noon'] = want
if dont_care:
template_variables['NurseWhoDontCareSunday0Noon'] = dont_care
if prefer_not:
template_variables['NurseWhoPreferNotSunday0Noon'] = prefer_not
if cant:
template_variables['NurseWhoCantSunday0Noon'] = cant
# Monday0 night info:
head_nurse_want = Constrain.getShiftHeads(monday0date, 0, 1)
head_nurse_dont_care = Constrain.getShiftHeads(monday0date, 0, 0)
        head_nurse_prefer_not = Constrain.getShiftHeads(monday0date, 0, 2)
|
pthatcher/psync
|
src/exp/watch.py
|
Python
|
bsd-3-clause
| 2,850 | 0.005965 |
# Copyright (c) 2011, Peter Thatcher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from thirdparty import pyinotify
# Wow. watching ~ uses A LOT of CPU!
class Handler(pyinotify.ProcessEvent):
def process_default(self, event):
print ("default", event)
# also process_IN_CREATE and process_IN_DELETE
def process_IN_MODIFY(self, event):
print ("IN_MODIFY", event.path, event.name)
def process_IN_CREATE(self, event):
print ("IN_CREATE", event.path, event.name)
def process_IN_DELETE(self, event):
print ("IN_DELETE", event.path, event.name)
def on_loop(notifier):
print ("on_loop", notifier)
if __name__ == "__main__":
import sys
root = sys.argv[1]
# pyinotify.log.setLevel(10)
handler = Handler()
# Exclude patterns from list
excl_lst = [] # ['(^|*/).bibble']
excl = pyinotify.ExcludeFilter(excl_lst)
wm = pyinotify.WatchManager()
# first arg can be a list
# can use pyinotify.ALL_EVENTS
# rec=True means recursive. Must have!
wm.add_watch(root, pyinotify.IN_MODIFY | pyinotify.IN_CREATE | pyinotify.IN_DELETE,
rec=True, auto_add=True, exclude_filter=excl)
notifier = pyinotify.Notifier(wm, default_proc_fun=handler)
notifier.loop(callback = on_loop)
# if daemonize = True, spawns another process
# notifier.loop(daemonize=True, callback=on_loop,
# pid_file='/tmp/pyinotify.pid', stdout='/tmp/stdout.txt')
|
schelleg/PYNQ
|
pynq/lib/logictools/tests/test_boolean_generator.py
|
Python
|
bsd-3-clause
| 14,127 | 0.000354 |
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import sample
from random import choice
from copy import deepcopy
import re
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools import LogicToolsController
from pynq.lib.logictools import BooleanGenerator
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest boolean generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_bool_state():
"""Test for the BooleanGenerator class.
This test will test configurations when all 5 pins of a LUT are
specified. Users need to manually check the output.
"""
ol.download()
input('\nDisconnect all the pins. Hit enter after done ...')
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
first_6_pins = [k for k in list(pin_dict.keys())[:6]]
out_pin = first_6_pins[5]
in_pins = first_6_pins[0:5]
or_expr = out_pin + '=' + ('|'.join(in_pins))
bool_generator = BooleanGenerator(mb_info)
assert bool_generator.status == 'RESET'
bool_generator.trace()
bool_generator.setup({'test_bool_state': or_expr})
assert bool_generator.status == 'READY'
bool_generator.run()
assert bool_generator.status == 'RUNNING'
print('Connect all of {} to GND ...'.format(in_pins))
assert user_answer_yes("{} outputs logic low?".format(out_pin)), \
"Boolean configurator fails to show logic low."
print('Connect any of {} to VCC ...'.format(in_pins))
assert user_answer_yes("{} outputs logic high?".format(out_pin)), \
"Boolean configurator fails to show logic high."
bool_generator.stop()
assert bool_generator.status == 'READY'
bool_generator.step()
assert bool_generator.status == 'RUNNING'
bool_generator.stop()
assert bool_generator.status == 'READY'
bool_generator.reset()
assert bool_generator.status == 'RESET'
del bool_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_bool_no_trace():
"""Test for the BooleanGenerator class.
This test will test whether users can show waveform when no trace analyzer
is used. An exception should be raised.
"""
ol.download()
bool_generator = BooleanGenerator(mb_info)
bool_generator.trace(use_analyzer=False)
exception_raised = False
try:
bool_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
bool_generator.reset()
del bool_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_bool_multiple():
"""Test for the BooleanGenerator class.
This test will test the configurations when only part of the
LUT pins are used. Multiple instances will be tested.
This is an automatic test so no user interaction is needed.
"""
ol.download()
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
first_10_pins = [k for k in list(pin_dict.keys())[:10]]
in_pins = first_10_pins[0:5]
out_pins = first_10_pins[5:10]
test_expressions = list()
operations = ['&', '|', '^']
for i in range(5):
operation = choice(operations)
test_expressions.append(out_pins[i] + '=' +
(operation.join(sample(in_pins, i+1))))
print('\nConnect randomly {} to VCC or GND.'.format(in_pins))
input('Hit enter after done ...')
bool_generator = BooleanGenerator(mb_info)
bool_generator.trace()
bool_generator.setup(expressions=test_expressions)
bool_generator.run()
for expr_label in bool_generator.expressions.keys():
waveform = bool_generator.waveforms[expr_label]
wavelanes_in = waveform.waveform_dict['signal'][0][1:]
wavelanes_out = waveform.waveform_dict['signal'][-1][1:]
expr = deepcopy(bool_generator.expressions[expr_label])
for wavelane in wavelanes_in:
if 'h' == wavelane['wave'][0]:
str_replace = '1'
elif 'l' == wavelane['wave'][0]:
str_replace = '0'
else:
raise ValueError("Unrecognizable pattern captured.")
expr = re.sub(r"\b{}\b".format(wavelane['name']),
str_replace, expr)
wavelane = wavelanes_out[0]
if 'h' == wavelane['wave'][0]:
str_replace = '1'
elif 'l' == wavelane['wave'][0]:
str_replace = '0'
else:
raise ValueError("Unrecognizable pattern captured.")
expr = re.sub(r"\b{}\b".format(wavelane['name']),
str_replace, expr)
expr = expr.replace('=', '==')
assert eval(expr), "Boolean expression {} not evaluating " \
"correctly.".format(
bool_generator.expressions[expr_label])
bool_generator.stop()
bool_generator.reset()
del bool_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_bool_step():
"""Test for the BooleanGenerator class.
This test will test whether the `step()` method works correctly.
Users will be asked to change input values during the test. The test
scenario is also an extreme case where only 2 samples are captured.
"""
ol.download()
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
first_10_pins = [k for k in list(pin_dict.keys())[:10]]
in_pins = first_10_pins[0:5]
out_pins = first_10_pins[5:10]
test_expressions = list()
operations = ['&', '|', '^']
for i in range(5):
operation = choice(operations)
test_expressions.append(out_pins[i] + '=' +
(operation.join(sample(in_pins, i+1))))
print('\nConnect randomly {} to VCC or GND.'.format(in_pins))
input('Hit enter after done ...')
bool_generator = BooleanGenerator(mb_info)
bool_generator.trace(num_analyzer_samples=2)
bool_generator.setup(expressions=test_expressions)
for i in range(2):
print('Change some of the connections from {}.'.format(in_pins))
        input('Hit enter after done ...')
|
sdpython/ensae_teaching_cs
|
_unittests/ut_dnotebooks/test_1A_notebook_soft_sql.py
|
Python
|
mit
| 1,241 | 0.002417 |
# -*- coding: utf-8 -*-
"""
@brief test log(time=92s)
"""
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
import ensae_teaching_cs
class TestNotebookRunner1a_soft_sql(unittest.TestCase):
def setUp(self):
add_missing_development_version(["pymyinstall", "pyensae", "pymmails", "jyquickhelper", "mlstatpy"],
__file__, hide=True)
def test_notebook_runner_soft_sql(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_notebook1a_soft_sql")
from ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_1a
keepnote = ls_notebooks("td1a_soft")
        for n in keepnote:
|
fLOG(n)
execute_notebooks(temp, keepnote,
lambda i, n: "csharp" not in n and "cython" not in n,
|
fLOG=fLOG,
clean_function=clean_function_1a,
dump=ensae_teaching_cs)
if __name__ == "__main__":
unittest.main()
|
pawkoz/dyplom
|
blender/build_files/cmake/cmake_consistency_check_config.py
|
Python
|
gpl-2.0
| 4,572 | 0.006124 |
import os
IGNORE = (
"/test/",
"/tests/gtests/",
"/BSP_GhostTest/",
"/release/",
"/xembed/",
"/TerraplayNetwork/",
"/ik_glut_test/",
# specific source files
"extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp",
"extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp",
"extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp",
"extern/bullet2/src/BulletCollision/CollisionDispatch/btBox2dBox2dCollisionAlgorithm.cpp",
"extern/bullet2/src/BulletCollision/CollisionDispatch/btConvex2dConvex2dAlgorithm.cpp",
"extern/bullet2/src/BulletCollision/CollisionDispatch/btInternalEdgeUtility.cpp",
"extern/bullet2/src/BulletCollision/CollisionShapes/btBox2dShape.cpp",
"extern/bullet2/src/BulletCollision/CollisionShapes/btConvex2dShape.cpp",
"extern/bullet2/src/BulletDynamics/Character/btKinematicCharacterController.cpp",
"extern/bullet2/src/BulletDynamics/ConstraintSolver/btHinge2Constraint.cpp",
"extern/bullet2/src/BulletDynamics/ConstraintSolver/btUniversalConstraint.cpp",
"extern/eltopo/common/meshes/ObjLoader.cpp",
"extern/eltopo/common/meshes/meshloader.cpp",
"extern/eltopo/common/openglutils.cpp",
"extern/eltopo/eltopo3d/broadphase_blenderbvh.cpp",
"source/blender/imbuf/intern/imbuf_cocoa.m",
"extern/recastnavigation/Recast/Source/RecastLog.cpp",
"extern/recastnavigation/Recast/Source/RecastTimer.cpp",
"intern/audaspace/SRC/AUD_SRCResampleFactory.cpp",
"intern/audaspace/SRC/AUD_SRCResampleReader.cpp",
"intern/cycles/render/film_response.cpp",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc",
"extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc",
"extern/bullet2/src/BulletCollision/CollisionDispatch/btBox2dBox2dCollisionAlgorithm.h",
"extern/bullet2/src/BulletCollision/CollisionDispatch/btConvex2dConvex2dAlgorithm.h",
"extern/bullet2/src/BulletCollision/CollisionDispatch/btInternalEdgeUtility.h",
"extern/bullet2/src/BulletCollision/CollisionShapes/btBox2dShape.h",
"extern/bullet2/src/BulletCollision/CollisionShapes/btConvex2dShape.h",
"extern/bullet2/src/BulletDynamics/Character/btKinematicCharacterController.h",
"extern/bullet2/src/BulletDynamics/ConstraintSolver/btHinge2Constraint.h",
"extern/bullet2/src/BulletDynamics/ConstraintSolver/btUniversalConstraint.h",
"extern/eltopo/common/meshes/Edge.hpp",
"extern/eltopo/common/meshes/ObjLoader.hpp",
"extern/eltopo/common/meshes/TriangleIndex.hpp",
"extern/eltopo/common/meshes/meshloader.h",
"extern/eltopo/eltopo3d/broadphase_blenderbvh.h",
"extern/recastnavigation/Recast/Include/RecastLog.h",
"extern/recastnavigation/Recast/Include/RecastTimer.h",
"intern/
|
audaspace/SRC/AUD_SRCResampleFactory.h",
"intern/audaspace/SRC/AUD_SRCResampleRea
|
der.h",
"intern/cycles/render/film_response.h",
"extern/carve/include/carve/config.h",
"extern/carve/include/carve/external/boost/random.hpp",
"extern/carve/patches/files/config.h",
"extern/carve/patches/files/random.hpp",
)
UTF8_CHECK = True
SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))))
# doesn't have to exist, just use as reference
BUILD_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(SOURCE_DIR, "..", "build"))))
|
David-Wobrock/django-fake-database-backends
|
tests/test_project/test_project/urls.py
|
Python
|
mit
| 776 | 0 |
"""linter_test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
|
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
|
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
mtitinger/libsigrok
|
bindings/swig/doc.py
|
Python
|
gpl-3.0
| 4,655 | 0.005371 |
##
## This file is part of the libsigrok project.
##
## Copyright (C) 2014 Martin Ling <martin-sigrok@earth.li>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from __future__ import print_function
from xml.etree import ElementTree
import sys, os
language, input_file = sys.argv[1:3]
if len(sys.argv) == 4:
mode = sys.argv[3]
input_dir = os.path.dirname(input_file)
index = ElementTree.parse(input_file)
def get_text(node):
paras = node.findall('para')
return str.join('\n\n', [p.text.rstrip() for p in paras if p.text])
for compound in index.findall('compound'):
if compound.attrib['kind'] != 'class':
continue
class_name = compound.find('name').text
if not class_name.startswith('sigrok::'):
continue
trimmed_name = class_name.split('::')[1]
doc = ElementTree.parse("%s/%s.xml" % (input_dir, compound.attrib['refid']))
cls = doc.find('compounddef')
brief = get_text(cls.find('briefdescription'))
if brief:
if language == 'python':
print('%%feature("docstring") %s "%s";' % (class_name, brief))
elif language == 'java':
print('%%typemap(javaclassmodifiers) %s "/** %s */\npublic class"' % (
class_name, brief))
constants = []
for section in cls.findall('sectiondef'):
kind = section.attrib['kind']
if kind not in ('public-func', 'public-static-attrib'):
continue
for member in section.findall('memberdef'):
member_name = member.find('name').text
brief = get_text(member.find('briefdescription')).replace('"', '\\"')
parameters = {}
for para in member.find('detaileddescription').findall('para'):
paramlist = para.find('parameterlist')
if paramlist is not None:
for param in paramlist.findall('parameteritem'):
|
                        namelist = param.find('parameternamelist')
|
name = namelist.find('parametername').text
description = get_text(param.find('parameterdescription'))
if description:
parameters[name] = description
if brief:
if language == 'python' and kind == 'public-func':
print(str.join('\n', [
'%%feature("docstring") %s::%s "%s' % (
class_name, member_name, brief)] + [
'@param %s %s' % (name, desc)
for name, desc in parameters.items()]) + '";')
elif language == 'java' and kind == 'public-func':
print(str.join('\n', [
'%%javamethodmodifiers %s::%s "/** %s' % (
class_name, member_name, brief)] + [
' * @param %s %s' % (name, desc)
for name, desc in parameters.items()])
+ ' */\npublic"')
elif kind == 'public-static-attrib':
constants.append((member_name, brief))
if language == 'java' and constants:
print('%%typemap(javacode) %s %%{' % class_name)
for member_name, brief in constants:
print(' /** %s */\n public static final %s %s = new %s(classesJNI.%s_%s_get(), false);\n' % (
brief, trimmed_name, member_name, trimmed_name,
trimmed_name, member_name))
print('%}')
elif language == 'python' and constants:
if mode == 'start':
print('%%extend %s {\n%%pythoncode %%{' % class_name)
for member_name, brief in constants:
print(' ## @brief %s\n %s = None' % (brief, member_name))
print('%}\n}')
elif mode == 'end':
print('%pythoncode %{')
for member_name, brief in constants:
print('%s.%s.__doc__ = """%s"""' % (
trimmed_name, member_name, brief))
print('%}')
|
SummerLW/Perf-Insight-Report
|
dashboard/dashboard/stored_object.py
|
Python
|
bsd-3-clause
| 6,638 | 0.008888 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for storing and getting objects from datastore.
This module provides Get, Set and Delete functions for storing pickleable
objects in datastore, with support for large objects greater than 1 MB.
Although this module contains ndb.Model classes, these are not intended
to be used directly by other modules.
App Engine datastore limits entity size to less than 1 MB; this module
supports storing larger objects by splitting the data and using multiple
datastore entities and multiple memcache keys. Using ndb.get and pickle, a
complex data structure can be retrieved more quickly than datastore fetch.
Example:
john = Account()
john.username = 'John'
john.userid = 123
stored_object.Set(john.userid, john)
"""
import cPickle as pickle
import logging
from google.appengine.api import memcache
from google.appengine.ext import ndb
_MULTIPART_ENTITY_MEMCACHE_KEY = 'multipart_entity_'
# Maximum number of entities and memcache to save a value.
# The limit for data stored in one datastore entity is 1 MB,
# and the limit for memcache batch operations is 32 MB. See:
# https://cloud.google.com/appengine/docs/python/memcache/#Python_Limits
_MAX_NUM_PARTS = 16
# Max bytes per entity or value cached with memcache.
_CHUNK_SIZE = 1000 * 1000
def Get(key):
"""Gets the value.
Args:
key: String key value.
Returns:
A value for key.
"""
results = MultipartCache.Get(key)
if not results:
results = _GetValueFromDatastore(key)
MultipartCache.Set(key, results)
return results
def Set(key, value):
"""Sets the value in datastore and memcache with limit of '_MAX_NUM_PARTS' MB.
Args:
key: String key value.
value: A pickleable value to be stored limited at '_MAX_NUM_PARTS' MB.
"""
entity = ndb.Key(MultipartEntity, key).get()
if not entity:
entity = MultipartEntity(id=key)
entity.SetData(value)
entity.Save()
MultipartCache.Set(key, value)
def Delete(key):
"""Deletes the value in datastore and memcache."""
ndb.Key(MultipartEntity, key).delete()
MultipartCache.Delete(key)
class MultipartEntity(ndb.Model):
"""Container for PartEntity."""
  # Number of entities used to store the serialized data.
size = ndb.IntegerProperty(default=0, indexed=False)
@classmethod
def _post_get_hook(cls, key, future): # pylint: disable=unused-argument
"""Deserializes data from multiple PartEntity."""
entity = future.get_result()
if entity is None or not entity.size:
return
string_id = entity.key.string_id()
part_keys = [ndb.Key(MultipartEntity, string_id, PartEntity, i + 1)
for i in xrange(entity.size)]
part_entities = ndb.get_multi(part_keys)
serialized = ''.join(p.value for p in part_entities if p is not None)
entity.SetData(pickle.loads(serialized))
@classmethod
def _pre_delete_hook(cls, key):
"""Deletes PartEntity entities."""
part_keys = PartEntity.query(ancestor=key).fetch(keys_only=True)
ndb.delete_multi(part_keys)
def Save(self):
"""Stores serialized data over multiple PartEntity."""
serialized_parts = _Serialize(self.GetData())
if len(serialized_parts) > _MAX_NUM_PARTS:
logging.error('Max number of parts reached.')
return
part_list = []
num_parts = len(serialized_parts)
for i in xrange(num_parts):
if serialized_parts[i] is not None:
part = PartEntity(id=i + 1, parent=self.key, value=serialized_parts[i])
part_list.append(part)
self.size = num_parts
ndb.put_multi(part_list + [self])
def GetData(self):
return getattr(self, '_data', None)
def SetData(self, data):
setattr(self, '_data', data)
class PartEntity(ndb.Model):
"""Holds a part of serialized data for MultipartEntity.
This entity key has the form:
ndb.Key('MultipartEntity', multipart_entity_id, 'PartEntity', part_index)
"""
value = ndb.BlobProperty()
class MultipartCache(object):
"""Contains operations for storing values over multiple memcache keys.
Values are serialized, split, and stored over multiple memcache keys. The
head cache stores the expected size.
"""
@classmethod
def Get(cls, key):
"""Gets value in memcache."""
keys = cls._GetCacheKeyList(key)
head_key = cls._GetCacheKey(key)
cache_values = memcache.get_multi(keys)
# Whether we have all the memcache values.
if len(keys) != len(cache_values) or head_key not in cache_values:
return None
serialized = ''
cache_size = cache_values[head_key]
keys.remove(head_key)
for key in keys[:cache_size]:
if key not in cache_values:
return None
      if cache_values[key] is not None:
|
serialized += cache_values[key]
return pickle.loads(serialized)
@classmethod
def Set(cls, key, value):
"""Sets a value in memcache."""
serialized_parts = _Serialize(value)
if len(serialized_parts) > _MAX_NUM_PARTS:
      logging.error('Max number of parts reached.')
|
return
cached_values = {}
cached_values[cls._GetCacheKey(key)] = len(serialized_parts)
for i in xrange(len(serialized_parts)):
cached_values[cls._GetCacheKey(key, i)] = serialized_parts[i]
memcache.set_multi(cached_values)
@classmethod
def Delete(cls, key):
"""Deletes all cached values for key."""
memcache.delete_multi(cls._GetCacheKeyList(key))
@classmethod
def _GetCacheKeyList(cls, key):
"""Gets a list of head cache key and cache key parts."""
keys = [cls._GetCacheKey(key, i) for i in xrange(_MAX_NUM_PARTS)]
keys.append(cls._GetCacheKey(key))
return keys
@classmethod
def _GetCacheKey(cls, key, index=None):
"""Returns either head cache key or cache key part."""
if index is not None:
return _MULTIPART_ENTITY_MEMCACHE_KEY + '%s.%s' % (key, index)
return _MULTIPART_ENTITY_MEMCACHE_KEY + key
def _GetValueFromDatastore(key):
entity = ndb.Key(MultipartEntity, key).get()
if not entity:
return None
return entity.GetData()
def _Serialize(value):
"""Serializes value and returns a list of its parts.
Args:
value: A pickleable value.
Returns:
A list of string representation of the value that has been pickled and split
into _CHUNK_SIZE.
"""
serialized = pickle.dumps(value, 2)
length = len(serialized)
values = []
for i in xrange(0, length, _CHUNK_SIZE):
values.append(serialized[i:i + _CHUNK_SIZE])
for i in xrange(len(values), _MAX_NUM_PARTS):
values.append(None)
return values
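# --- Illustrative sketch, not part of the original module ---
# Demonstrates the chunking scheme described in the module docstring without
# touching datastore or memcache: pickle the value, split it into _CHUNK_SIZE
# slices with _Serialize, then join the used slices to recover the value.
# Assumes the App Engine SDK is importable (needed by the imports above).
if __name__ == '__main__':
  big_value = {'payload': 'x' * (2 * _CHUNK_SIZE + 123)}  # needs 3 chunks
  parts = _Serialize(big_value)
  used_parts = [p for p in parts if p is not None]
  assert len(parts) == _MAX_NUM_PARTS
  assert pickle.loads(''.join(used_parts)) == big_value
  print 'Stored %d serialized bytes in %d parts.' % (
      sum(len(p) for p in used_parts), len(used_parts))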
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractLittlebambooHomeBlog.py
|
Python
|
bsd-3-clause
| 622 | 0.028939 |
def extractLittlebambooHomeBlog(item):
|
'''
Parser for 'littlebamboo.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('FW', 'Fortunate Wife', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
|
return False
|
coursemdetw/2015wcms
|
wsgi/openshift/openshiftlibs.py
|
Python
|
gpl-2.0
| 3,730 | 0.008043 |
#@+leo-ver=5-thin
#@+node:2014spring.20140628104046.1746: * @file openshiftlibs.py
#@@language python
#@@tabwidth -4
#@+others
#@+node:2014spring.20140628104046.1747: ** openshiftlibs declarations
#!/usr/bin/env python
import hashlib, inspect, os, random, sys
#@+node:2014spring.20140628104046.1748: ** get_openshift_secret_token
# Gets the secret token provided by OpenShift
# or generates one (this is slightly less secure, but good enough for now)
def get_openshift_secret_token():
token = os.getenv('OPENSHIFT_SECRET_TOKEN')
name = os.getenv('OPENSHIFT_APP_NAME')
uuid = os.getenv('OPENSHIFT_APP_UUID')
if token is not None:
return token
elif (name is not None and uuid is not None):
return hashlib.sha256(name.encode('utf-8') + '-'.encode('utf-8') + uuid.encode('utf-8')).hexdigest()
return None
#@+node:2014spring.20140628104046.1749: ** openshift_secure
# Loop through all provided variables and generate secure versions
# If not running on OpenShift, returns defaults and logs an error message
#
# This function calls secure_function and passes an array of:
# {
# 'hash': generated sha hash,
# 'variable': name of variable,
# 'original': original value
# }
def openshift_secure(default_keys, secure_function = 'make_secure_key'):
# Attempts to get secret token
my_token = get_openshift_secret_token()
# Only generate random values if on OpenShift
my_list = default_keys
if my_token is not None:
# Loop over each default_key and set the new value
for key, value in default_keys.items():
# Create hash out of token and this key's name
sha = hashlib.sha256(my_token.encode('utf-8') + '-'.encode('utf-8') + key.encode('utf-8')).hexdigest()
# Pass a dictionary so we can add stuff without breaking existing calls
vals = { 'hash': sha, 'variable': key, 'original': value }
# Call user specified function or just return hash
my_list[key] = sha
if secure_function is not None:
# Pick through the global and local scopes to find the function.
possibles = globals().copy()
possibles.update(locals())
supplied_function = possibles.get(secure_function)
if not supplied_function:
raise Exception("Cannot find supplied security function")
else:
my_list[key] = supplied_function(vals)
else:
calling_file = inspect.stack()[1][1]
if os.getenv('OPENSHIFT_REPO_DIR'):
base = os.getenv('OPENSHIFT_REPO_DIR')
calling_file.replace(base,'')
sys.stderr.write("OPENSHIFT WARNING: Using default values for secure variables, please manually modify in " + calling_file + "\n")
return my_list
#@+node:2014spring.20140628104046.1750: ** make_secure_key
# This function transforms default keys into per-deployment random keys;
def make_secure_key(key_info):
    hashcode = key_info['hash']
|
key = key_info['variable']
original = key_info['original']
# These are the legal password characters
    # as per the Django source code
|
# (django/contrib/auth/models.py)
chars = 'abcdefghjkmnpqrstuvwxyz'
chars += 'ABCDEFGHJKLMNPQRSTUVWXYZ'
chars += '23456789'
# Use the hash to seed the RNG
random.seed(int("0x" + hashcode[:8], 0))
# Create a random string the same length as the default
rand_key = ''
for _ in range(len(original)):
        # randint's upper bound is inclusive, so cap it at len(chars)-1
        rand_pos = random.randint(0, len(chars) - 1)
rand_key += chars[rand_pos:(rand_pos+1)]
# Reset the RNG
random.seed()
# Set the value
return rand_key
#@-others
#@-leo
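# --- Illustrative usage sketch, not part of the original module ---
# The default dictionary below is hypothetical. Outside OpenShift (no
# OPENSHIFT_* environment variables) openshift_secure() returns the defaults
# unchanged and writes a warning to stderr; on OpenShift each value is
# replaced by a deterministic per-deployment key via make_secure_key().
if __name__ == '__main__':
    defaults = {'SECRET_KEY': 'insecure-default-key-change-me'}
    secured = openshift_secure(defaults)
    print(secured['SECRET_KEY'])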
|
evereux/flicket
|
scripts/users_import_from_json.py
|
Python
|
mit
| 2,390 | 0.00251 |
#! usr/bin/python3
# -*- coding: utf8 -*-
import datetime
import json
import os
from flask_script import Command
from scripts.users_export_to_json import json_user_file
from application import db
from application.flicket.models.flicket_user import FlicketUser
class JsonUser:
def __init__(self, username, name, email, password):
self.username = username
self.name = name
self.email = email
self.name = name
self.password = password
class ImportUsersFromJson(Command):
"""
Command used by manage.py to import users from a json file formatted such:
[
        { username, name, email, password.
|
]
"""
@staticmethod
def run():
# check if file exists
if not os.path.isfile(json_user_file):
print('Could not find json file "{}". Exiting ....'.format(json_user_file))
exit()
# read json file
with open(json_user_file) as data_file:
json_users = json.load(data_file)
# check formatting of json file
        valid_json_fields = ['username', 'name', 'email', 'password']
|
for user in json_users:
if not all(f in user for f in valid_json_fields):
print('json file not formatted correctly. Exiting.')
exit()
# add users to database.
for user in json_users:
# encode password to bytes
password = str.encode(user['password'])
# create json_user object
json_user = JsonUser(user['username'], user['name'], user['email'], password)
            # check that the user doesn't already exist.
existing_user = FlicketUser.query.filter_by(email=json_user.email)
if existing_user.count() > 0:
print('User {} {} already exists in the database.'.format(json_user.name, json_user.email))
continue
# add the user
print('Adding the user {} {} to the database.'.format(json_user.name, json_user.email))
new_user = FlicketUser(username=json_user.username, name=json_user.name, email=json_user.email,
password=json_user.password, date_added=datetime.datetime.now())
db.session.add(new_user)
db.session.commit()
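# --- Illustrative sketch, not part of the original module ---
# The json file referenced by `json_user_file` is expected to contain a list
# of objects with the four fields validated in run(). The account details
# below are hypothetical; writing the file with the json module keeps the
# formatting compatible with json.load() above.
if __name__ == '__main__':
    example_users = [
        {'username': 'jbloggs',
         'name': 'Joe Bloggs',
         'email': 'joe.bloggs@example.com',
         'password': 'a-password-or-hash'},
    ]
    with open('users_example.json', 'w') as f:
        json.dump(example_users, f, indent=4)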
|
AgapiGit/RandomPasswordGenerator
|
RandomPasswordGenerator/manage.py
|
Python
|
mit
| 843 | 0.001186 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RandomPasswordGenerator.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
|
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
    execute_from_command_line(sys.argv)
|
camflan/agro
|
agro/sources/__init__.py
|
Python
|
bsd-3-clause
| 1,319 | 0.006823 |
from datetime import datetime
import logging
import sys
from django.conf import settings
log = logging.getLogger('agro.sources')
tree_modules_to_try = [ "xml.etree.cElementTree", "elementtree.ElementTree", "cElementTree", ]
element_tree = None
for tree in tree_modules_to_try:
try:
try:
element_tree = __import__('%s' % tree, {}, {}, [''], -1)
except:
element_tree = __import__('%s' % tree, {}, {}, [''])
break
except ImportError, e:
continue
except Exception, e:
log.error("%s" % e)
raise
if element_tree is None:
raise ImportError("No ElementTree found.")
log.debug("Using specified etree module: %s" % element_tree)
def import_source_modules(source_list=settings.AGRO_SETTINGS['source_list'], class_name=''):
sources = []
for source in source_list:
try:
log.debug('trying to load %s' % source)
try:
s = __import__("agro.sources.%s" % source, {}, {}, ['%s%s' % (source, class_name)], -1)
|
except:
                s = __import__("agro.sources.%s" % source, {}, {}, ['%s%s' % (source, class_name)])
|
if s:
sources.append(s)
except Exception, e:
log.error('unable to load %s: %s', source, e)
return sources
|
sdpython/pyquickhelper
|
src/pyquickhelper/helpgen/notebook_exporter.py
|
Python
|
mit
| 4,770 | 0.001048 |
# -*- coding: utf-8 -*-
"""
@file
@brief Customer notebook exporters.
"""
import os
from textwrap import indent
from traitlets import default
from traitlets.config import Config
from jinja2 import DictLoader
from nbconvert.exporters import RSTExporter
from nbconvert.filters.pandoc import convert_pandoc
def convert_pandoc_rst(source, from_format, to_format, extra_args=None):
"""
Overwrites `convert_pandoc
<https://github.com/jupyter/nbconvert/blob/master/nbconvert/filters/pandoc.py>`_.
@param source string to convert
@param from_format from format
@param to_format to format
@param extra_args extra arguments
@return results
"""
return convert_pandoc(source, from_format, to_format, extra_args=None)
def process_raw_html(source, extra_args=None):
"""
Replaces the output of
`add_menu_notebook
<http://www.xavierdupre.fr/app/jyquickhelper/helpsphinx/jyquickhelper/
helper_in_notebook.html#jyquickhelper.helper_in_notebook.add_notebook_menu>`_
by:
::
.. contents::
:local:
"""
if source is None:
return source # pragma: no cover
if 'var update_menu = function() {' in source:
return "\n\n.. contents::\n :local:\n\n"
return "\n\n.. raw:: html\n\n" + indent(source, prefix=' ')
class UpgradedRSTExporter(RSTExporter):
"""
Exports :epkg:`rst` documents.
Overwrites `RSTExporter <https://github.com/jupyter/
nbconvert/blob/master/nbconvert/exporters/rst.py>`_.
* It replaces `convert_pandoc <https://github.com/jupyter/
nbconvert/blob/master/nbconvert/filters/pandoc.py>`_
by @see fn convert_pandoc_rst.
* It converts :epkg:`svg` into :epkg:`png` if possible,
see @see fn process_raw_html.
* It replaces some known :epkg:`javascript`. The output of function
`add_menu_notebook <http://www.xavierdupre.fr/app/jyquickhelper/helpsphinx/jyquickhelper/
helper_in_notebook.html#jyquickhelper.helper_in_notebook.add_notebook_menu>`_
is replaced by ``.. contents::``.
.. index:: notebook export, nbconvert
It extends the template
`rst.tpl <https://github.com/jupyter/nbconvert/blob/master/nbconvert/templates/rst.tpl>`_.
New template is `rst_modified.tpl <https://github.com/sdpython/pyquickhelper/blob/master/
src/pyquickhelper/helpgen/rst_modified.tpl>`_.
It follows the hints given at
`Programatically creating templates
<https://nbconvert.readthedocs.io/en/latest/
nbconvert_library.html#Programatically-creating-templates>`_.
    :epkg:`jyquickhelper` should add a string highly recognizable when adding a menu.
|
"""
def __init__(self, *args, **kwargs):
"""
Overwrites the extra loaders to get the right template.
|
"""
filename = os.path.join(os.path.dirname(__file__), 'rst_modified.tpl')
with open(filename, 'r', encoding='utf-8') as f:
content = f.read()
filename = os.path.join(os.path.dirname(__file__), 'rst.tpl')
with open(filename, 'r', encoding='utf-8') as f:
content2 = f.read()
dl = DictLoader({'rst_modified.tpl': content, 'rst.tpl': content2})
kwargs['extra_loaders'] = [dl]
RSTExporter.__init__(self, *args, **kwargs)
def default_filters(self):
"""
Overrides in subclasses to provide extra filters.
This should return an iterable of 2-tuples: (name, class-or-function).
You should call the method on the parent class and include the filters
it provides.
If a name is repeated, the last filter provided wins. Filters from
user-supplied config win over filters provided by classes.
"""
for k, v in RSTExporter.default_filters(self):
yield (k, v)
yield ('convert_pandoc_rst', convert_pandoc_rst)
yield ('process_raw_html', process_raw_html)
output_mimetype = 'text/restructuredtext'
export_from_notebook = "reST"
@default('template_file')
def _template_file_default(self):
return "rst_modified.tpl"
@default('file_extension')
def _file_extension_default(self):
return '.rst'
@default('template_name')
def _template_name_default(self):
return 'rst'
@property
def default_config(self):
c = Config({
'ExtractOutputPreprocessor': {
'enabled': True,
'output_filename_template': '{unique_key}_{cell_index}_{index}{extension}'
},
'HighlightMagicsPreprocessor': {
'enabled': True
},
})
c.merge(super(UpgradedRSTExporter, self).default_config)
return c
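# --- Illustrative usage sketch, not part of the original module ---
# Shows how the exporter defined above might be driven directly; the notebook
# filename is hypothetical and nbconvert plus pandoc must be available.
if __name__ == '__main__':
    import nbformat
    notebook = nbformat.read('example.ipynb', as_version=4)
    exporter = UpgradedRSTExporter()
    body, resources = exporter.from_notebook_node(notebook)
    with open('example.rst', 'w', encoding='utf-8') as f:
        f.write(body)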
|
bjodah/finitediff
|
finitediff/grid/rebalance.py
|
Python
|
bsd-2-clause
| 4,269 | 0.000937 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import math
import numpy as np
from scipy.interpolate import interp1d
def _avgdiff(x):
dx = np.diff(x)
dx2 = np.zeros_like(x)
dx2[0], dx2[-1] = dx[0], dx[-1]
dx2[1:-1] = 0.5 * (dx[1:] + dx[:-1])
return dx2
def rebalanced_grid(
grid, err, base=0.25, num=None, resolution_factor=10, smooth_fact=1.0
):
if num is None:
num = grid.size
dx = np.diff(grid)
area_err = 0.5 * np.dot(err[1:] + err[:-1], dx) # trapezoidal rule
dx2 = _avgdiff(grid)
def smooth_err(x):
tot = 0
for i, (gx, e) in enumerate(zip(grid, err)):
fwhm = dx2[i] * smooth_fact
tot += e * np.exp(-((x - gx) ** 2) / (2 * (fwhm / 2.35482) ** 2))
return tot
finegrid = np.zeros((grid.size - 1) * resolution_factor + 1)
for i in range(grid.size - 1):
finegrid[i * resolution_factor : (i + 1) * resolution_factor] = np.linspace(
grid[i], grid[i + 1], resolution_factor + 1
)[:-1]
    finegrid[-resolution_factor - 1 :] = np.linspace(
        grid[-2], grid[-1], resolution_factor + 1
    )
|
smoothed = smooth_err(finegrid) + base * area_err / (grid[-1] - grid[0])
assert np.all(smoothed > 0)
assert np.all(_avgdiff(finegrid) > 0)
interr = np.cumsum(smoothed * _avgdiff(finegrid))
cb = interp1d(interr, finegrid)
return cb(np.linspace(interr[0], interr[-1], num))
def pre_pruning_mask(grid, rtol=1e-12, atol=0.0):
"""Returns a mask for grid pruning.
    Any grid spacing smaller than ``rtol*gridvalue + atol`` will
|
be pruned. In general the value on the right is removed unless it is
the last point in the grid.
Parameters
----------
grid : array
rtol : float
atol : float
Returns
-------
NumPy array of ``numpy.bool_`` (to be used as mask).
"""
if np.any(np.diff(grid) < 0):
raise ValueError("grid needs to be monotonic")
limit = grid[-1] - (atol + abs(rtol * grid[-1]))
mask = np.empty(grid.size, dtype=np.bool_)
mask[grid.size - 1] = True # rightmost point included
for ridx in range(grid.size - 2, -1, -1):
if grid[ridx] < limit:
mask[ridx] = True
break
else:
mask[ridx] = False
else:
raise ValueError("no grid-points left")
mask[0] = True # leftmost point included
limit = grid[0] + abs(rtol * grid[0]) + atol
for idx in range(1, ridx):
if grid[idx] < limit:
mask[idx] = False
else:
mask[idx] = True
limit = grid[idx] + abs(rtol * grid[idx]) + atol
return mask
def combine_grids(grids, **kwargs):
"""Combines multiple grids and prunes them using pre_pruning mask
Parameters
----------
grids : iterable of array_like grids
\\*\\* : dict
Keyword arguments passed on to pre_pruning_mask
Returns
-------
Strictly increasing monotonic array
"""
supergrid = np.sort(np.concatenate(grids))
mask = pre_pruning_mask(supergrid, **kwargs)
return supergrid[mask]
def grid_pruning_mask(grid, err, ndrop=None, protect_sparse=None, pow_err=2, pow_dx=2):
"""Returns a mask for grid pruning.
Parameters
----------
grid : array
err : array
ndrop : int
If not provided taken as 25% of grid size (rounded upward).
protect_sparse : int
If not provided taken as 25% of grid size (rounded upward).
pow_err : number
Exponent of error in weighting.
pow_dx : number
Exponent of grid spacing in weighting.
"""
if ndrop is None:
ndrop = math.ceil(grid.size * 0.25)
if protect_sparse is None:
protect_sparse = math.ceil(grid.size * 0.25)
dx = _avgdiff(grid)
protected = np.argsort(dx)[-protect_sparse:]
score = err ** pow_err * dx ** pow_dx
importance = np.argsort(score)
drop = []
for considered in importance:
if considered in protected:
continue
if considered - 1 in drop or considered + 1 in drop:
continue
drop.append(considered)
if len(drop) == ndrop:
break
return ~np.in1d(np.arange(grid.size), drop)
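# --- Illustrative sketch, not part of the original module ---
# A toy demonstration of rebalanced_grid: the error profile below is made up
# and concentrates the estimated error around x = 0.3, so the rebalanced grid
# should cluster its points there (the smallest spacing shrinks).
if __name__ == '__main__':
    x = np.linspace(0.0, 1.0, 33)
    toy_err = np.exp(-((x - 0.3) / 0.05) ** 2)
    x_new = rebalanced_grid(x, toy_err)
    print("min spacing: %.4g -> %.4g" % (np.diff(x).min(), np.diff(x_new).min()))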
|
torypages/luigi
|
test/contrib/pig_test.py
|
Python
|
apache-2.0
| 5,241 | 0.001336 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import tempfile
import luigi
from helpers import with_config, unittest
from luigi.contrib.pig import PigJobError, PigJobTask
from mock import patch
class SimpleTestJob(PigJobTask):
def output(self):
return luigi.LocalTarget('simple-output')
def pig_script_path(self):
return "my_simple_pig_script.pig"
class ComplexTestJob(PigJobTask):
def output(self):
return luigi.LocalTarget('complex-output')
def pig_script_path(self):
return "my_complex_pig_script.pig"
def pig_env_vars(self):
return {'PIG_CLASSPATH': '/your/path'}
def pig_properties(self):
return {'pig.additional.jars': '/path/to/your/jar'}
def pig_parameters(self):
return {'YOUR_PARAM_NAME': 'Your param value'}
def pig_options(self):
|
return ['-x', 'local']
class SimplePigTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch('subprocess.Popen')
def test_run__success(self, mock):
arglist_result = []
p = subprocess.Popen
subprocess.Popen = _get_fake_Popen(arglist_result, 0)
try:
job = SimpleTestJob()
job.run()
            self.assertEqual([['/usr/share/pig/bin/pig', '-f', 'my_simple_pig_script.pig']], arglist_result)
|
finally:
subprocess.Popen = p
@patch('subprocess.Popen')
def test_run__fail(self, mock):
arglist_result = []
p = subprocess.Popen
subprocess.Popen = _get_fake_Popen(arglist_result, 1)
try:
job = SimpleTestJob()
job.run()
self.assertEqual([['/usr/share/pig/bin/pig', '-f', 'my_simple_pig_script.pig']], arglist_result)
except PigJobError as e:
p = e
self.assertEqual('stderr', p.err)
else:
self.fail("Should have thrown PigJobError")
finally:
subprocess.Popen = p
class ComplexPigTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch('subprocess.Popen')
def test_run__success(self, mock):
arglist_result = []
p = subprocess.Popen
subprocess.Popen = _get_fake_Popen(arglist_result, 0)
try:
job = ComplexTestJob()
job.run()
self.assertEqual([['/usr/share/pig/bin/pig', '-x', 'local', '-p', 'YOUR_PARAM_NAME=Your param value', '-propertyFile', 'pig_property_file', '-f', 'my_complex_pig_script.pig']], arglist_result)
# Check property file
with open('pig_property_file') as pprops_file:
pprops = pprops_file.readlines()
self.assertEqual(1, len(pprops))
self.assertEqual('pig.additional.jars=/path/to/your/jar\n', pprops[0])
finally:
subprocess.Popen = p
@patch('subprocess.Popen')
def test_run__fail(self, mock):
arglist_result = []
p = subprocess.Popen
subprocess.Popen = _get_fake_Popen(arglist_result, 1)
try:
job = ComplexTestJob()
job.run()
except PigJobError as e:
p = e
self.assertEqual('stderr', p.err)
self.assertEqual([['/usr/share/pig/bin/pig', '-x', 'local', '-p', 'YOUR_PARAM_NAME=Your param value', '-propertyFile', 'pig_property_file', '-f', 'my_complex_pig_script.pig']], arglist_result)
# Check property file
with open('pig_property_file') as pprops_file:
pprops = pprops_file.readlines()
self.assertEqual(1, len(pprops))
self.assertEqual('pig.additional.jars=/path/to/your/jar\n', pprops[0])
else:
self.fail("Should have thrown PigJobError")
finally:
subprocess.Popen = p
def _get_fake_Popen(arglist_result, return_code, *args, **kwargs):
def Popen_fake(arglist, shell=None, stdout=None, stderr=None, env=None, close_fds=True):
arglist_result.append(arglist)
class P(object):
def wait(self):
pass
def poll(self):
return 0
def communicate(self):
return 'end'
def env(self):
return self.env
p = P()
p.returncode = return_code
p.stderr = tempfile.TemporaryFile()
p.stdout = tempfile.TemporaryFile()
p.stdout.write(b'stdout')
p.stderr.write(b'stderr')
# Reset temp files so the output can be read.
p.stdout.seek(0)
p.stderr.seek(0)
return p
return Popen_fake
|
simonspa/django-datacollect
|
datacollect/survey/management/commands/edit_relations.py
|
Python
|
gpl-3.0
| 2,980 | 0.00906 |
from django.core.management.base import BaseCommand, CommandError
from survey.models import Record
from fuzzywuzzy import fuzz
class Command(BaseCommand):
help = 'Finds fuzzy name matches and allows to alter their relation'
def add_arguments(self, parser):
parser.add_argument('start', nargs='?', type=int, default=0)
def handle(self, *args, **options):
rx = Record.objects.all()
all = rx.count()
cnt = 0
print "Iterating over " + str(all) + " database records, starting at " + str(options['start'])
for i,r1 in enumerate(rx):
# Obey start position argument
if i < options['start']: continue
for j,r2 in enumerate(rx):
if j <= i: continue
ratio = fuzz.ratio(r1.name,r2.name)
if ratio < 75:
continue
if r1.person_id == r2.person_id:
continue
if r1.country != r2.country:
continue
if r1.gender != r2.gender:
continue
# Print leftovers:
print ""
print u"Score: {0:3d} {1:30}{2}".format(ratio,r1.name,r2.name)
            print u"Person-ID: {1:30}{2}".format(ratio,r1.person_id,r2.person_id)
|
print u"Follow-up: {0!r:<30}{1}".format(r1.follow_up_case,r2.follow_up_case)
print u"Date intervention: {0:30}{1}".format(str(r1.date_intervention),str(r2.date_intervention))
print u"Issue area: {0:30}{1}".format(r1.issue_area,r2.issue_area)
            print u"Activities: {0:30}{1}".format(r1.relevant_activities,r2.relevant_activities)
|
if Record.objects.filter(pk=r1.pk, follow_ups__pk=r2.pk).exists():
print u"Relation exists? ************** YES ****************"
else:
print u"Relation exists? .............. NO ................"
while True:
data = str(raw_input("(a)dd, (r)emove relation, (s)kip or (p)ause: "))
if data.lower() not in ('a', 'r', 's', 'p'):
print("Not an appropriate choice.")
else:
break
if data == "a":
r1.follow_ups.add(r2)
r1.save()
elif data == "r":
r1.follow_ups.remove(r2)
r1.save()
elif data == "s":
continue;
elif data == "p":
print "Restart with argument: " + str(i)
self.stdout.write(self.style.SUCCESS('Paused at %i' % i))
return
cnt += 1
print "Status: {:2.1f}".format((100.0*i)/all)
self.stdout.write(self.style.SUCCESS('Successfully edited all fuzzy relations'))
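# --- Illustrative sketch, not part of the original command ---
# Demonstrates the matching rule used in handle(): candidate pairs are only
# shown to the operator when fuzz.ratio() is at least 75 and the person_id,
# country and gender checks pass. The names below are made up; running this
# requires the Django project environment because of the imports above.
if __name__ == "__main__":
    print fuzz.ratio("Maria Gonzalez", "Maria Gonzales")  # well above 75: shown
    print fuzz.ratio("Maria Gonzalez", "John Smith")      # well below 75: skipped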
|
95ellismle/FinancesApp2
|
Gui/App.py
|
Python
|
gpl-3.0
| 3,438 | 0.013089 |
# Importing Modules from PyQt5
from PyQt5.QtWidgets import QSizePolicy, QPushButton, QFrame, QWidget, QStackedWidget
from PyQt5.QtGui import QColor
# Importing Modules from the App
from Gui import Table, Plot, Funcs, Budget
from Settings import StyleSheets as St
def smallerNumber(number1, number2):
if number1 < number2:
return number1
else:
return number2
def fill_a_list(List, filler, length):
List = List + [filler for i in range(length)]
return List
class App(QWidget):
# The Main Window... This Widget will be the main window.
# Other widgets such as the TablePage and PlotPage will be called from here in a StackedWidget
def __init__(self):
super(App, self).__init__()
self.setWindowTitle('Finances App 2') # Set the title of the app
self.setGeometry(500, 500, 1600, 880) # Set the Geometry of the Window
### Setting the Colour of the app background
p = self.palette()
b_col = QColor(St.background_colour)
p.setColor(self.backgroundRole(), b_col)
self.setPalette(p)
self.initUI()
def initUI(self):
self.TableStackItem = Table.TablePage()
self.PlotStackItem = Plot.App_Bit()
self.BudgetStackItem = Budget.SettingsPage()
sidebar_frame = self.sideBar()
self.FullStack = QStackedWidget(self)
self.FullStack.addWidget(self.TableStackItem)
self.FullStack.addWidget(self.PlotStackItem)
self.FullStack.addWidget(self.BudgetStackItem)
self.onTabButton()
Funcs.AllInOneLayout(self,[sidebar_frame,self.FullStack],Stretches=[1,10],VH="H")
self.show()
def sideBar(self):
sidebar_frame = QFrame()
sidebar_frame.setMinimumWidth(110)
#sidebar_frame.setStyleSheet(St.StyleSheets['Sidebar'])
button_titles = ['Data\nTables','Plotting','Budget']
button_titles = fill_a_list(button_titles, '', St.number_of_buttons_on_sidebar-len(button_titles))
        self.buttons = []
|
but_funcs = [self.onTabButton, self.onPlotButton, self.onBudgetButton ]
but_funcs = fill_a_list(but_funcs, self.emptyFunc, St.number_of_buttons_on_sidebar-len(but_funcs))
for i in range(St.number_of_buttons_on_sidebar):
button = QPushButton(button_titles[i])
button.setStyleSheet(St.StyleSheets['Button%i'%i])
button.clicked.connect(but_funcs[i])
            button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
|
button.setCheckable(True)
self.buttons.append(button)
Funcs.AllInOneLayout(sidebar_frame, self.buttons, VH='V')# add button and button2 to the sidebar_frame vertically, aligning them at the top.
#frame_layout.setSizeLayout(QSizePolicy.Expanding, QSizePolicy.Expanding)
return sidebar_frame
# These buttons change which widget we can see in the stacked widget
def onTabButton(self):
self.TableStackItem.setFocus()
self.FullStack.setCurrentIndex(0)
def onPlotButton(self):
self.PlotStackItem.setFocus()
self.FullStack.setCurrentIndex(1)
def onBudgetButton(self):
self.BudgetStackItem.setFocus()
self.FullStack.setCurrentIndex(2)
def emptyFunc(self):
return 0
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/scripts/api/history_delete_history.py
|
Python
|
gpl-3.0
| 389 | 0.03856 |
#!/usr/bin/env python
import os, sys
|
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import delete
try:
assert sys.argv[2]
except IndexError:
print 'usage: %s key url [purge (true/false)] ' % os.path.basename( sys.argv[0] )
sys.exit( 1 )
try:
data = {}
data[ 'purge' ] = sys.argv[3]
except IndexError:
pass
delete( sys.argv[1], sys.argv[2], data )
|
Becksteinlab/BornProfiler
|
scripts/apbs-bornprofile-init.py
|
Python
|
gpl-3.0
| 1,462 | 0.008208 |
#!/usr/bin/env python
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# BornProfiler --- A package to calculate electrostatic free energies with APBS
# Written by Kaihsu Tai, Lennard van der Feltz, and Oliver Beckstein
# Released under the GNU Public Licence, version 3
#
import bornprofiler
import logging
logger = logging.getLogger('bornprofiler')
usage = """%prog [options]
Set up the BornProfiler configuration directories. This only has to be
done once (but it will not cause any damage to run this script again).
|
"""
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(usage=usage)
opts, args = parser.parse_args()
bornprofiler.start_logging()
bornprofiler.config.setup()
    if bornprofiler.config.check_setup():
|
logger.info("Init successful: you now have the template directories under %r.",
bornprofiler.config.configdir)
logger.info("The package can also be customized by editing %r.",
bornprofiler.config.CONFIGNAME)
logger.info("Questions and feedback: Oliver Beckstein <obeckste@asu.edu>")
else:
logger.error("Something is wrong: Failed to setup the template directories.")
        logger.warn("You can proceed but problems might arise and you will not be able "
"to easily customize generation of runs and submission scripts.")
bornprofiler.stop_logging()
|
regardscitoyens/nosfinanceslocales_scraper
|
localfinance/spiders/localfinance_spider.py
|
Python
|
mit
| 7,108 | 0.004361 |
# -*- coding: utf-8 -*-
import re
import pandas as pd
from scrapy.spiders import Spider
from scrapy.selector import Selector
from ..parsing.zone import (
CityZoneParser,
EPCIZoneParser,
DepartmentZoneParser,
RegionZoneParser
)
from ..item import LocalFinance
from ..utils import DOM_DEP_MAPPING, uniformize_code, convert_dom_code, convert_city
class LocalFinanceSpider(Spider):
"""Basic spider which crawls all pages of finance of french towns, departments
regions and EPCI.
"""
name = "localfinance"
domain = "http://alize2.finances.gouv.fr"
allowed_domains = [domain]
def __init__(self, year=2014, zone_type='city'):
"""Load insee code of every commune in france and generate all the urls to
crawl."""
self.start_urls = []
if zone_type == 'city' or zone_type == 'all':
self.start_urls += self.get_commune_urls(year)
if zone_type == 'department' or zone_type == 'all':
self.start_urls += self.get_dep_urls(year)
if zone_type == 'region' or zone_type == 'all':
self.start_urls += self.get_reg_urls(year)
if zone_type == 'epci' or zone_type == 'all':
self.start_urls += self.get_epci_urls(year)
def get_dep_urls(self, year):
        insee_code_file = "data/locality/depts2013.txt"
|
data = pd.io.parsers.read_csv(insee_code_file, '\t')
data['DEP'] = uniformize_code(data, 'DEP')
data['DEP'] = convert_dom_code(data)
baseurl = "%s/departements/detail.php?dep=%%(DEP)s&exercice=%s" % (self.domain, year)
return [baseurl % row for __, row in data.iterrows()]
def get_reg_urls(self, year):
        insee_code_file = "data/locality/reg2013.txt"
|
data = pd.io.parsers.read_csv(insee_code_file, '\t')
data['REGION'] = uniformize_code(data, 'REGION')
# Special case for DOM as usual
def set_dom_code(reg):
if reg == '001':
return '101'
elif reg == '002':
return '103'
elif reg == '003':
return '102'
elif reg == '004':
return '104'
else:
return reg
data['REGION'] = data['REGION'].apply(set_dom_code)
baseurl = "%s/regions/detail.php?reg=%%(REGION)s&exercice=%s" % (self.domain, year)
return [baseurl % row for __, row in data.iterrows()]
def get_epci_urls(self, year):
data = pd.read_csv('data/locality/epci.csv')
base_url = "%s/communes/eneuro/detail_gfp.php?siren=%%(siren)s&dep=%%(dep)s&type=BPS&exercice=%s" % (self.domain, str(year))
return [base_url % row for __, row in data.iterrows()]
def get_commune_urls(self, year):
"""
The communes pages urls depends on 5 parameters:
- COM: the insee code of the commune
- DEP: the department code on 3 characters
- type: type of financial data, BPS is for the whole data.
- exercise: year of financial data
"""
insee_code_file = "data/locality/france2013.txt"
data = pd.io.parsers.read_csv(insee_code_file, '\t')
# XXX: insee_communes file contains also "cantons", filter out these lines
mask = data['ACTUAL'].apply(lambda v: v in [1, 2, 3])
data = data[mask].reindex()
# XXX: as always paris is the exception. City code is 101 for years < 2010 and 056 for years >= 2010
# 056 is the right code, add 101 also to crawl pages for years < 2010
paris_row = data[(data.COM == 56) & (data.DEP == '75')].copy()
paris_row.COM = 101
data = data.append(paris_row)
data['DEP'] = uniformize_code(data, 'DEP')
data['COM'] = uniformize_code(data, 'COM')
data['DEP'] = convert_dom_code(data)
data['COM'] = data.apply(convert_city, axis=1)
        base_url = "%s/communes/eneuro/detail.php?icom=%%(COM)s&dep=%%(DEP)s&type=BPS&param=0&exercice=%s" % (self.domain, str(year))
return [base_url % row for __, row in data.iterrows()]
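        # For illustration only (hypothetical codes): with year=2014, COM='056'
        # and DEP='075' the generated URL would look like
        # http://alize2.finances.gouv.fr/communes/eneuro/detail.php?icom=056&dep=075&type=BPS&param=0&exercice=2014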
def parse(self, response):
if "/communes/eneuro/detail_gfp.php" in response.url:
return self.parse_epci(response)
elif "/communes/eneuro/detail.php" in response.url:
return self.parse_commune(response)
elif "/departements/detail.php" in response.url:
return self.parse_dep(response)
elif "/regions/detail.php" in response.url:
return self.parse_reg(response)
def parse_commune(self, response):
"""Parse the response and return an Account object"""
hxs = Selector(response)
h3_strings = hxs.xpath("//body/h3/text()").extract()
if h3_strings and h3_strings[0].startswith("Aucune commune"):
return []
        icom, dep, year = re.search('icom=(\d{3})&dep=(\w{3})&type=\w{3}&param=0&exercice=(\d{4})', response.url).groups()
# XXX: better to use the real insee code for later analysis, not icom and dep in url.
real_dep = dict([(val, key) for key, val in DOM_DEP_MAPPING.items()]).get(dep, dep[1:])
real_com = icom if dep not in DOM_DEP_MAPPING.values() else icom[1:]
real_insee_code = real_dep + real_com
# XXX: hack for paris ! \o/
if real_insee_code == '75101':
real_insee_code = '75056'
data = CityZoneParser(real_insee_code, year, response.url).parse(hxs)
return LocalFinance(id=real_insee_code, data=data)
def parse_epci(self, response):
hxs = Selector(response)
siren, year = re.search('siren=(\d+)&dep=\w{3}&type=BPS&exercice=(\d{4})', response.url).groups()
if 'Aucun GFP correspondant' in response.body:
self.logger.warning("No epci for siren=%s and year=%s (%s)" % (siren, year, response.url))
return
data = EPCIZoneParser("", year, response.url, siren).parse(hxs)
return LocalFinance(id=siren, data=data)
def parse_dep(self, response):
hxs = Selector(response)
h3_strings = hxs.xpath("//body/h3/text()").extract()
department_id, year = re.search('dep=(\w{3})&exercice=(\d{4})', response.url).groups()
if h3_strings and h3_strings[0].startswith(u'Aucune donn\xe9e'):
self.logger.warning("No data found for department=%s and year=%s (%s)" % (department_id, year, response.url))
return
data = DepartmentZoneParser(department_id, year, response.url).parse(hxs)
return LocalFinance(id=department_id, data=data)
def parse_reg(self, response):
hxs = Selector(response)
h3_strings = hxs.xpath("//body/h3/text()").extract()
region_id, year = re.search('reg=(\w{3})&exercice=(\d{4})', response.url).groups()
if h3_strings and h3_strings[0].startswith(u'Aucune donn\xe9e'):
self.logger.warning("No data found for region=%s and year=%s (%s)" % (region_id, year, response.url))
return
data = RegionZoneParser(region_id, year, response.url).parse(hxs)
return LocalFinance(id=region_id, data=data)
|
kodexlab/eleve
|
eleve/segment.py
|
Python
|
lgpl-3.0
| 6,623 | 0.004379 |
""" :mod:`eleve.segment`
==========================
The segmenter is available by importing ``eleve.Segmenter``. It is used to
segment sentences (regroup tokens that goes together).
"""
import logging
from math import isnan
logger = logging.getLogger(__name__)
class Segmenter:
def __init__(self, storage, max_ngram_length=None):
""" Create a segmenter.
:param storage: A storage object that has been trained on a corpus (should have a ``query_autonomy`` method).
:param max_ngram_length: The maximum length of n-gram that can be "merged".
It should be strictly smaller to the storage's n-gram length.
"""
assert hasattr(
storage, "query_autonomy"
), "The storage object should have a query_autonomy method."
self.storage = storage
if max_ngram_length is None:
assert hasattr(
storage, "default_ngram_length"
), "The storage should have a default_ngram_length attribute."
self.max_ngram_length = storage.default_ngram_length - 1
else:
assert (
isinstance(max_ngram_length, int) and max_ngram_length > 1
), "max_ngram_length should be an integer bigger than one"
if max_ngram_length >= storage.default_ngram_length:
logger.warning(
"consider n-grams of size %d at max, BUT storage backend has a default ngram length of %s."
% (max_ngram_length, storage.default_ngram_length)
)
self.max_ngram_length = max_ngram_length
def segment(self, sentence):
""" Segment a sentence.
:param sentence: A list of tokens.
:returns: A list of sentence fragments. A sentence fragment is a list of tokens.
"""
if len(sentence) > 1000:
logger.warning(
"The sentence you want to segment is HUGE. This will take a lot of memory."
)
# sentence = (
# [self.storage.sentence_start] + sentence + [self.storage.sentence_end]
# )
# dynamic programming to segment the sentence
best_segmentation = [[]] * (len(sentence) + 1)
best_score = [0] + [float("-inf")] * len(sentence)
# best_score[1] -> autonomy of the first word
# best_score[2] -> sum of autonomy of the first two words, or autonomy of the first two
# ...
order = self.max_ngram_length
query_autonomy = self.storage.query_autonomy
for i in range(1, len(sentence) + 1):
for j in range(1, order + 1):
if i - j < 0:
break
a = query_autonomy(sentence[i - j : i])
if isnan(a):
a = -100.0
score = best_score[i - j] + a * j
if score > best_score[i]:
best_score[i] = score
best_segmentation[i] = best_segmentation[i - j] + [
sentence[i - j : i]
]
# keep the best segmentation and remove the None
best_segmentation = best_segmentation[len(sentence)]
best_segmentation = list(filter(None, best_segmentation))
# best_segmentation.pop(0)
# best_segmentation.pop()
return best_segmentation
def segment_nbest(self, sentence, nbest=3):
""" Segment a sentence.
:param sentence: A list of tokens.
:returns: A list of sentence fragments. A sentence fragment is a list of tokens.
"""
from collections import namedtuple
SegResult = namedtuple("SegResult", "score words")
if len(sentence) > 1000:
logger.warning(
"The sentence you want to segment is HUGE. This will take a lot of memory."
)
sentence = (
[self.storage.sentence_start] + sentence + [self.storage.sentence_end]
)
# dynamic programming to segment the sentence
# list of lists of SegResult
best_segmentations = [[SegResult(0.0, [])]] * (len(sentence) + 1)
best_score = [0] + [float("-inf")] * len(sentence)
# best_score[1] -> autonomy of the first word
# best_score[2] -> sum of autonomy of the first two words, or autonomy of the first two
# ...
order = self.max_ngram_length
query_autonomy = self.storage.query_autonomy
for i in range(1, len(sentence) + 1):
segmentations_at_i = []
for j in range(1, order + 1):
if i - j < 0:
break
a = query_autonomy(sentence[i - j : i])
if isnan(a):
a = -100.0
else:
a = a*j
segmentations_at_i.extend([SegResult(previous_best.score + a, previous_best.words + [sentence[i-j: i]]) for previous_best in best_segmentations[i-j] ])
best_segmentations[i] = sorted(segmentations_at_i, key=lambda x:x.score)[-nbest:]
#return [seg.words for seg in best_segmentations[-1][-nbest:]]
return [seg.words[1:-1] for seg in best_segmentations[-1][-nbest:]]
@staticmethod
def tokenInWord(w):
for i,c in enumerate(w):
yield "{}-{}_{}".format(c, "".join(w[0:max(i,0)]),"".join(w[i+1:]))
@staticmethod
def formatSentenceTokenInWord(sent):
return " ".join([c for w in sent for c in Segmenter.tokenInWord(w)])
def segmentSentenceTIW(self, sent: str) -> str:
return Segmenter.formatSentenceTokenInWord(self.segment(tuple(sent.split(" "))))
def segmentSentenceTIWBIES(self, sent:str) -> str:
tokens = tuple(sent.split(" "))
words = self.segment(tokens)
bies = []
for w in words:
chartoks = list(self.tokenInWord(w))
if len(w) == 1:
bies.append(chartoks[0] + "-S")
else:
bies.append(chartoks[0] + "-B")
for i in chartoks[1:-1]:
bies.append(i + "-I")
bies.append(chartoks[-1] + "-E")
return " ".join(bies)
def segmentSentenceBIES(self, sent: str) -> str:
tokens = tuple(sent.split(" "))
words = self.segment(tokens)
bies = []
for w in words:
if len(w) == 1:
bies.append(w[0] + "-S")
else:
bies.append(w[0] + "-B")
for i in w[1:-1]:
bies.append(i + "-I")
bies.append(w[-1] + "-E")
return " ".join(bies)
|
kiddinn/plaso
|
tests/cli/helpers/output_modules.py
|
Python
|
apache-2.0
| 2,511 | 0.002788 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the output modules CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import output_modules
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class OutputModulesArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the output modules CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [-o FORMAT]
[-w OUTPUT_FILE] [--fields FIELDS]
[--additional_fields ADDITIONAL_FIELDS]
Test argument parser.
optional arguments:
--additional_fields ADDITIONAL_FIELDS, --additional-fields ADDITIONAL_FIELDS
Defines extra fields to be included in the output, in
addition to the default fields, which are datetime,
timestamp_desc, source, source_long, message, parser,
display_name, tag.
  --fields FIELDS       Defines which fields should be included in the output.
-o FORMAT, --output_format FORMAT, --output-format FORMAT
The output format. Use "-o list" to see a list of
available output formats.
-w OUTPUT_FILE, --write OUTPUT_FILE
Output filename.
"""
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
output_modules.OutputModulesArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.output_format = 'dynamic'
options.write = 'output.dynamic'
test_tool = tools.CLITool()
output_modules.OutputModulesArgumentsHelper.ParseOptions(
options, test_tool)
self.assertEqual(test_tool._output_format, options.output_format)
self.assertEqual(test_tool._output_filename, options.write)
# Test with a configuration object missing.
with self.assertRaises(errors.BadConfigObject):
output_modules.OutputModulesArgumentsHelper.ParseOptions(options, None)
if __name__ == '__main__':
unittest.main()
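# Hypothetical usage sketch (not part of the original tests): outside the test suite
# the helper is wired into a tool's argument parser roughly as below; the option
# values are made up for illustration.
#
#   argument_parser = argparse.ArgumentParser(prog='cli_helper.py')
#   output_modules.OutputModulesArgumentsHelper.AddArguments(argument_parser)
#   options = argument_parser.parse_args(['-o', 'dynamic', '-w', 'output.dynamic'])
#   output_modules.OutputModulesArgumentsHelper.ParseOptions(options, tools.CLITool())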
|
pacoqueen/upy
|
formularios/consulta_ventas_ticket.py
|
Python
|
gpl-2.0
| 19,273 | 0.009814 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado #
# (pacoqueen@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## consulta_ventas_ticket.py - sum((PVP - IVA) * porcentaje_tarifa)
###################################################################
## NOTES:
##  - Pro-forma invoices are not counted.
##  - The search criterion for invoices is the invoice date, not the
##    payment date. The reason is simple: the goal of the query is to
##    see the amount and profit of each sale line (LDV) per family.
##    Since an invoice may be collected in several instalments, which
##    part of the payment would belong to each sale line while it has
##    not yet been fully collected?
##  - Only sale lines are counted, not services (which in any case
##    cannot be sold through the POS, so they have no ticket).
###################################################################
## Changelog:
##
###################################################################
##
###################################################################
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, sqlobject
import sys, os
try:
import pclases
except ImportError:
sys.path.append(os.path.join('..', 'framework'))
    import pclases
import datetime
try:
import geninformes
except ImportError:
sys.path.append(os.path.join('..', 'informes'))
import geninformes
try:
from treeview2pdf import treeview2pdf
except ImportError:
sys.path.append(os.path.join("..", "informes"))
from treeview2pdf import treeview2pdf
try:
from treeview2csv import treeview2csv
except ImportError:
sys.path.append(os.path.join("..", "informes"))
from treeview2pdf import treeview2pdf
from informes import abrir_pdf, abrir_csv
import ventana_progreso
class ConsultaBeneficioTicket(Ventana):
def __init__(self, objeto = None, usuario = None):
self.usuario = usuario
Ventana.__init__(self, 'consulta_ventas_ticket.glade', objeto)
connections = {'b_salir/clicked': self.salir,
'b_buscar/clicked': self.buscar,
'b_imprimir/clicked': self.imprimir,
'b_exportar/clicked': self.exportar,
'b_fecha_inicio/clicked': self.set_inicio,
'b_fecha_fin/clicked': self.set_fin}
self.add_connections(connections)
cols = (('Fecha', 'gobject.TYPE_STRING', False, True, False, None),
('T./Alb./Fra.','gobject.TYPE_STRING',False,True,True,None),
('Imp. total', 'gobject.TYPE_STRING',False,True,False,None),
('Imp. (s/IVA)','gobject.TYPE_STRING',False,True,False,None),
('Ben. sobre tarifa', 'gobject.TYPE_STRING',
False, True, False, None),
('ID','gobject.TYPE_STRING', False, False, False, None))
utils.preparar_treeview(self.wids['tv_datos'], cols)
for col in self.wids['tv_datos'].get_columns()[2:]:
for cell in col.get_cell_renderers():
cell.set_property("xalign", 1.0)
col.set_alignment(0.5)
self.wids['tv_datos'].connect("row-activated", self.abrir_producto)
self.fin = datetime.date.today()
self.inicio = self.fin
self.wids['e_fechafin'].set_text(utils.str_fecha(self.fin))
self.wids['e_fechainicio'].set_text(utils.str_fecha(self.inicio))
gtk.main()
def abrir_producto(self, tv, path, vc):
"""
        Opens the product that was double-clicked, in a new window.
"""
model = tv.get_model()
tipo_e_id = model[path][-1]
if "LDV" in tipo_e_id:
tipo, id = tipo_e_id.split(':')
ldv = pclases.LineaDeVenta.get(id)
producto = ldv.producto
if isinstance(producto, pclases.ProductoVenta):
if producto.es_rollo():
import productos_de_venta_rollos
ventana_producto = productos_de_venta_rollos.ProductosDeVentaRollos(producto, usuario = self.usuario)
elif producto.es_bala() or producto.es_bigbag():
import productos_de_venta_balas
ventana_producto = productos_de_venta_balas.ProductosDeVentaBalas(producto, usuario = self.usuario)
elif isinstance(producto, pclases.ProductoCompra):
import productos_compra
ventana_producto = productos_compra.ProductosCompra(producto, usuario = self.usuario)
def chequear_cambios(self):
pass
def rellenar_tabla(self, resultados):
"""
        Fills the model with the items from the query.
"""
model = self.wids['tv_datos'].get_model()
model.clear()
totfact = totsiniva = totbeneficio = totbeneficio_cobro = 0.0
self.wids['tv_datos'].freeze_child_notify()
self.wids['tv_datos'].set_model(None)
totcobrado = totpendiente = 0.0
total_costo = total_costo_cobrado = 0.0
for material in resultados:
if material != None:
nombre_mat = material.descripcion
else:
nombre_mat = ""
padre_mat = model.append(None, (nombre_mat,
"",
"0",
"0",
"0",
"M:%d" % (material
and material.id
or -1)))
for fecha in resultados[material]:
if fecha != None:
str_fecha = utils.str_fecha(fecha)
else:
str_fecha = ""
padre_fec = model.append(padre_mat, (str_fecha,
"",
"0",
"0",
"0",
""))
for ldv in resultados[material][fecha]:
subtotal = ldv.get_subtotal(iva = True)
subtotal_siva = ldv.get_subtotal(iva = False)
beneficio = ldv.calcular_beneficio()
costo = ldv.calcular_precio_costo() * ldv.cantidad
if ldv.f
|
peto2006/sortiment-frontent
|
sortimentGUI/__init__.py
|
Python
|
mit
| 104 | 0.009615 |
__all__ = ['gtk_element_editor', 'main_window_handler', 'sortiment', 'window_creator', 'error_handler']
|
tylertian/Openstack
|
openstack F/nova/nova/tests/cert/test_rpcapi.py
|
Python
|
apache-2.0
| 3,147 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.cert.rpcapi
"""
from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova import flags
from nova.openstack.common import rpc
from nova import test
FLAGS = flags.FLAGS
class CertRpcAPITestCase(test.TestCase):
def setUp(self):
super(CertRpcAPITestCase, self).setUp()
def tearDown(self):
super(CertRpcAPITestCase, self).tearDown()
def _test_cert_api(self, method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
expected_retval = 'foo'
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
self.call_ctxt = None
self.call_topic = None
self.call_msg = None
self.call_timeout = None
def _fake_call(_ctxt, _topic, _msg, _timeout):
self.call_ctxt = _ctxt
self.call_topic = _topic
self.call_msg = _msg
self.call_timeout = _timeout
return expected_retval
self.stubs.Set(rpc, 'call', _fake_call)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
self.assertEqual(self.call_topic, FLAGS.cert_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
def test_revoke_certs_by_user(self):
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
def test_revoke_certs_by_project(self):
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id')
def test_revoke_certs_by_user_and_project(self):
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id')
def test_generate_x509_cert(self):
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id')
def test_fetch_ca(self):
self._test_cert_api('fetch_ca', project_id='fake_project_id')
def test_fetch_crl(self):
self._test_cert_api('fetch_crl', project_id='fake_project_id')
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
|
rtfd/readthedocs.org
|
readthedocs/analytics/apps.py
|
Python
|
mit
| 224 | 0 |
"""Django app config for the analytics app."""
from django.apps import AppConfig
class AnalyticsAppConfig(AppConfig):
"""Analytics app init code."""
name = 'readthedocs.analytics'
    verbose_name = 'Analytics'
|
Anthony25/barython
|
barython/hooks/audio.py
|
Python
|
bsd-3-clause
| 363 | 0 |
#!/usr/bin/env python3
import logging
from . import SubprocessHook
logger = logging.getLogger("barython")
class PulseAudioHook(SubprocessHook):
"""
Listen on pulseaudio events with pactl
"""
def __init__(self, cmd=["pactl", "subscribe", "-n", "barython"],
*args, **kwargs):
        super().__init__(*args, **kwargs, cmd=cmd)
|
|
sondree/Master-thesis
|
Python Simulator/simulator/FMM.py
|
Python
|
gpl-3.0
| 2,479 | 0.020976 |
import numpy as np
import pygame
from sklearn.mixture import GMM
from math import sqrt, atan, pi
def emFit(results, numComponents):
if len(results) == 0:
return None
m =np.matrix(results)
gmm = GMM(numComponents,covariance_type='full', n_iter= 100, n_init = 4)
gmm.fit(results)
components = []
for componentID in xrange(numComponents):
mu = gmm.means_[componentID]
cov = gmm.covars_[componentID]
proba = gmm.weights_[componentID]
components.append((mu,cov,proba))
components = sorted(components,key=lambda x: x[0][0])
return components
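# Hypothetical usage sketch (not part of the original file): fit two mixture
# components to a tiny synthetic 2-D point cloud; the coordinates are made up.
#
#   points = [[0.1, 0.2], [0.2, 0.1], [5.0, 5.1], [5.2, 4.9]] * 10
#   for mu, cov, proba in emFit(points, 2):
#       print mu, proba   # components come back sorted by the first mean coordinate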
def drawComponents(surface, windowSize, scaleFactor, components):
if components is None:
return
colors = [(255, 150, 150),(150, 150, 255),(150, 255, 150)]
for color,(mu,cov, proba) in zip(colors[:len(components)],components):
eigenvalues, eigenvectors = np.linalg.eig(cov)
major = 2.0 * sqrt(5.991 * eigenvalues.max())
minor = 2.0 * sqrt(5.991 * eigenvalues.min())
angle1 = atan(eigenvectors[1][0]/eigenvectors[0][0])
angle2 = atan(eigenvectors[1][1]/eigenvectors[0][1])
if eigenvalues[0] > eigenvalues[1]:
angle = angle1
else:
angle = angle2
mu_x,mu_y = mu
if major < 1.0 or minor < 1.0:
continue
s = pygame.Surface((major*scaleFactor[0], minor*scaleFactor[1]),pygame.SRCALPHA, 32)
ellipse = pygame.draw.ellipse(s, color, (0, 0, major*scaleFactor[0], minor*scaleFactor[0]))
s2 = pygame.transform.rotate(s, angle*360.0/(2.0*pi))
height, width = s2.get_rect().height,s2.get_rect().width
surface.blit(s2,(mu_x*scaleFactor[0]-width/2.0,mu_y*scaleFactor[1]-height/2.0))#(mu_x*scaleFactor[0]-height/2.0,mu_y*scaleFactor[1]-width/2.0))
#s = pygame.Surface((major*scaleFactor[0], minor*scaleFactor[1]))
#s.fill((255,255,255))
#s.set_alpha(128)
#ellipse = pygame.draw.ellipse(s, blue, (0, 0, major*scaleFactor[0], minor*scaleFactor[0]))
#s3 = pygame.transform.rotate(s, angle1*360.0/(2.0*pi))
#height, width = s3.get_rect().height,s3.get_rect().width
#surface.blit(s3,(mu_x*scaleFactor[0]-width/2.0,mu_y*scaleFactor[1]-height/2.0))#(mu_x*scaleFactor[0]-height/2.0,mu_y*scaleFactor[1]-width/2.0))
#surface.blit(s,(0,0))
#print angle*360.0/(2.0*pi)
|
kiyukuta/chainer
|
tests/chainer_tests/initializer_tests/test_uniform.py
|
Python
|
mit
| 1,548 | 0 |
import unittest
from chainer import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import numpy
@testing.parameterize(*testing.product({
    'target': [
initializers.Uniform,
initializers.LeCunUniform,
initializers.HeUniform,
initializers.GlorotUniform,
],
'shape': [(2, 3), (2, 3, 4)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestUniform(unittest.TestCase):
scale = 0.1
def check_initializer(self, w):
        initializer = self.target(scale=self.scale)
initializer(w)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = self.target(scale=self.scale, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(cuda.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
testing.run_module(__name__, __file__)
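# Hypothetical usage sketch (not part of the original tests): each initializer class
# exercised above fills a pre-allocated array in place, e.g.
#
#   w = numpy.empty((2, 3), dtype=numpy.float32)
#   initializers.Uniform(scale=0.1)(w)   # w is filled with values drawn from [-0.1, 0.1]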
|
spanner888/madparts
|
setup.py
|
Python
|
gpl-3.0
| 3,599 | 0.012781 |
#!/usr/bin/env python
#
# (c) 2013 Joost Yervante Damad <joost@damad.be>
# License: GPL
VERSION='1.2.1'
import glob, sys, platform
from setuptools import setup
with open('README.md') as file:
long_description = file.read()
arch = platform.uname()[4]
extra_data_files = []
if sys.platform == 'darwin':
OPTIONS = {
'argv_emulation': True,
#'includes': ['sip', 'PyQt4', 'PyQt4.QtCore', 'PyQt4.QtGui', 'simplejson'],
#'excludes': ['PyQt4.QtDesigner', 'PyQt4.QtNetwork', 'PyQt4.QtOpenGL', 'PyQt4.QtScript', 'PyQt4.QtSql', 'PyQt4.QtTest', 'PyQt4.QtWebKit', 'PyQt4.QtXml', 'PyQt4.phonon'],
}
extra_options = dict(
setup_requires=['py2app'],
app=['madparts'],
# Cross-platform applications generally expect sys.argv to
# be used for opening files.
options=dict(py2app=OPTIONS),
)
elif sys.platform == 'win32':
import py2exe
OPTIONS = {
'includes': [
"OpenGL.arrays._buffers",
"OpenGL.arrays._numeric",
"OpenGL.arrays._strings",
"Ope
|
nGL.arrays.arraydatatype",
"OpenGL.arrays.arrayhelpers",
"OpenGL.arrays.buffers",
"OpenGL.arrays.ctypesarrays",
"OpenGL.arrays.ctypesparameters",
"OpenGL.arrays.ctypespointers",
"OpenGL.arrays.formathandler",
"OpenGL.arrays.lists",
"OpenGL.arrays.nones",
"OpenGL.arrays.numbers",
"OpenGL.arrays.numeric",
"OpenGL.arrays.numericnames",
"OpenGL.arrays.numpymodule",
"OpenGL.arrays.strings",
"OpenGL.arrays.vbo",
"OpenGL.platform.ctypesloader",
"OpenGL.platform.win32",
"OpenGL_accelerate.formathandler",
"OpenGL_accelerate.arraydatatype",
"OpenGL_accelerate.errorchecker",
"OpenGL_accelerate.latebind",
"OpenGL_accelerate.nones_formathandler",
"OpenGL_accelerate.numpy_formathandler",
"OpenGL_accelerate.vbo",
"OpenGL_accelerate.wrapper",
]
}
extra_data_files = ['msvcp90.dll',]
extra_options = dict(
setup_requires=['py2exe'],
console=['madparts'],
options=dict(py2exe=OPTIONS)
)
elif sys.platform.startswith('linux'):
extra_options = dict(
# Normally unix-like platforms will use "setup.py install"
# and install the main script as such
scripts=['madparts'],
)
if not arch in ['x86_64']:
raise Exception("unsupported arch %s" % (arch))
else:
raise Exception("unsupported platform %s" % (sys.platform))
setup(
name = 'madparts',
description = 'a functional footprint editor',
long_description = long_description,
author = 'Joost Yervante Damad',
author_email = 'joost@damad.be',
version = VERSION,
url = 'http://madparts.org/',
packages = [
'coffee',
'export',
'gui',
'inter',
'main',
'syntax',
'mutil',
],
package_data= {
'gui': [
'../COPYING', '../README.md', # dirty trick ;)
],
},
data_files = [
('share/madparts/examples', glob.glob('examples/*.coffee')),
('share/madparts/grind', glob.glob('grind/*.coffee')),
('share/madparts/coffeescript', ['coffeescript/LICENSE', 'coffeescript/README'] + glob.glob('coffeescript/*.js')),
('share/madparts/shaders', glob.glob('shaders/*.vert') + glob.glob('shaders/*.frag')),
('share/madparts/gui', ['gui/freefont.COPYING', 'gui/FreeMonoBold.ttf'] ),
] + extra_data_files,
platforms = ["Windows", "Linux", "Mac OS-X"],
**extra_options
)
|
AndreMiras/pycaw
|
examples/volume_callback_example.py
|
Python
|
mit
| 950 | 0 |
"""
IAudioEndpointVolumeCallback.OnNotify() example.
The OnNotify() callback method gets called on volume change.
"""
from __future__ import print_function
from ctypes import POINTER, cast
from comtypes import CLSCTX_ALL, COMObject
from pycaw.pycaw import (AudioUtilities, IAudioEndpointVolume,
IAudioEndpointVolumeCallback)
class AudioEndpointVolumeCallback(COMObject):
_com_interfaces_ = [IAudioEndpointVolumeCallback]
def OnNotify(self, pNotify):
print('OnNotify callback')
def main():
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
callback = AudioEndpointVolumeCallback()
volume.RegisterControlChangeNotify(callback)
for i in range(3):
volume.SetMute(0, None)
volume.SetMute(1, None)
if __name__ == "__main__":
main()
|
avtomato/HackerRank
|
Algorithms/_03_Strings/_04_Caesar_Cipher/solution.py
|
Python
|
mit
| 233 | 0 |
#!/bin/python3
import sys
n = int(input().strip())
s = input().strip()
k = int(input().strip())
d = {}
for c in (65, 97):
for i in range(26):
d[chr(i+c)] = chr((i+k) % 26 + c)
print(''.join([d.get(c, c) for c in s]))
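# Worked example (not part of the submitted solution): with k = 2 the mapping built
# above wraps around within each case, e.g. 'y' -> 'a' and 'Z' -> 'B', while
# non-letter characters fall through d.get(c, c) unchanged.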
| |
MagicStack/httptools
|
setup.py
|
Python
|
mit
| 7,252 | 0 |
import sys
vi = sys.version_info
if vi < (3, 5):
    raise RuntimeError('httptools requires Python 3.5 or greater')
else:
import os.path
import pathlib
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as build_ext
CFLAGS = ['-O2']
ROOT = pathlib.Path(__file__).parent
CYTHON_DEPENDENCY = 'Cython(>=0.29.24,<0.30.0)'
class httptools_build_ext(build_ext):
user_options = build_ext.user_options + [
('cython-always', None,
'run cythonize() even if .c files are present'),
('cython-annotate', None,
'Produce a colorized HTML version of the Cython source.'),
('cython-directives=', None,
            'Cython compiler directives'),
('use-system-llhttp', None,
'Use the system provided llhttp, instead of the bundled one'),
('use-system-http-parser', None,
'Use the system provided http-parser, instead of the bundled one'),
]
boolean_options = build_ext.boolean_options + [
'cython-always',
'cython-annotate',
'use-system-llhttp',
'use-system-http-parser',
]
def initialize_options(self):
        # initialize_options() may be called multiple times on the
# same command object, so make sure not to override previously
# set options.
if getattr(self, '_initialized', False):
return
super().initialize_options()
self.use_system_llhttp = False
        self.use_system_http_parser = False
self.cython_always = False
self.cython_annotate = None
self.cython_directives = None
def finalize_options(self):
# finalize_options() may be called multiple times on the
# same command object, so make sure not to override previously
# set options.
if getattr(self, '_initialized', False):
return
need_cythonize = self.cython_always
cfiles = {}
for extension in self.distribution.ext_modules:
for i, sfile in enumerate(extension.sources):
if sfile.endswith('.pyx'):
prefix, ext = os.path.splitext(sfile)
cfile = prefix + '.c'
if os.path.exists(cfile) and not self.cython_always:
extension.sources[i] = cfile
else:
if os.path.exists(cfile):
cfiles[cfile] = os.path.getmtime(cfile)
else:
cfiles[cfile] = 0
need_cythonize = True
if need_cythonize:
try:
import Cython
except ImportError:
raise RuntimeError(
'please install Cython to compile httptools from source')
if Cython.__version__ < '0.29':
raise RuntimeError(
'httptools requires Cython version 0.29 or greater')
from Cython.Build import cythonize
directives = {}
if self.cython_directives:
for directive in self.cython_directives.split(','):
k, _, v = directive.partition('=')
if v.lower() == 'false':
v = False
if v.lower() == 'true':
v = True
directives[k] = v
self.distribution.ext_modules[:] = cythonize(
self.distribution.ext_modules,
compiler_directives=directives,
annotate=self.cython_annotate)
super().finalize_options()
self._initialized = True
def build_extensions(self):
mod_parser, mod_url_parser = self.distribution.ext_modules
if self.use_system_llhttp:
mod_parser.libraries.append('llhttp')
if sys.platform == 'darwin' and \
os.path.exists('/opt/local/include'):
# Support macports on Mac OS X.
mod_parser.include_dirs.append('/opt/local/include')
else:
mod_parser.include_dirs.append(
str(ROOT / 'vendor' / 'llhttp' / 'include'))
mod_parser.include_dirs.append(
str(ROOT / 'vendor' / 'llhttp' / 'src'))
mod_parser.sources.append('vendor/llhttp/src/api.c')
mod_parser.sources.append('vendor/llhttp/src/http.c')
mod_parser.sources.append('vendor/llhttp/src/llhttp.c')
if self.use_system_http_parser:
mod_url_parser.libraries.append('http_parser')
if sys.platform == 'darwin' and \
os.path.exists('/opt/local/include'):
# Support macports on Mac OS X.
mod_url_parser.include_dirs.append('/opt/local/include')
else:
mod_url_parser.include_dirs.append(
str(ROOT / 'vendor' / 'http-parser'))
mod_url_parser.sources.append(
'vendor/http-parser/http_parser.c')
super().build_extensions()
with open(str(ROOT / 'README.md')) as f:
long_description = f.read()
with open(str(ROOT / 'httptools' / '_version.py')) as f:
for line in f:
if line.startswith('__version__ ='):
_, _, version = line.partition('=')
VERSION = version.strip(" \n'\"")
break
else:
raise RuntimeError(
'unable to read the version from httptools/_version.py')
setup_requires = []
if (not (ROOT / 'httptools' / 'parser' / 'parser.c').exists() or
'--cython-always' in sys.argv):
# No Cython output, require Cython to build.
setup_requires.append(CYTHON_DEPENDENCY)
setup(
name='httptools',
version=VERSION,
description='A collection of framework independent HTTP protocol utils.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/MagicStack/httptools',
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Environment :: Web Environment',
'Development Status :: 5 - Production/Stable',
],
platforms=['macOS', 'POSIX', 'Windows'],
python_requires='>=3.5.0',
zip_safe=False,
author='Yury Selivanov',
author_email='yury@magic.io',
license='MIT',
packages=['httptools', 'httptools.parser'],
cmdclass={
'build_ext': httptools_build_ext,
},
ext_modules=[
Extension(
"httptools.parser.parser",
sources=[
"httptools/parser/parser.pyx",
],
extra_compile_args=CFLAGS,
),
Extension(
"httptools.parser.url_parser",
sources=[
"httptools/parser/url_parser.pyx",
],
extra_compile_args=CFLAGS,
),
],
include_package_data=True,
test_suite='tests.suite',
setup_requires=setup_requires,
extras_require={
'test': [
CYTHON_DEPENDENCY
]
}
)
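# Hypothetical invocation sketch (not part of the original setup.py): the custom
# build_ext options declared above are passed on the command line, for example
#
#   python setup.py build_ext --inplace --cython-always
#   python setup.py build_ext --use-system-llhttp --use-system-http-parser
#
# (dashed option names map onto the underscored attributes via the usual
# setuptools convention).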
|
sfu-fas/coursys
|
faculty/event_types/position.py
|
Python
|
gpl-3.0
| 2,366 | 0.000845 |
from django import forms
from faculty.event_types.base import BaseEntryForm
from faculty.event_types.base import CareerEventHandlerBase
from faculty.event_types.choices import Choices
from faculty.event_types.base import TeachingAdjust
from faculty.event_types.fields import TeachingCreditField
from faculty.event_types.mixins import TeachingCareerEvent
from faculty.event_types.search import ChoiceSearchRule
from faculty.event_types.search import ComparableSearchRule
class AdminPositionEventHandler(CareerEventHandlerBase, TeachingCareerEvent):
"""
Given admin position
"""
EVENT_TYPE = 'ADMINPOS'
NAME = 'Admin Position'
TO_HTML_TEMPLATE = """
    {% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Position</dt><dd>{{ handler|get_display:'position' }}</dd>
<dt>Teaching Credit</dt><dd>{{ handler|get_display:'teaching_credit' }}</dd>
{% endblock %}
"""
class EntryForm(BaseEntryForm):
POSITIONS = Choices(
('UGRAD_DIRECTOR', 'Undergrad Program Director'),
('GRAD_DIRECTOR', 'Graduate Program Director'),
('DDP_DIRECTOR', 'Dual-Degree Program Director'),
('ASSOC_DIRECTOR', 'Associate Director/Chair'),
('DIRECTOR', 'School Director/Chair'),
('ASSOC_DEAN', 'Associate Dean'),
('DEAN', 'Dean'),
('OTHER', 'Other Admin Position'),
)
position = forms.ChoiceField(required=True, choices=POSITIONS)
teaching_credit = TeachingCreditField(required=False, initial=None)
SEARCH_RULES = {
'position': ChoiceSearchRule,
'teaching_credit': ComparableSearchRule,
}
SEARCH_RESULT_FIELDS = [
'position',
'teaching_credit',
]
def get_position_display(self):
return self.EntryForm.POSITIONS.get(self.get_config('position'), 'N/A')
def get_teaching_credit_display(self):
return self.get_config('teaching_credit', default='N/A')
@classmethod
def default_title(cls):
return 'Admin Position'
def short_summary(self):
position = self.get_position_display()
return 'Admin Position: {0}'.format(position)
def teaching_adjust_per_semester(self):
credit = self.get_config('teaching_credit', 0)
return TeachingAdjust(credit, 0)
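# Hypothetical usage sketch (not part of the original module): once an event using
# this handler stores 'position' and 'teaching_credit' in its config, the handler
# reports them roughly as below ("handler" is an illustrative instance name).
#
#   handler.get_position_display()          # e.g. 'Dean'
#   handler.short_summary()                 # 'Admin Position: Dean'
#   handler.teaching_adjust_per_semester()  # TeachingAdjust(credit, 0)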
|
thaim/ansible
|
lib/ansible/modules/cloud/kubevirt/kubevirt_template.py
|
Python
|
mit
| 14,884 | 0.004367 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_template
short_description: Manage KubeVirt templates
description:
- Use Openshift Python SDK to manage the state of KubeVirt templates.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
name:
description:
- Name of the Template object.
required: true
type: str
namespace:
description:
- Namespace where the Template object exists.
required: true
type: str
objects:
description:
- List of any valid API objects, such as a I(DeploymentConfig), I(Service), etc. The object
will be created exactly as defined here, with any parameter values substituted in prior to creation.
The definition of these objects can reference parameters defined earlier.
      - As part of the list, the user can also pass the I(VirtualMachine) kind. When passing I(VirtualMachine),
        the user must use the Ansible structure of the parameters, not the Kubernetes API structure. For more information,
        please take a look at the M(kubevirt_vm) module and at the EXAMPLES section, where you can see an example.
type: list
merge_type:
description:
- Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
type: list
choices: [ json, merge, strategic-merge ]
display_name:
description:
- "A brief, user-friendly name, which can be employed by user interfaces."
type: str
description:
description:
- A description of the template.
- Include enough detail that the user will understand what is being deployed...
and any caveats they need to know before deploying. It should also provide links to additional information,
such as a README file."
type: str
long_description:
description:
- "Additional template description. This may be displayed by the service catalog, for example."
type: str
provider_display_name:
description:
- "The name of the person or organization providing the template."
type: str
documentation_url:
description:
- "A URL referencing further documentation for the template."
type: str
support_url:
description:
- "A URL where support can be obtained for the template."
type: str
editable:
description:
- "Extension for hinting at which elements should be considered editable.
List of jsonpath selectors. The jsonpath root is the objects: element of the template."
      - This parameter can be used only when kubevirt addon is installed on your openshift cluster.
type: list
default_disk:
description:
- "The goal of default disk is to define what kind of disk is supported by the OS mainly in
terms of bus (ide, scsi, sata, virtio, ...)"
- The C(default_disk) parameter define configuration overlay for disks that will be applied on top of disks
during virtual machine creation to define global compatibility and/or performance defaults defined here.
      - This parameter can be used only when kubevirt addon is installed on your openshift cluster.
type: dict
default_volume:
description:
- "The goal of default volume is to be able to configure mostly performance parameters like
caches if those are exposed by the underlying volume implementation."
- The C(default_volume) parameter define configuration overlay for volumes that will be applied on top of volumes
during virtual machine creation to define global compatibility and/or performance defaults defined here.
      - This parameter can be used only when kubevirt addon is installed on your openshift cluster.
type: dict
default_nic:
description:
- "The goal of default network is similar to I(default_disk) and should be used as a template
to ensure OS compatibility and performance."
- The C(default_nic) parameter define configuration overlay for nic that will be applied on top of nics
during virtual machine creation to define global compatibility and/or performance defaults defined here.
      - This parameter can be used only when kubevirt addon is installed on your openshift cluster.
type: dict
default_network:
description:
- "The goal of default network is similar to I(default_volume) and should be used as a template
that specifies performance and connection parameters (L2 bridge for example)"
- The C(default_network) parameter define configuration overlay for networks that will be applied on top of networks
during virtual machine creation to define global compatibility and/or performance defaults defined here.
      - This parameter can be used only when kubevirt addon is installed on your openshift cluster.
type: dict
icon_class:
description:
- "An icon to be displayed with your template in the web console. Choose from our existing logo
icons when possible. You can also use icons from FontAwesome. Alternatively, provide icons through
      CSS customizations that can be added to an OpenShift Container Platform cluster that uses your template.
You must specify an icon class that exists, or it will prevent falling back to the generic icon."
type: str
parameters:
description:
- "Parameters allow a value to be supplied by the user or generated when the template is instantiated.
Then, that value is substituted wherever the parameter is referenced. References can be defined in any
field in the objects list field. This is useful for generating random passwords or allowing the user to
supply a host name or other user-specific value that is required to customize the template."
- "More information can be found at: U(https://docs.openshift.com/container-platform/3.6/dev_guide/templates.html#writing-parameters)"
type: list
version:
description:
- Template structure version.
      - This parameter can be used only when kubevirt addon is installed on your openshift cluster.
type: str
extends_documentation_fragment:
- k8s_auth_options
- k8s_state_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create template 'mytemplate'
kubevirt_template:
state: present
name: myvmtemplate
namespace: templates
display_name: Generic cirros template
description: Basic cirros template
long_description: Verbose description of cirros template
provider_display_name: Just Be Cool, Inc.
documentation_url: http://theverycoolcompany.com
support_url: http://support.theverycoolcompany.com
icon_class: icon-linux
default_disk:
disk:
bus: virtio
default_nic:
model: virtio
default_network:
resource:
resourceName: bridge.network.kubevirt.io/cnvmgmt
default_volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
objects:
- name: ${NAME}
kind: VirtualMachine
memory: ${MEMORY_SIZE}
state: present
namespace: vms
parameters:
- name: NAME
description: VM name
generate: expression
from: 'vm-[A-Za-z0-9]{8}'
- name: MEM
|
diegojromerolopez/djanban
|
src/djanban/apps/hourly_rates/forms.py
|
Python
|
mit
| 1,137 | 0.001759 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import models
from djanban.apps.hourly_rates.models import HourlyRate
from django import forms
# Hourly rate creation and edition form
class HourlyRateForm(models.ModelForm):
class Meta:
        model = HourlyRate
fields = ["name", "start_date", "end_date", "amount", "is_active"]
widgets = {
'start_date': forms.SelectDateWidget(),
'end_date': forms.SelectDateWidget(empty_label=u"Until now"),
}
def __init__(self, *args, **kwargs):
super(HourlyRateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(HourlyRateForm, self).clean()
if cleaned_data.get("end_date") and cleaned_data.get("start_date") > cleaned_data.get("end_date"):
raise ValidationError(u"Start date can't be greater that end date")
return cleaned_data
class DeleteHourlyRateForm(forms.Form):
confirmed = forms.BooleanField(label=u"Please confirm you really want to do this action", required=True)
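# Hypothetical usage sketch (not part of the original module): clean() above rejects
# ranges whose start date falls after the end date; the field values are illustrative
# (with SelectDateWidget the dates actually arrive as separate year/month/day fields,
# plain date objects are shown here only for brevity).
#
#   form = HourlyRateForm(data={"name": "Standard", "amount": 30, "is_active": True,
#                               "start_date": date(2017, 2, 1),
#                               "end_date": date(2017, 1, 1)})
#   form.is_valid()   # False: "Start date can't be greater than end date"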
|
JoeJimFlood/NFLPrediction2014
|
matchup.py
|
Python
|
mit
| 10,272 | 0.007496 |
import os
import sys
import pandas as pd
import numpy as np
from numpy.random import poisson, uniform
from numpy import mean
import time
import math
po = True
teamsheetpath = sys.path[0] + '/teamcsvs/'
compstat = {'TDF': 'TDA', 'TDA': 'TDF', #Dictionary to use to compare team stats with opponent stats
'FGF': 'FGA', 'FGA': 'FGF',
'SFF': 'SFA', 'SFA': 'SFF',
'PAT1%F': 'PAT1%A', 'PAT1%A': 'PAT1%F',
'PAT2%F': 'PAT2%A', 'PAT2%A': 'PAT2%F'}
def get_opponent_stats(opponent): #Gets summaries of statistics for opponent each week
opponent_stats = {}
global teamsheetpath
opp_stats = pd.DataFrame.from_csv(teamsheetpath + opponent + '.csv')
for stat in opp_stats.columns:
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
opponent_stats.update({stat: opp_stats[stat].mean()})
try:
opponent_stats.update({'PAT1%F': float(opp_stats['PAT1FS'].sum()) / opp_stats['PAT1FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%F': .99})
try:
opponent_stats.update({'PAT2%F': float(opp_stats['PAT2FS'].sum()) / opp_stats['PAT2FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%F': .5})
try:
opponent_stats.update({'PAT1%A': float(opp_stats['PAT1AS'].sum()) / opp_stats['PAT1AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%A': .99})
try:
opponent_stats.update({'PAT2%A': float(opp_stats['PAT2AS'].sum()) / opp_stats['PAT2AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%A': .5})
return opponent_stats
def get_residual_performance(team): #Get how each team has done compared to the average performance of their opponents
global teamsheetpath
score_df = pd.DataFrame.from_csv(teamsheetpath + team + '.csv')
residual_stats = {}
score_df['PAT1%F'] = np.nan
score_df['PAT2%F'] = np.nan
score_df['PAT1%A'] = np.nan
score_df['PAT2%A'] = np.nan
for week in score_df.index:
try:
score_df['PAT1%F'][week] = float(score_df['PAT1FS'][week]) / score_df['PAT1FA'][week]
except ZeroDivisionError:
score_df['PAT1%F'][week] = 0.99
#print ('For: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%F'][week] = float(score_df['PAT2FS'][week]) / score_df['PAT2FA'][week]
except ZeroDivisionError:
score_df['PAT2%F'][week] = 0.5
try:
score_df['PAT1%A'][week] = float(score_df['PAT1AS'][week]) / score_df['PAT1AA'][week]
except ZeroDivisionError:
score_df['PAT1%A'][week] = 0.99
#print ('Against: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%A'][week] = float(score_df['PAT2AS'][week]) / score_df['PAT2AA'][week]
except ZeroDivisionError:
score_df['PAT2%A'][week] = 0.5
opponent_stats = get_opponent_stats(score_df['OPP'][week])
for stat in opponent_stats:
if week == 1:
score_df['OPP_' + stat] = np.nan
score_df['OPP_' + stat][week] = opponent_stats[stat]
for stat in opponent_stats:
score_df['R_' + stat] = score_df[stat] - score_df['OPP_' + compstat[stat]]
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
residual_stats.update({stat: score_df['R_' + stat].mean()})
elif stat == 'PAT1%F':
residual_stats.update({stat: (score_df['R_PAT1%F'].multiply(score_df['PAT1FA'])).sum() / score_df['PAT1FA'].sum()})
elif stat == 'PAT2%F':
residual_stats.update({stat: (score_df['R_PAT2%F'].multiply(score_df['PAT2FA'])).sum() / score_df['PAT2FA'].sum()})
elif stat == 'PAT1%A':
residual_stats.update({stat: (score_df['R_PAT1%A'].multiply(score_df['PAT1AA'])).sum() / score_df['PAT1AA'].sum()})
elif stat == 'PAT2%A':
residual_stats.update({stat: (score_df['R_PAT2%A'].multiply(score_df['PAT2AA'])).sum() / score_df['PAT2AA'].sum()})
try:
residual_stats.update({'GOFOR2': float(score_df['PAT2FA'].sum()) / score_df['TDF'].sum()})
except ZeroDivisionError:
residual_stats.update({'GOFOR2': .1})
#print team
#print residual_stats
return residual_stats
def get_score(expected_scores): #Get the score for a team based on expected scores
score = 0
if expected_scores['TD'] > 0:
tds = poisson(expected_scores['TD'])
else:
tds = poisson(0.01)
score = score + 6 * tds
if expected_scores['FG'] > 0:
fgs = poisson(expected_scores['FG'])
else:
fgs = poisson(0.01)
score = score + 3 * fgs
if expected_scores['S'] > 0:
sfs = poisson(expected_scores['S'])
else:
sfs = poisson(0.01)
score = score + 2 * sfs
for td in range(tds):
go_for_2_determinant = uniform(0, 1)
if go_for_2_determinant <= expected_scores['GOFOR2']: #Going for 2
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT2PROB']:
score = score + 2
else:
continue
else: #Going for 1
#print(expected_scores['PAT1PROB'])
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT1PROB']:
score = score + 1
else:
continue
return score
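# Hypothetical usage sketch (not part of the original script): get_score() reads the
# keys below; the rates are invented per-game averages, not taken from any team sheet.
#
#   example_expected = {'TD': 2.5, 'FG': 1.8, 'S': 0.05,
#                       'GOFOR2': 0.1, 'PAT1PROB': 0.95, 'PAT2PROB': 0.5}
#   simulated_points = get_score(example_expected)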
def game(team_1, team_2,
expected_scores_1, expected_scores_2,
playoff): #Get two scores and determine a winner
score_1 = get_score(expected_scores_1)
score_2 = get_score(expected_scores_2)
if score_1 > score_2:
win_1 = 1
win_2 = 0
draw_1 = 0
draw_2 = 0
elif score_2 > score_1:
win_1 = 0
win_2 = 1
draw_1 = 0
draw_2 = 0
else:
        if playoff:
win_1 = 0.5
win_2 = 0.5
draw_1 = 0
draw_2 = 0
else:
win_1 = 0
win_2 = 0
draw_1 = 1
draw_2 = 1
summary = {team_1: [win_1, draw_1, score_1]}
    summary.update({team_2: [win_2, draw_2, score_2]})
return summary
def get_expected_scores(team_1_stats, team_2_stats, team_1_df, team_2_df): #Get the expected scores for a matchup based on the previous teams' performances
expected_scores = {}
for stat in team_1_stats:
expected_scores.update({'TD': mean([team_1_stats['TDF'] + team_2_df['TDA'].mean(),
team_2_stats['TDA'] + team_1_df['TDF'].mean()])})
expected_scores.update({'FG': mean([team_1_stats['FGF'] + team_2_df['FGA'].mean(),
team_2_stats['FGA'] + team_1_df['FGF'].mean()])})
expected_scores.update({'S': mean([team_1_stats['SFF'] + team_2_df['SFA'].mean(),
team_2_stats['SFA'] + team_1_df['SFF'].mean()])})
#print mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
# team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
expected_scores.update({'GOFOR2': team_1_stats['GOFOR2']})
pat1prob = mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
if not math.isnan(pat1prob):
expected_scores.update({'PAT1PROB': pat1prob})
else:
expected_scores.update({'PAT1PROB': 0.99})
#print(expected_scores['PAT1PROB'])
pat2prob = mean([team_1_stats['PAT2%F'] + team_2_df['PAT2AS'].astype('float').sum() / team_2_df['PAT2AA'].sum(),
team_2_stats['PAT2%A'] + team_1_df['PAT2FS'].astype('float').sum() / team_1_df['PAT2FA'].sum()])
if not math.isnan(pat2prob):
expected_scores.update({'PAT2PROB': pat2prob})
else:
expected_scores.u
|
chrisspen/asklet
|
asklet/management/commands/asklet_load_conceptnet.py
|
Python
|
lgpl-3.0
| 9,742 | 0.005646 |
#!/usr/bin/python
from __future__ import print_function
import random
import re
import datetime
import os
import sys
import time
from optparse import make_option
import urllib2
import tarfile
from multiprocessing import cpu_count
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection, reset_queries, OperationalError
from django.db.transaction import commit_on_success, commit
from django.utils import timezone
from joblib import Parallel, delayed
from six.moves import cPickle as pickle
from six.moves import range as xrange
from six.moves import input as raw_input
from six import u
from asklet import constants as c
from asklet.utils import MatrixUser
from asklet import models
class ConceptNetEdge(object):
def __init__(self, *args):
fields = [
'uri', 'rel', 'start', 'end', 'context',
'weight', 'sources', 'id', 'dataset', 'surfaceText']
# print('fields:',fields)
# print('args:',args)
assert len(args) == len(fields), '%i != %i' % (len(args), len(fields))
self.__dict__.update(zip(fields, args))
self.surfaceText = self.surfaceText.strip()
@classmethod
def from_string(cls, s):
return cls(*s.split('\t'))
@property
def surface_parts(self):
text = self.surfaceText
parts = [_ for _ in re.split('\[\[|\]\]', text) if _.strip()]
if len(parts) == 3:
return parts
@property
def target_text(self):
parts = self.surface_parts
if parts:
return parts[0].strip()
text = re.sub('[^a-zA-Z0-9]+', ' ', self.start.split('/')[-1])
text = re.sub('[ ]+', ' ', text)
return text
@property
def target_slug(self):
return self.start
@property
def question_text(self):
parts = self.surface_parts
if parts:
return '[%s] [%s]' % (parts[1].strip(), parts[2].strip())
#Not reliable. Makes broken segments.
# text = re.sub('[^a-zA-Z0-9]+', ' ', self.rel.split('/')[-1].lower() + ' ' + self.end.split('/')[-1])
# text = re.sub('[ ]+', ' ', text)
# return text
@property
def question_slug(self):
return '%s,%s' % (self.rel, self.end)
@property
def weight_int(self):
#typical=1, our typical is 2
weight = float(self.weight)*2
weight = min(max(weight, c.NO), c.YES)
return int(round(weight*1000))
def __str__(self):
return '%s->%s->%s' % (self.start, self.rel, self.end)
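# Hypothetical usage sketch (not part of the original command): from_string() expects
# the 10 tab-separated columns listed in __init__; the edge below is fabricated.
#
#   line = '\t'.join(['/a/example', '/r/IsA', '/c/en/cat', '/c/en/animal', '',
#                     '1.0', '', '42', 'conceptnet', '[[cat]] is a type of [[animal]]'])
#   edge = ConceptNetEdge.from_string(line)
#   edge.target_text      # 'cat'
#   edge.question_slug    # '/r/IsA,/c/en/animal'
#   edge.weight_int       # the float weight doubled, clamped to [c.NO, c.YES], then x1000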
def download_concept():
base = 'http://conceptnet5.media.mit.edu/downloads/current/'
html = urllib2.urlopen(base).read()
local_dir = '/tmp'
matches = re.findall('"(conceptnet5_csv_[^"]+)"', html)
if matches:
fn = matches[-1]
local_fqfn = os.path.join(local_dir, fn)
if os.path.isfile(local_fqfn):
print('File %s already downloaded' % local_fqfn)
return local_fqfn
url = base + fn
print('Downloading %s...' % url)
os.system('wget --directory-prefix=/tmp %s' % url)
return local_fqfn
else:
print(('No Conceptnet URL found! Perhaps the '
'page %s has changed?') % base, file=sys.stderr)
@commit_on_success
def process(fn, part_name, domain_slug, commit_freq=10):
print('%s: Processing...' % part_name)
connection.close()
domain = models.Domain.objects.get(slug=domain_slug)
models.SET_QUESTION_INDEX = False
models.SET_TARGET_INDEX = False
fi, _ = models.FileImport.objects.get_or_create(
domain=domain,
filename=fn.split('/')[-1],
        part=part_name)
if fi.total_lines is None:
tar = tarfile.open(fn, 'r')
fin = tar.extractfile(part_name)
        print('%s: Counting lines...' % part_name)
total = fin.read().decode('utf8').count('\n')
fi.current_line = 0
fi.total_lines = total
fi.save()
elif fi.done:
print('%s: Already complete.' % part_name)
return
else:
total = fi.total_lines
print('%s: %i lines found.' % (part_name, total))
tar = tarfile.open(fn, 'r')
fin = tar.extractfile(part_name)
skip_to_line = fi.current_line or 0
i = 0
for line in fin:
i += 1
if skip_to_line and i < skip_to_line:
continue
if i == 1 or not i % commit_freq or i == total:
print(
'%s: Processing line %i of %i %.02f%%.' \
% (part_name, i, total, i/float(total or i)*100))
sys.stdout.flush()
fi.current_line = i
fi.save()
commit()
reset_queries()
line = line.decode('utf8')
edge = ConceptNetEdge.from_string(line)
subject_lang = models.extract_language_code(edge.start)
object_lang = models.extract_language_code(edge.end)
# Ignore edges without sense.
# Note, this skips an estimated 85% of edges.
# start_sense = models.extract_sense(edge.start)
# if not start_sense:
# continue
# end_sense = models.extract_sense(edge.end)
# if not end_sense:
# continue
retry = 0
while 1:
try:
retry += 1
target = None
if not domain.language or subject_lang == domain.language:
target, _ = models.Target.objects.get_or_create(
domain=domain,
slug=edge.target_slug,
defaults=dict(
text=edge.target_text
)
)
target.conceptnet_subject = edge.start
target.enabled = True
target.save()
question = None
if not domain.language or object_lang == domain.language:
question, _ = models.Question.objects.get_or_create(
domain=domain,
slug=edge.question_slug,
defaults=dict(
text=edge.question_text
)
)
question.conceptnet_predicate = edge.rel
question.conceptnet_object = edge.end
question.enabled = True
question.save()
if target and question:
weight, _ = models.TargetQuestionWeight.objects.get_or_create(
target=target,
question=question,
defaults=dict(
weight=edge.weight_int,
count=1000,
))
weight.text = edge.surfaceText
weight.save()
break
except OperationalError as e:
if 'deadlock' in str(e):
print('%s: Retry %i after deadlock.' % (part_name, retry))
else:
raise
print('%s: Complete.' % part_name)
class Command(BaseCommand):
help = 'Loads targets, questions and weights from a ConceptNet5 CSV dump file.'
args = ''
option_list = BaseCommand.option_list + (
#make_option('--seed', default=None),
make_option('--domain', default=''),
make_option('--fn', default=''),
make_option('--parts', default=20),
make_option('--commit-freq', default=10),
make_option('--part-name-template',
default='assertions/part_%02i.csv'),
)
def handle(self, *args, **options):
tmp_settings = settings.DEBUG
settings.DEBUG = False
try:
commit_freq = int(options['commit_freq'])
parts = int(options['parts'])
part_name_template = option
|
sparkslabs/kamaelia_
|
Sketches/RJL/Packages/Examples/P2PStreamPeer/p2pstreampeer.py
|
Python
|
apache-2.0
| 10,507 | 0.007233 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""\
===========================================
Peer-to-Peer Streaming System (client part)
===========================================
This example demonstrates the use of BitTorrent and HTTP to download, share
and reconstruct a data stream in real-time.
It expects a webserver hosting a folder that contains:
- meta.txt (a file containing the number of chunks/torrents in the stream
so far as a decimal, ASCII string)
- 1.torrent
- 2.torrent
- ...
- 123.torrent (if meta.txt contained "123")
Only this metainfo is downloaded using HTTP. The stream itself is downloaded
(and uploaded to other downloaders) using BitTorrent.
Other users must upload the stream's chunks using BitTorrent for this demo
to work.
To listen to/view the stream, just point your favourite media player
(say, XMMS) at the reconstructed file after it's been downloading for a minute
or so.
"""
import time
from Axon.Component import component
from Kamaelia.Chassis.Pipeline import pipeline
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Community.RJL.Kamaelia.File.TriggeredFileReader import TriggeredFileReader
from Kamaelia.Community.RJL.Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
from Kamaelia.Community.RJL.Kamaelia.Protocol.Torrent.TorrentPatron import TorrentPatron
from Kamaelia.Community.RJL.Kamaelia.Protocol.Torrent.TorrentIPC import TIPCNewTorrentCreated, TIPCTorrentStatusUpdate
from Kamaelia.Community.RJL.Kamaelia.Util.Clock import CheapAndCheerfulClock
from Kamaelia.Community.RJL.Kamaelia.Util.DataSource import TriggeredSource
from Axon.Ipc import shutdown, producerFinished # assumed import for the shutdown/producerFinished messages used in the components below
class StreamReconstructor(component):
"""\
StreamReconstructor()
This component receives reports on the status/completion of BitTorrent
downloads from a TorrentPatron instance. It keeps a record of the
order in which torrents were started and waits until the first is
finished. It then outputs the filename of this torrent and removes
it from its list. Then it waits for the second torrent (now the first
on the list) to finish downloading, then outputs its filename and so on.
If later torrents finish before earlier ones, their filenames are not
    output until all of their predecessors have finished.
    The purpose of this is to output the names of files whose contents should
be concatenated to a master file to reconstruct the stream.
"""
def main(self):
torrents = []
while 1:
yield 1
while self.dataReady("inbox"):
msg = self.recv("inbox")
if isinstance(msg, TIPCNewTorrentCreated):
torrents.append([msg.torrentid, msg.savefolder]) # add the new torrent to the list of known torrents
elif isinstance(msg, TIPCTorrentStatusUpdate):
# if the status update is about the oldest torrent that
# has not been completed prior to now, then...
if len(torrents) > 0 and msg.torrentid == torrents[0][0]:
# if this oldest torrent is now complete
if msg.statsdictionary.get("fractionDone",0) == 1:
# forward on the name of the file downloaded in this torrent
self.send(torrents[0][1], "outbox")
torrents.pop(0) # and remove it from our list of torrents that we care about
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, shutdown) or isinstance(msg, producerFinished):
# if we are being told to shutdown then do so
self.send(producerFinished(self), "signal")
return
self.pause()
class PartsFilenameGenerator(component):
"""\
PartsFilenameGenerator()
Arguments:
- prefix - string to prepend to the id of a torrent to make its URL
- [suffix] - string to append to the id of the torrent to make the URL
defaults to ".torrent"
Generate the URLs of the .torrents that make up the stream
from reports of the total number of chunks/torrents in the stream
that are received on "inbox".
e.g. Assuming it was created as
PartsFilenameGenerator("http://www.example.com/", ".torrent"),
Send it "3" and it will output (one message listed per line):
- "http://www.example.com/1.torrent"
- "http://www.example.com/2.torrent"
- "http://www.example.com/3.torrent"
Then send it "3" again and it will output nothing.
Now send it "5" and it will output:
- "http://www.example.com/4.torrent"
- "http://www.example.com/5.torrent"
"""
    def __init__(self, prefix, suffix = ".torrent"):
self.prefix = prefix
self.suffix = suffix
super(self, PartsFilenameGenerator).__init__()
def main(self):
        highestsofar = 0 # we have not outputted any torrent URLs so far
while 1:
yield 1
while self.dataReady("inbox"):
msg = int(self.recv("inbox"))
# output the URLs of all the torrents whose numbers are > the
# number of last torrent output and <= the value of message received
while highestsofar < msg:
highestsofar += 1
self.send(self.prefix + str(highestsofar) + self.suffix, "outbox")
while self.dataReady("control"):
msg = self.recv("control"):
if isinstance(msg, shutdown) or isinstance(msg, producerFinished):
self.send(producerFinished(self), "signal")
return
self.pause()
def P2PStreamer(torrentsfolder):
"""\
Arguments:
- torrentsfolder, e.g. "http://my.server.example.org/radioFoo/"
"""
# Create a pipeline of components whose net result is to output the contents of a certain URL
# (torrentsfolder + metafilename) every 60 seconds (the contents at the time of output, i.e.
# it fetches the page every 60 seconds).
poller = pipeline(
# This generates a message every 60 seconds to wake TriggeredSource
# allowing us to poll the meta file without busy-waiting.
CheapAndCheerfulClock(60.0),
# This sends the string (torrentsfolder + "meta.txt") every time it receives a message
# This string will be the URL of the meta file on the torrent hosting website
# e.g. "http://my.server.example.org/radioFoo/meta.txt"
TriggeredSource(torrentsfolder + "meta.txt"),
# SimpleHTTPClient retrieves the resource specified by the message it receives,
# which will be URL string.
        # i.e. It fetches the page whose URL is (torrentsfolder + "meta.txt") (the string
# produced by TriggeredSource) and forwards on the contents of that page.
# The contents of that particular page will always be a number
# (in the form of a decimal ASCII string) which represents the number of
# 'chunks
|
magnunleno/Rurouni
|
tests/test_table_migration.py
|
Python
|
gpl-3.0
| 2,788 | 0.002869 |
#!/usr/bin/env python
# encoding: utf-8
import pytest
from conftests import *
from rurouni.exceptions import *
from rurouni.types import *
from rurouni import Database, Column, Table
def test_column_appending(ldb):
'''
Checks column appending. To simulate this behaviour just adds two different
classes pointing to the same table.
'''
# First declaration
class Client(Table):
__db__ = ldb.db
pass
ldb.flush()
# Second declaration
class NewClient(Table):
__db__ = ldb.db
__tablename__ = 'client'
name = Column(String)
# Check logs
logs = ldb.getLog()
assert logs[0] == 'PRAGMA table_info("client")'
assert logs[1] == 'ALTER TABLE client ADD name VARCHAR'
assert logs[2] == 'COMMIT'
ldb.destroy()
def test_column_removal(ldb):
'''
Checks column removal. To simulate this behaviour just adds two different
    classes pointing to the same table.
    For this, the db.autoremove_columns flag needs to be set to True.
'''
ldb.db.autoremove_columns = True
# First declaration
    class Client(Table):
__db__ = ldb.db
firstname = Column(String)
lastname = Column(String)
ldb.flush()
# Second declaration
class NewClient(Table):
__db__ = ldb.db
__tablename__ = 'client'
firstname = Column(String)
# Check logs
logs = ldb.getLog()
assert logs[0] == 'PRAGMA table_info("client")'
assert logs[1] == 'ALTER TABLE client RENAME TO migration_tmp'
assert logs[2] == 'COMMIT'
assert logs[3] == 'CREATE TABLE client ('
assert logs[4] == 'id INTEGER NOT NULL,'
assert logs[5] == 'firstname VARCHAR,'
assert logs[6] == 'PRIMARY KEY (id)'
assert logs[7] == ')'
assert logs[8] == 'COMMIT'
assert logs[9] == 'INSERT INTO client SELECT id ,firstname from migration_tmp'
assert logs[10] == 'COMMIT'
assert logs[11] == 'DROP TABLE migration_tmp'
assert logs[12] == 'COMMIT'
ldb.destroy()
def test_table_removal(tmp_ldb):
'''
Test table removal. For this feature the db.autoClean() must be called at
    the end of all table definitions.
    Also, the database needs to be persistent.
'''
# Define two tables
class Client(Table):
__db__ = tmp_ldb.db
pass
class Profession(Table):
__db__ = tmp_ldb.db
pass
# Reopen db and define only one table
tmp_ldb.reopen()
class Client(Table):
__db__ = tmp_ldb.db
pass
tmp_ldb.flush() # Flush output
tmp_ldb.db.autoClean() # Autoclean tables
# Table profession must be dropped
logs = tmp_ldb.getLog()
assert logs[0] == 'DROP TABLE profession'
assert logs[1] == 'COMMIT'
# Remove file
tmp_ldb.whipeout()
|
sysoevss/WebApps17
|
data.py
|
Python
|
mit
| 2,774 | 0.004326 |
# coding=UTF-8
'''
Created on 24.09.2017
@author: sysoev
'''
from google.appengine.ext import db
from google.appengine.api import users
import datetime
import time
import logging
from myusers import MyUser
def force_unicode(string):
if type(string) == unicode:
return string
return string.decode('utf-8')
class Project(db.Model):
    name = db.StringProperty(multiline=False)
    # assumed running counter of requests, used by addRequests() below
    number = db.IntegerProperty(default=0)
def getProjectsList(user):
return None
def updateProject(key, name):
p = Project.get(key)
if not p:
return
p.name = name
p.put()
def addProject(name):
p = Project()
p.name = name
p.put()
return p.key()
class UserProject(db.Model):
user_key = db.ReferenceProperty(MyUser)
project_key = db.ReferenceProperty(Project)
number = 0
def addUserProject(user_name, project_key_str):
user_query = MyUser.all()
user = user_query.filter('username = ', user_name).get()
if user is None:
return None
true_project_key = Project.get(project_key_str).key()
if check_user_have_project(user, true_project_key):
return False
up = UserProject()
up.user_key = user.key()
up.project_key = true_project_key
up.put()
return True
def check_user_have_project(user, true_project_key):
user_project_keys = [user_proj.project_key.key() for user_proj in
UserProject.all().filter('user_key = ', user.key()).fetch(None)]
return true_project_key in user_project_keys
def deleteUserProject(user_key, project_key):
query = UserProject.all()
query.filter('user_key = ', MyUser.get(user_key)).filter('project_key = ', Project.get(project_key))
user_project = query.get()
if user_project is None:
return None
# project.key().delete()
db.delete(user_project.key())
return True
def getUserProjects(user):
    if user is None:
return []
query = UserProject.all().filter('user_key = ', user.key())
return [user_project.project_key for user_project in query]
# return [Project.get(user_project.project_key) for user_project in query]
class Request(db.Model):
    number = db.IntegerProperty()
    name = db.StringProperty()
    description = db.StringProperty(multiline=True)
    state = db.IntegerProperty()
perfomer = db.ReferenceProperty() #???
def addRequests(project_key, name, description):
print("log")
req = Request(parent=project_key)
req.name = name
req.description = description
    req.perfomer = None
req.state = 1
    req.number = Request.all().ancestor(project_key).count() + 1
req.put()
    project = Project.get(project_key)
    project.number += 1  # assumed per-project request counter (see Project model above)
    project.put()
return True
def getRequests(project_key):
if project_key is None:
return []
    query = Request.all().ancestor(project_key)
return query
|
tiffanyj41/hermes
|
src/utils/save_load.py
|
Python
|
apache-2.0
| 3,756 | 0.004526 |
import csv
import gzip
def save_vector(vector, output_fname):
"""
Save the any type of vector for future use.
This could be ratings, predictions or the content vector
Results need to be collected to the local history before being read out
Args:
vector: either user ratings, predictions or the content vector
output_fname (str): Local file path to store the vector
"""
if output_fname.endswith('.gz'):
output_file = gzip.open(output_fname, 'w')
else:
output_file = open(output_fname, 'w')
csv_writer = csv.writer(output_file, delimiter=';')
for v in vector:
csv_writer.writerow(v)
output_file.close()
def load_ratings(input_fname):
"""
Loads the rating or predicted rating arrays into the format of int(user_id), int(item_id), float(rating)
The ratings array can then be put into spark by using sc.parallelize()
If you would then like a queriable ratings vector you would follow something similar to the following
ratings_sc = sc.parallelize(ratings)
fields = [StructField("user", LongType(),True),StructField("item", LongType(), True),\
StructField("rating", FloatType(), True) ]
schema = StructType(fields)
ratings_df = sqlCtx.createDataFrame(ratings_sc, schema)
ratings_df.registerTempTable("ratings")
Args:
input_fname (str): Local file path where the vector is stored
Returns:
ratings: array of user ratings or predicted rating
"""
ratings = []
if input_fname.endswith('.gz'):
input_file = gzip.open(input_fname, 'rb')
else:
input_file = open(input_fname, 'rb')
csv_reader = csv.reader(input_file, delimiter=';')
for line in csv_reader:
ratings.append((int(line[0]), int(line[1]), float(line[2])))
return ratings
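# Hedged round-trip sketch tying save_vector and load_ratings together; the
# file name and the sample rating triples are illustrative only.
def _example_ratings_roundtrip(tmp_fname='/tmp/example_ratings.csv'):
    ratings = [(1, 10, 4.0), (1, 11, 2.5), (2, 10, 5.0)]
    save_vector(ratings, tmp_fname)
    # returns [(1, 10, 4.0), (1, 11, 2.5), (2, 10, 5.0)]
    return load_ratings(tmp_fname)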
def load_content_vector(input_fname):
"""
Loads the content vector array into the format of int(item_id), array[0, 1, 0, ....., 0.777773, 0]
The content vector array can then be put into spark by using sc.parallelize()
Args:
input_fname (str): Local file path where the vector is stored
Returns:
content_vector: array of the content vector
"""
content_vector = []
if input_fname.endswith('.gz'):
input_file = gzip.open(input_fname, 'rb')
else:
input_file = open(input_fname, 'rb')
csv_reader = csv.reader(input_file, delimiter=';')
for line in csv_reader:
item = int(line[0])
content1 = line[1].strip("[]")
content = [float(i) for i in str.split(content1, ' ')]
content_vector.append((item, content))
    return content_vector
def save_uv_to_hadoop(vector, output_name):
vector.map(lambda x: ','.join(map(str,x))).saveAsTextFile(output_name)
def load_uv_from_hadoop(input_name, sc, num_partitions=20):
uv = sc.textFile(input_name).map(parseText)\
.repartition(num_partitions)
return uv
def parseText(row):
row = row.split(',')
return (int(row[0]), int(row[1]), float(row[2]))
def rm_hdfs_dir(hdfs_dir):
cmd = "hadoop fs -rm -R " + hdfs_dir
|
import subprocess
cmd_output = subprocess.check_output(cmd, shell=True)
return cmd_output
def save_to_hadoop(vector, output_name):
import subprocess
try:
# overwrite the past vector that was saved
rm_hdfs_dir(output_name)
except subprocess.CalledProcessError as e:
# hdfs directory "output_name" does not exist
# do nothing
pass
# save vector as output_name in hdfs
vector.saveAsPickleFile(output_name)
def load_from_hadoop(input_name,sc, num_partitions=20):
cv = sc.pickleFile(input_name).repartition(num_partitions)
return cv
|
TryExceptElse/pysheetdata
|
eval/parser.py
|
Python
|
mit
| 4,789 | 0 |
"""
functions for evaluating spreadsheet functions
primary function is parse, which the rest revolves around
evaluate should be called with the full string by a parent program
A note on exec:
This uses the exec function repeatedly, and where possible, use of it
should be minimized, but the intention of this is only meant to be run
on trusted spreadsheets. Future development of this may focus on it being
more secure, but the primary goal is simply to evaluate the most common
functions, regardless the ability for code to be injected.
Another note:
this whole thing could stand to be redone
"""
# import spreadsheet mirroring functions
import eval.functions as functions
import eval.translate as translate
import eval.storage as global_file # historical reasons for name
__author__ = 'user0'
def evaluate(s, reference_dictionary=None):
# if included, reference dictionary is a dictionary of relevant
# cell references.
# alternatively, if reference_dictionary is None, it is presumed
# that it is not needed to replace references with values in the
# formula. The reference_type arg, if none, defaults to 'sheet'
if s[0] == '=':
# get rid of the equals sign at the beginning of the formula
s = s[1:]
# send reference dictionary to storage
global_file.formulas = reference_dictionary
# I feel like I'm forgetting something else here
return parse(s)
def parse(s, function=None):
# returns evaluation of formula via recursive function;
# before this function is run, dependencies should be
# identified and evaluated
replace = {}
it = 0
level = 0
# replace references with cell values
s = s.lower()
# for formula in global_file.formulas:
# if formula in s:
# s = s.replace(formula, str(
# global_file.formulas[formula].return_value()))
# replace values with python equivalents
# ('^' with '**' for example)
s = translate.spreadsheet_replace(s)
# evaluate formula
for char in s:
if char == '(':
level += 1
if level == 1:
parent_start = it
if char == ')':
level -= 1
if level == 0:
parent_close = it
prefix = get_prefix(s, parent_start)
                body = s[parent_start + 1: parent_close]
formula = '{}({})'.format(prefix, body)
                replace[formula] = str(parse(body, prefix))
                verbose('replacing {} with {}'.format(formula,
replace[formula]))
it += 1
# replace strings
for entry in replace:
s = s.replace(entry, replace[entry])
# depending on the presence of a function, either simply evaluate,
# or use a function from functions
if function:
# if function is in the replacement dictionary,
# replace it with that entry
if function in functions.function_replace:
function = functions.function_replace[function]
else:
            print('function %s was not in function dictionary' % function)
# function just stopped sounding like a word
# insert the formula in a python-readable format
body_strings = s.split(',') # this is used below
exec_string = '%s(body_strings)' % function
else:
# replace references with values and find result
s = s.lower()
for reference in global_file.formulas:
while reference.lower() in s:
replacement_cell = global_file.formulas[reference]
if replacement_cell.data_type == 'string' and \
not replacement_cell.script:
replacement = '\'%s\'' % replacement_cell.text
else:
replacement = replacement_cell.value
s = s.replace(reference.lower(), replacement)
exec_string = s
exec_string = eval_append(exec_string)
verbose(exec_string)
exec(exec_string)
return global_file.returned
def get_prefix(formula_string, start):
alpha = 'abcdefghijklmnopqrstuvwxyz'
number = '.0123456789'
prefix = ''
string_position = start - 1
while True:
character = formula_string[string_position]
if string_position >= 0:
if character in alpha or character in number:
prefix = character + prefix
else:
return prefix
else:
return prefix
string_position -= 1
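# Hedged worked example of get_prefix above: scanning backwards from the
# opening parenthesis collects 'm', 'u', 's' and stops at the start of the
# string. The formula string is made up for illustration.
def _example_get_prefix():
    formula = 'sum(a1,a2)*2'
    return get_prefix(formula, formula.index('('))  # -> 'sum'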
def eval_append(s):
prefix = 'global_file.returned = '
return prefix + s
def verbose(s):
# if verbose setting, print s
if global_file.verbose:
print(s)
|
JeremyGrosser/python-eventlet
|
tests/stdlib/test_urllib2_localnet.py
|
Python
|
mit
| 410 | 0.007317 |
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import threading
from eventlet.green import socket
from eventlet.green import urllib2
patcher.inject('test.test_urllib2_localnet',
               globals(),
               ('BaseHTTPServer', BaseHTTPServer),
('threading', threading),
('socket', socket),
('urllib2', urllib2))
if __name__ == "__main__":
test_main()
|
DemocracyClub/UK-Polling-Stations
|
polling_stations/apps/data_importers/management/commands/import_wyre_forest.py
|
Python
|
bsd-3-clause
| 949 | 0.001054 |
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "WYE"
addresses_name = "2021-03-29T13:16:
|
10.236797/Democracy_Club__06May2021.tsv"
stations_name = "2021-03-29T13:16:10.236797/Democracy_Club__06May2021.tsv"
elections = ["2021-05-06"]
csv_delimiter = "\t"
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"10003382058", # THE PATCH, LEIGHT LANE, RIBBESFORD, BEWDLEY
]:
return None
if record.addressline6 in [
"DY12 2TN",
|
"DY10 3HJ",
"DY10 2QD",
"DY10 3TF",
"DY11 5QT",
"DY10 3HH",
"DY10 1SB",
"DY10 1LS",
"DY10 3EL",
]:
return None
return super().address_record_to_dict(record)
|
jgrandguillaume/vertical-ngo
|
logistic_budget/wizard/cost_estimate.py
|
Python
|
agpl-3.0
| 1,462 | 0 |
# -*- coding: utf-8 -*-
#
# Author: Joël Grand-Guillaume
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm
from openerp.tools.translate import _
class logistic_requisition_cost_estimate(orm.TransientModel):
_inherit = 'logistic.requisition.cost.estimate'
def _check_requisition(self, cr, uid, requisition, context=None):
""" Check the rules to create a cost estimate from the
requisition
:returns: list of tuples ('message, 'error_code')
"""
errors = []
if not requisition.budget_holder_id:
error = (_('The requisition must be validated '
'by the Budget Holder.'),
'NO_BUDGET_VALID')
errors.append(error)
return errors
|
dmccloskey/SBaaS_rnasequencing
|
SBaaS_rnasequencing/stage01_rnasequencing_analysis_postgresql_models.py
|
Python
|
mit
| 2,579 | 0.027918 |
from SBaaS_base.postgresql_orm_base import *
class data_stage01_rnasequencing_analysis(Base):
__tablename__ = 'data_stage01_rnasequencing_analysis'
id = Column(Integer, Sequence('data_stage01_rnasequencing_analysis_id_seq'), primary_key=True)
analysis_id = Column(String(500))
experiment_id = Column(String(50))
    sample_name_abbreviation = Column(String(500)) # equivalent to sample_name_abbreviation
sample_name = Column(String(500)) # equivalent to sample_name_abbreviation
time_point = Column(String(10)) # converted to intermediate in lineage analysis
analysis_type = Column(String(100)); # time-course (i.e., multiple time points), paired (i.e., control compared to multiple replicates), group (i.e., single grouping of samples).
used_ = Column(Boolean);
comment_ = Column(Text);
    __table_args__ = (
UniqueConstraint('experiment_id','sample_name_abbreviation','sample_name','time_point','analysis_type','analysis_id'),
)
def __init__(self,
row_dict_I,
):
self.analysis_id=row_dict_I['analysis_id'];
self.experiment_id=row_dict_I['experiment_id'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.sample_name=row_dict_I['sample_name'];
self.time_point=row_dict_I['time_point'];
self.analysis_type=row_dict_I['analysis_type'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
def __set__row__(self,analysis_id_I,
experiment_id_I,
sample_name_abbreviation_I,
sample_name_I,
time_point_I,
analysis_type_I,
used__I,
comment__I):
self.analysis_id=analysis_id_I
self.experiment_id=experiment_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
self.sample_name=sample_name_I
self.time_point=time_point_I
self.analysis_type=analysis_type_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'analysis_id':self.analysis_id,
'experiment_id':self.experiment_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'sample_name':self.sample_name,
'time_point':self.time_point,
'analysis_type':self.analysis_type,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
|
CodaMais/CodaMais
|
CodaMais/user/managers.py
|
Python
|
gpl-3.0
| 1,179 | 0 |
# standard library
import logging
# Django
from django.contrib.auth.models import BaseUserManager
# logger instance
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class UserManager(BaseUserManager):
def create_user(self, email, password, username, first_name, **kwargs):
logger.info("Creating user.")
        user = self.model(email=self.normalize_email(email),
                          username=username,
first_name=first_name,
is_active=False,
**kwargs)
user.set_password(password)
user.save(using=self.db)
return user
def create_superuser(self, email, password,
first_name, **kwargs):
logger.info("Creating superuser.")
        user = self.model(email=self.normalize_email(email),
                          first_name=first_name,
is_staff=True,
is_active=True,
is_superuser=True,
**kwargs)
user.set_password(password)
user.save(using=self.db)
return user
|
shakna-israel/rst2pdf
|
gui/Ui_configdialog.py
|
Python
|
mit
| 7,900 | 0.003418 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'configdialog.ui'
#
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(993, 455)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/kaddressbook.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
self.verticalLayout_6 = QtGui.QVBoxLayout(Dialog)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.splitter = QtGui.QSplitter(Dialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.pagelist = QtGui.QListWidget(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pagelist.sizePolicy().hasHeightForWidth())
self.pagelist.setSizePolicy(sizePolicy)
self.pagelist.setMaximumSize(QtCore.QSize(180, 16777215))
self.pagelist.setObjectName("pagelist")
self.layoutWidget = QtGui.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.container = QtGui.QScrollArea(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(5)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.container.sizePolicy().hasHeightForWidth())
self.container.setSizePolicy(sizePolicy)
self.container.setFrameShape(QtGui.QFrame.NoFrame)
self.container.setWidgetResizable(True)
self.container.setObjectName("container")
        self.scrollAreaWidgetContents = QtGui.QWidget(self.container)
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 241, 399))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.layout = QtGui.QVBoxLayout()
self.layout.setObjectName("layout")
self.verticalLayout_3.addLayout(self.layout)
self.container.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.container)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.pushButton_2 = QtGui.QPushButton(self.layoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.tabWidget = QtGui.QTabWidget(self.splitter)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.scrollArea = QtGui.QScrollArea(self.tab)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents_2 = QtGui.QWidget(self.scrollArea)
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 532, 405))
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_2)
self.verticalLayout_4.setSpacing(3)
self.verticalLayout_4.setContentsMargins(0, 3, 0, -1)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.zoomin = QtGui.QToolButton(self.scrollAreaWidgetContents_2)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icons/viewmag+.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zoomin.setIcon(icon1)
self.zoomin.setObjectName("zoomin")
self.horizontalLayout_4.addWidget(self.zoomin)
self.zoomout = QtGui.QToolButton(self.scrollAreaWidgetContents_2)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/icons/viewmag-.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zoomout.setIcon(icon2)
self.zoomout.setObjectName("zoomout")
self.horizontalLayout_4.addWidget(self.zoomout)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem1)
self.verticalLayout_4.addLayout(self.horizontalLayout_4)
self.preview = QtGui.QLabel(self.scrollAreaWidgetContents_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.preview.sizePolicy().hasHeightForWidth())
self.preview.setSizePolicy(sizePolicy)
self.preview.setFrameShape(QtGui.QFrame.NoFrame)
self.preview.setObjectName("preview")
self.verticalLayout_4.addWidget(self.preview)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem2)
self.scrollArea.setWidget(self.scrollAreaWidgetContents_2)
self.verticalLayout_2.addWidget(self.scrollArea)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName("tab_2")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.tab_2)
self.verticalLayout_5.setMargin(0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.snippet = QtGui.QTextBrowser(self.tab_2)
self.snippet.setObjectName("snippet")
self.verticalLayout_5.addWidget(self.snippet)
self.tabWidget.addTab(self.tab_2, "")
self.verticalLayout_6.addWidget(self.splitter)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.pushButton_2, QtCore.SIGNAL("clicked()"), Dialog.accept)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Bookrest Settings", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("Dialog", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.zoomin.setText(QtGui.QApplication.translate("Dialog", "...", None, QtGui.QApplication.UnicodeUTF8))
self.zoomout.setText(QtGui.QApplication.translate("Dialog", "...", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("Dialog", "Preview", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtGui.QApplication.translate("Dialog", "Output", None, QtGui.QApplication.UnicodeUTF8))
import icons_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
netconstructor/django-activity-stream
|
actstream/views.py
|
Python
|
bsd-3-clause
| 3,684 | 0.011129 |
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from actstream.models import Follow, Action, user_stream, actor_stream, model_stream
@login_required
def follow_unfollow(request, content_type_id, object_id, follow=True):
"""
    Creates a follow relationship such that ``request.user`` starts following the actor defined by ``content_type_id``, ``object_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
actor = get_object_or_404(ctype.model_class(), pk=object_id)
lookup = {
'user': request.user,
'content_type': ctype,
'object_id': object_id,
}
if follow:
Follow.objects.get_or_create(**lookup)
return type('Created', (HttpResponse,), {'status_code':201})()
Follow.objects.get(**lookup).delete()
return type('Deleted', (HttpResponse,), {'status_code':204})()
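# The dynamically built HttpResponse subclasses above are just a compact way
# to return bare 201/204 responses. A more explicit equivalent, shown purely
# for illustration, would be:
def _status_response(status_code):
    response = HttpResponse()
    response.status_code = status_code
    return response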
@login_required
def stream(request):
"""
Index page for authenticated user's activity stream. (Eg: Your feed at github.com)
"""
return render_to_response('activity/actor.html', {
'ctype': ContentType.objects.get_for_model(request.user),
'actor':request.user,'action_list':user_stream(request.user)
}, context_instance=RequestContext(request))
def followers(request, content_type_id, object_id):
"""
Creates a listing of ``User``s that follow the actor defined by ``content_type_id``, ``object_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
follows = Follow.objects.filter(content_type=ctype, object_id=object_id)
actor = get_object_or_404(ctype.model_class(), pk=object_id)
return render_to_response('activity/followers.html', {
'followers': [f.user for f in follows], 'actor':actor
}, context_instance=RequestContext(request))
def user(request, username):
"""
``User`` focused activity stream. (Eg: Profile page twitter.com/justquick)
"""
user = get_object_or_404(User, username=username)
return render_to_response('activity/actor.html', {
'ctype': ContentType.objects.get_for_model(User),
'actor':user,'action_list':actor_stream(user)
}, context_instance=RequestContext(request))
def detail(request, action_id):
"""
``Action`` detail view (pretty boring, mainly used for get_absolute_url)
"""
return render_to_response('activity/detail.html', {
'action': get_object_or_404(Action, pk=action_id)
    }, context_instance=RequestContext(request))
def actor(request, content_type_id, object_id):
"""
    ``Actor`` focused activity stream for actor defined by ``content_type_id``, ``object_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
actor = get_object_or_404(ctype.model_class(), pk=object_id)
return render_to_response('activity/actor.html', {
'action_list': actor_stream(actor), 'actor':actor,'ctype':ctype
}, context_instance=RequestContext(request))
def model(request, content_type_id):
"""
``Actor`` focused activity stream for actor defined by ``content_type_id``, ``object_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
actor = ctype.model_class()
return render_to_response('activity/actor.html', {
'action_list': model_stream(actor),'ctype':ctype,'actor':ctype#._meta.verbose_name_plural.title()
}, context_instance=RequestContext(request))
|
eXistenZNL/SickRage
|
sickbeard/clients/deluged_client.py
|
Python
|
gpl-3.0
| 6,499 | 0.005539 |
# Author: Paul Wollaston
# Contributions: Luke Mullan
#
# This client script allows connection to Deluge Daemon directly, completely
# circumventing the requirement to use the WebUI.
import json
import traceback
from base64 import b64encode
import sickbeard
from sickbeard import logger
from .generic import GenericClient
from synchronousdeluge import DelugeClient
class DelugeDAPI(GenericClient):
drpc = None
def __init__(self, host=None, username=None, password=None):
super(DelugeDAPI, self).__init__('DelugeD', host, username, password)
def _get_auth(self):
if not self.connect():
return None
return True
def connect(self, reconnect = False):
hostname = self.host.replace("/", "").split(':')
if not self.drpc or reconnect:
self.drpc = DelugeRPC(hostname[1], port = hostname[2], username = self.username, password = self.password)
return self.drpc
def _add_torrent_uri(self, result):
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
options = {
'add_paused': sickbeard.TORRENT_PAUSED
}
remote_torrent = self.drpc.add_torrent_magnet(result.url, options, result.hash)
if not remote_torrent:
return None
result.hash = remote_torrent
return remote_torrent
def _add_torrent_file(self, result):
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if not result.content: result.content = {}
        if not result.content:
            return None
options = {
'add_paused': sickbeard.TORRENT_PAUSED
}
        remote_torrent = self.drpc.add_torrent_file(result.name + '.torrent', result.content, options, result.hash)
if not remote_torrent:
return None
result.hash = remote_torrent
return remote_torrent
def _set_torrent_label(self, result):
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if ' ' in label:
logger.log(self.name + u': Invalid label. Label must not contain a space', logger.ERROR)
return False
if label:
if self.drpc.set_torrent_label(result.hash, label):
return True
return False
def _set_torrent_ratio(self, result):
return True
def _set_torrent_path(self, result):
path = sickbeard.TORRENT_PATH
if path:
if self.drpc.set_torrent_path(result.hash, path):
return True
return False
def _set_torrent_pause(self, result):
if sickbeard.TORRENT_PAUSED:
return self.drpc.pause_torrent(result.hash)
return True
def testAuthentication(self):
if self.connect(True) and self.drpc.test():
return True, 'Success: Connected and Authenticated'
else:
return False, 'Error: Unable to Authenticate! Please check your config!'
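# Hedged illustration of the host parsing done in DelugeDAPI.connect above:
# the configured host is expected to look like "scheme://hostname:port", so
# stripping the slashes and splitting on ':' yields [scheme, hostname, port].
# The sample value below is an assumption, not a shipped default.
def _example_split_deluge_host(host='http://localhost:58846'):
    parts = host.replace("/", "").split(':')
    return parts[1], parts[2]  # ('localhost', '58846')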
class DelugeRPC(object):
host = 'localhost'
port = 58846
username = None
password = None
client = None
def __init__(self, host = 'localhost', port = 58846, username = None, password = None):
super(DelugeRPC, self).__init__()
self.host = host
self.port = port
self.username = username
self.password = password
def connect(self):
self.client = DelugeClient()
self.client.connect(self.host, int(self.port), self.username, self.password)
def test(self):
try:
self.connect()
except:
return False
return True
def add_torrent_magnet(self, torrent, options, torrent_hash):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_magnet(torrent, options).get()
if not torrent_id:
torrent_id = self._check_torrent(torrent_hash)
except Exception as err:
return False
finally:
if self.client:
self.disconnect()
return torrent_id
def add_torrent_file(self, filename, torrent, options, torrent_hash):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get()
if not torrent_id:
torrent_id = self._check_torrent(torrent_hash)
except Exception as err:
return False
finally:
if self.client:
self.disconnect()
return torrent_id
def set_torrent_label(self, torrent_id, label):
try:
self.connect()
self.client.label.set_torrent(torrent_id, label).get()
except Exception as err:
            logger.log('DelugeD: Failed to set label for torrent: ' + str(err) + ' ' + traceback.format_exc(), logger.ERROR)
return False
finally:
if self.client:
self.disconnect()
return True
def set_torrent_path(self, torrent_id, path):
try:
self.connect()
self.client.core.set_torrent_move_completed_path(torrent_id, path).get()
self.client.core.set_torrent_move_completed(torrent_id, 1).get()
except Exception as err:
            logger.log('DelugeD: Failed to set path for torrent: ' + str(err) + ' ' + traceback.format_exc(), logger.ERROR)
return False
finally:
if self.client:
self.disconnect()
return True
def pause_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.pause_torrent(torrent_ids).get()
except Exception as err:
            logger.log('DelugeD: Failed to pause torrent: ' + str(err) + ' ' + traceback.format_exc(), logger.ERROR)
return False
finally:
if self.client:
self.disconnect()
return True
def disconnect(self):
self.client.disconnect()
def _check_torrent(self, torrent_hash):
torrent_id = self.client.core.get_torrent_status(torrent_hash, {}).get()
if torrent_id['hash']:
logger.log('DelugeD: Torrent already exists in Deluge', logger.DEBUG)
return torrent_hash
return False
api = DelugeDAPI()
|
hep-gc/repoman
|
server/repoman/repoman/lib/storage/storage.py
|
Python
|
gpl-3.0
| 204 | 0.009804 |
import os
from pylons import app_globals
def delete_image(image):
paths = image.path.split(';')
for p in paths:
        path = os.path.join(app_globals.image_storage, p)
os.remove(path)
|
WuNL/mylaptop
|
install/lib/python2.7/dist-packages/mapping_dlut/msg/_Map.py
|
Python
|
bsd-3-clause
| 10,485 | 0.017167 |
"""autogenerated by genpy from mapping_dlut/Map.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import mapping_dlut.msg
import std_msgs.msg
class Map(genpy.Message):
_md5sum = "e6ab6c8862bf55f4e1b5fd48f03f1a7d"
_type = "mapping_dlut/Map"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
###########################################################
#Elevation Map Configuration
#half size of the map;
int32 nHalfSize
#Elevation Grid Resolution, in meter;
float32 fResolution
#x coordinate of the center of the map in world frame
float32 fCenterX
#y coordinate of the center of the map in world frame
float32 fCenterY
#maximum elevation of the map in world frame
float32 fMapMaxElevation
#minimum elevation of the map in world frame
float32 fMapMinElevation
###########################################################
###########################################################
#Vehicle Status
#vehicle x in world frame, in meters
float32 fVehicleX
#vehicle y in world frame, in meters
float32 fVehicleY
#vehicle z in world frame, in meters
float32 fVehicleZ
#vehicle heading angle, in rad
float32 fVehicleHeading
###########################################################
Grid[] map
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: mapping_dlut/Grid
#Header header
#maximum elevation in this grid;
#float32 fMaxElevation
#minimum elevation in this grid;
#float32 fMinElevation
#average elevation in this grid;
#float32 fAvgElevation
#points falling in this grid;
#int32 nPointCount
#up point falling in this grid;
#int32 nUpCount
#down point falling in this grid;
#int32 nDownCount
#average elevation in this grid;
float32 fAvgElevation
#proability
int8 proability
#texture
int8 texture
"""
__slots__ = ['header','nHalfSize','fResolution','fCenterX','fCenterY','fMapMaxElevation','fMapMinElevation','fVehicleX','fVehicleY','fVehicleZ','fVehicleHeading','map']
_slot_types = ['std_msgs/Header','int32','float32','float32','float32','float32','float32','float32','float32','float32','float32','mapping_dlut/Grid[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,nHalfSize,fResolution,fCenterX,fCenterY,fMapMaxElevation,fMapMinElevation,fVehicleX,fVehicleY,fVehicleZ,fVehicleHeading,map
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Map, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.nHalfSize is None:
self.nHalfSize = 0
if self.fResolution is None:
self.fResolution = 0.
if self.fCenterX is None:
self.fCenterX = 0.
if self.fCenterY is None:
self.fCenterY = 0.
if self.fMapMaxElevation is None:
self.fMapMaxElevation = 0.
if self.fMapMinElevation is None:
self.fMapMinElevation = 0.
if self.fVehicleX is None:
self.fVehicleX = 0.
if self.fVehicleY is None:
self.fVehicleY = 0.
if self.fVehicleZ is None:
self.fVehicleZ = 0.
if self.fVehicleHeading is None:
self.fVehicleHeading = 0.
if self.map is None:
self.map = []
else:
self.header = std_msgs.msg.Header()
self.nHalfSize = 0
self.fResolution = 0.
self.fCenterX = 0.
self.fCenterY = 0.
self.fMapMaxElevation = 0.
self.fMapMinElevation = 0.
self.fVehicleX = 0.
self.fVehicleY = 0.
self.fVehicleZ = 0.
self.fVehicleHeading = 0.
self.map = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_i9f.pack(_x.nHalfSize, _x.fResolution, _x.fCenterX, _x.fCenterY, _x.fMapMaxElevation, _x.fMapMinElevation, _x.fVehicleX, _x.fVehicleY, _x.fVehicleZ, _x.fVehicleHeading))
length = len(self.map)
buff.write(_struct_I.pack(length))
for val1 in self.map:
_x = val1
buff.write(_struct_f2b.pack(_x.fAvgElevation, _x.proability, _x.texture))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.map is None:
self.map = None
end = 0
_x = self
start = end
end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 40
(_x.nHalfSize, _x.fResolution, _x.fCenterX, _x.fCenterY, _x.fMapMaxElevation, _x.fMapMinElevation, _x.fVehicleX, _x.fVehicleY, _x.fVehicleZ, _x.fVehicleHeading,) = _struct_i9f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.map = []
for i in range(0, length):
val1 = mapping_dlut.msg.Grid()
_x = val1
start = end
end += 6
(_x.fAvgElevation, _x.proability, _x.texture,) = _struct_f2b.unpack(str[start:end])
self.map.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
|
elopio/snapcraft
|
snapcraft/plugins/go.py
|
Python
|
gpl-3.0
| 8,148 | 0 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The go plugin can be used for go projects using `go get`.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- go-packages:
(list of strings)
Go packages to fetch, these must be a "main" package. Dependencies
are pulled in automatically by `go get`.
Packages that are not "main" will not cause an error, but would
not be useful either.
If the package is a part of the go-importpath the local package
corresponding to those sources will be used.
- go-importpath:
(string)
This entry tells the checked out `source` to live within a certain path
within `GOPATH`.
This is not needed and does not affect `go-packages`.
- go-buildtags:
(list of strings)
Tags to use during the go build. Default is not to use any build tags.
"""
import logging
import os
import shutil
from glob import iglob
import snapcraft
from snapcraft import common
logger = logging.getLogger(__name__)
class GoPlugin(snapcraft.BasePlugin):
@classmethod
def schema(cls):
schema = super().schema()
schema['properties']['go-packages'] = {
'type': 'array',
'minitems': 1,
'uniqueItems': True,
'items': {
'type': 'string',
},
'default': [],
}
schema['properties']['go-importpath'] = {
'type': 'string',
'default': ''
}
schema['properties']['go-buildtags'] = {
'type': 'array',
'minitems': 1,
'uniqueItems': True,
'items': {
'type': 'string',
},
'default': []
}
if 'required' in schema:
del schema['required']
return schema
@classmethod
def get_build_properties(cls):
# Inform Snapcraft of the properties associated with building. If these
# change in the YAML Snapcraft will consider the build step dirty.
return ['go-packages', 'go-buildtags']
@classmethod
def get_pull_properties(cls):
# Inform Snapcraft of the properties associated with pulling. If these
# change in the YAML Snapcraft will consider the pull step dirty.
return ['go-packages']
def __init__(self, name, options, project):
super().__init__(name, options, project)
self.build_packages.append('golang-go')
self._gopath = os.path.join(self.partdir, 'go')
self._gopath_src = os.path.join(self._gopath, 'src')
self._gopath_bin = os.path.join(self._gopath, 'bin')
self._gopath_pkg = os.path.join(self._gopath, 'pkg')
def pull(self):
# use -d to only download (build will happen later)
# use -t to also get the test-deps
# since we are not using -u the sources will stick to the
# original checkout.
super().pull()
os.makedirs(self._gopath_src, exist_ok=True)
if any(iglob('{}/**/*.go'.format(self.sourcedir), recursive=True)):
go_package = self._get_local_go_package()
go_package_path = os.path.join(self._gopath_src, go_package)
if os.path.islink(go_package_path):
os.unlink(go_package_path)
os.makedirs(os.path.dirname(go_package_path), exist_ok=True)
os.symlink(self.sourcedir, go_package_path)
self._run(['go', 'get', '-t', '-d', './{}/...'.format(go_package)])
for go_package in self.options.go_packages:
self._run(['go', 'get', '-t', '-d', go_package])
def clean_pull(self):
super().clean_pull()
# Remove the gopath (if present)
if os.path.exists(self._gopath):
shutil.rmtree(self._gopath)
def _get_local_go_package(self):
if self.options.go_importpath:
go_package = self.options.go_importpath
else:
logger.warning(
'Please consider setting `go-importpath` for the {!r} '
'part'.format(self.name))
go_package = os.path.basename(os.path.abspath(self.options.source))
return go_package
    def _get_local_main_packages(self):
search_path = './{}/...'.format(self._get_local_go_package())
packages = self._run_output(['go', 'list', '-f',
                                     '{{.ImportPath}} {{.Name}}',
search_path])
packages_split = [p.split() for p in packages.splitlines()]
main_packages = [p[0] for p in packages_split if p[1] == 'main']
return main_packages
def build(self):
super().build()
tags = []
if self.options.go_buildtags:
tags = ['-tags={}'.format(','.join(self.options.go_buildtags))]
packages = self.options.go_packages
if not packages:
packages = self._get_local_main_packages()
for package in packages:
binary = os.path.join(self._gopath_bin, self._binary_name(package))
self._run(['go', 'build', '-o', binary] + tags + [package])
install_bin_path = os.path.join(self.installdir, 'bin')
os.makedirs(install_bin_path, exist_ok=True)
for binary in os.listdir(self._gopath_bin):
binary_path = os.path.join(self._gopath_bin, binary)
shutil.copy2(binary_path, install_bin_path)
def _binary_name(self, package):
package = package.replace('/...', '')
return package.split('/')[-1]
def clean_build(self):
super().clean_build()
if os.path.isdir(self._gopath_bin):
shutil.rmtree(self._gopath_bin)
if os.path.isdir(self._gopath_pkg):
shutil.rmtree(self._gopath_pkg)
def _run(self, cmd, **kwargs):
env = self._build_environment()
return self.run(cmd, cwd=self._gopath_src, env=env, **kwargs)
def _run_output(self, cmd, **kwargs):
env = self._build_environment()
return self.run_output(cmd, cwd=self._gopath_src, env=env, **kwargs)
def _build_environment(self):
env = os.environ.copy()
env['GOPATH'] = self._gopath
env['GOBIN'] = self._gopath_bin
include_paths = []
for root in [self.installdir, self.project.stage_dir]:
include_paths.extend(
common.get_library_paths(root, self.project.arch_triplet))
flags = common.combine_paths(include_paths, '-L', ' ')
env['CGO_LDFLAGS'] = '{} {} {}'.format(
env.get('CGO_LDFLAGS', ''), flags, env.get('LDFLAGS', ''))
if self.project.is_cross_compiling:
env['CC'] = '{}-gcc'.format(self.project.arch_triplet)
env['CXX'] = '{}-g++'.format(self.project.arch_triplet)
env['CGO_ENABLED'] = '1'
# See https://golang.org/doc/install/source#environment
go_archs = {
'armhf': 'arm',
'i386': '386',
'ppc64el': 'ppc64le',
}
env['GOARCH'] = go_archs.get(self.project.deb_arch,
self.project.deb_arch)
if self.project.deb_arch == 'armhf':
env['GOARM'] = '7'
return env
def enable_cross_compilation(self):
pass
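# Hedged illustration of the parsing done in _get_local_main_packages above:
# `go list -f '{{.ImportPath}} {{.Name}}'` prints "<importpath> <name>" per
# package, and only packages named "main" are build targets. The sample
# output below is made up.
def _example_filter_main_packages():
    sample_output = ('example.com/project/cmd/tool main\n'
                     'example.com/project/internal/util util\n')
    packages_split = [line.split() for line in sample_output.splitlines()]
    return [p[0] for p in packages_split if p[1] == 'main']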
|
SINGROUP/pycp2k
|
pycp2k/classes/_each286.py
|
Python
|
lgpl-3.0
| 1,114 | 0.001795 |
from pycp2k.inputsection import InputSection
class _each286(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
        self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
        self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
|
Aurous/Magic-Discord-Bot
|
discord/opus.py
|
Python
|
gpl-3.0
| 8,162 | 0.003798 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import ctypes
import ctypes.util
import array
from .errors import DiscordException
import logging
import sys
import os.path
log = logging.getLogger(__name__)
c_int_ptr = ctypes.POINTER(ctypes.c_int)
c_int16_ptr = ctypes.POINTER(ctypes.c_int16)
c_float_ptr = ctypes.POINTER(ctypes.c_float)
class EncoderStruct(ctypes.Structure):
pass
EncoderStructPtr = ctypes.POINTER(EncoderStruct)
# A list of exported functions.
# The first argument is obviously the name.
# The second one are the types of arguments it takes.
# The third is the result type.
exported_functions = [
('opus_strerror', [ctypes.c_int], ctypes.c_char_p),
('opus_encoder_get_size', [ctypes.c_int], ctypes.c_int),
('opus_encoder_create', [ctypes.c_int, ctypes.c_int, ctypes.c_int, c_int_ptr], EncoderStructPtr),
('opus_encode', [EncoderStructPtr, c_int16_ptr, ctypes.c_int, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int32),
('opus_encoder_ctl', None, ctypes.c_int32),
('opus_encoder_destroy', [EncoderStructPtr], None)
]
def libopus_loader(name):
# create the library...
lib = ctypes.cdll.LoadLibrary(name)
# register the functions...
for item in exported_functions:
try:
func = getattr(lib, item[0])
except Exception as e:
raise e
try:
if item[1]:
func.argtypes = item[1]
func.restype = item[2]
except KeyError:
pass
return lib
try:
if sys.platform == 'win32':
_basedir = os.path.dirname(os.path.abspath(__file__))
_bitness = 'x64' if sys.maxsize > 2**32 else 'x86'
_filename = os.path.join(_basedir, 'bin', 'libopus-0.{}.dll'.format(_bitness))
_lib = libopus_loader(_filename)
else:
_lib = libopus_loader(ctypes.util.find_library('opus'))
except Exception as e:
_lib = None
def load_opus(name):
"""Loads the libopus shared library for use with voice.
If this function is not called then the library uses the function
`ctypes.util.find_library`__ and then loads that one
if available.
.. _find library: https://docs.python.org/3.5/library/ctypes.html#finding-shared-libraries
__ `find library`_
Not loading a library leads to voice not working.
This function propagates the exceptions thrown.
Warning
--------
The bitness of the library must match the bitness of your python
interpreter. If the library is 64-bit then your python interpreter
must be 64-bit as well. Usually if there's a mismatch in bitness then
the load will throw an exception.
Note
----
On Windows, the .dll extension is not necessary. However, on Linux
the full extension is required to load the library, e.g. ``libopus.so.1``.
On Linux however, `find library`_ will usually find the library automatically
without you having to call this.
Parameters
----------
name: str
The filename of the shared library.
"""
global _lib
_lib = libopus_loader(name)
def is_loaded():
"""Function to check if opus lib is successfully loaded either
via the ``ctypes.util.find_library`` call of :func:`load_opus`.
This must return ``True`` for voice to work.
Returns
-------
bool
Indicates if the opus library has been loaded.
"""
global _lib
return _lib is not None
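# Hedged usage sketch: try a few likely library names before falling back to
# whatever the automatic search already loaded. The candidate file names are
# assumptions, not values shipped with this module.
def _example_load_opus(candidates=('libopus.so.0', 'opus')):
    for name in candidates:
        try:
            load_opus(name)
            return True
        except OSError:
            continue
    return is_loaded()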
class OpusError(DiscordException):
"""An exception that is thrown for libopus related errors.
Attributes
----------
code : int
The error code returned.
"""
def __init__(self, code):
self.code = code
msg = _lib.opus_strerror(self.code).decode('utf-8')
log.info('"{}" has happened'.format(msg))
super().__init__(msg)
class OpusNotLoaded(DiscordException):
"""An exception that is thrown for when libopus is not loaded."""
pass
# Some constants...
OK = 0
APPLICATION_AUDIO = 2049
APPLICATION_VOIP = 2048
APPLICATION_LOWDELAY = 2051
CTL_SET_BITRATE = 4002
CTL_SET_BANDWIDTH = 4008
CTL_SET_FEC = 4012
CTL_SET_PLP = 4014
band_ctl = {
'narrow': 1101,
'medium': 1102,
'wide': 1103,
'superwide': 1104,
'full': 1105,
}
class Encoder:
def __init__(self, sampling, channels, application=APPLICATION_AUDIO):
self.sampling_rate = sampling
self.channels = channels
self.application = application
self.frame_length = 20
self.sample_size = 2 * self.channels # (bit_rate / 8) but bit_rate == 16
self.samples_per_frame = int(self.sampling_rate / 1000 * self.frame_length)
self.frame_size = self.samples_per_frame * self.sample_size
if not is_loaded():
raise OpusNotLoaded()
self._state = self._create_state()
self.set_bitrate(128)
self.set_fec(True)
self.set_expected_packet_loss_percent(0.15)
self.set_bandwidth('full')
def __del__(self):
if hasattr(self, '_state'):
_lib.opus_encoder_destroy(self._state)
self._state = None
def _create_state(self):
ret = ctypes.c_int()
result = _lib.opus_encoder_create(self.sampling_rate, self.channels, self.application, ctypes.byref(ret))
if ret.value != 0:
log.info('error has happened in state creation')
raise OpusError(ret.value)
return result
def set_bitrate(self, kbps):
kbps = min(128, max(16, int(kbps)))
ret = _lib.opus_encoder_ctl(self._state, CTL_SET_BITRATE, kbps * 1024)
if ret < 0:
log.info('error has happened in set_bitrate')
raise OpusError(ret)
return kbps
def set_bandwidth(self, req):
if req not in band_ctl:
raise KeyError('%r is not a valid bandwidth setting. Try one of: %s' % (req, ','.join(band_ctl)))
k = band_ctl[req]
ret = _lib.opus_encoder_ctl(self._state, CTL_SET_BANDWIDTH, k)
if ret < 0:
log.info('error has happened in set_bandwidth')
raise OpusError(ret)
def set_fec(self, enabled=True):
ret = _lib.opus_encoder_ctl(self._state, CTL_SET_FEC, 1 if enabled else 0)
if ret < 0:
log.info('error has happened in set_fec')
raise OpusError(ret)
def set_expected_packet_loss_percent(self, percentage):
ret = _lib.opus_encoder_ctl(self._state, CTL_SET_PLP, min(100, max(0, int(percentage * 100))))
if ret < 0:
log.info('error has happened in set_expected_packet_loss_percent')
raise OpusError(ret)
def encode(self, pcm, frame_size):
max_data_bytes = len(pcm)
pcm = ctypes.cast(pcm, c_int16_ptr)
data = (ctypes.c_char * max_data_bytes)()
ret = _lib.opus_encode(self._state, pcm, frame_size, data, max_data_bytes)
if ret < 0:
log.info('error has happened in encode')
raise OpusError(ret)
return array.array('b', data[:ret]).tobytes()
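# Illustrative sketch of driving the Encoder directly, assuming libopus has been
# loaded; 48 kHz stereo is used here purely as an example configuration:
#
#     encoder = Encoder(48000, 2)
#     pcm = b'\x00' * encoder.frame_size                  # one 20 ms frame of silence
#     packet = encoder.encode(pcm, encoder.samples_per_frame)
#     # `packet` now holds the opus-encoded payload for that single frame.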
|
SaMnCo/charm-dashing
|
lib/charmhelpers/fetch/bzrurl.py
|
Python
|
agpl-3.0
| 1,463 | 0.001367 |
import os
from bzrlib.branch import Branch
from charmhelpers.fetch import (
BaseFetchHandler,
    UnhandledSource
)
from charmhelpers.core.host import mkdir
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('bzr+ssh', 'lp'):
            return False
else:
return True
def branch(self, source, dest):
url_parts = self.parse_url(source)
# If we use lp:branchname scheme we need to load plugins
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if url_parts.scheme == "lp":
from bzrlib.plugin import load_plugins
load_plugins()
try:
remote_branch = Branch.open(source)
remote_branch.bzrdir.sprout(dest).open_branch()
except Exception as e:
raise e
def install(self, source):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
try:
self.branch(source, dest_dir)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir
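# Illustrative usage sketch; the branch URL is hypothetical and this assumes
# bzrlib is importable and CHARM_DIR is set, as it normally is inside a charm
# hook environment:
#
#     handler = BzrUrlFetchHandler()
#     if handler.can_handle('lp:~someone/somecharm/trunk'):
#         fetched_dir = handler.install('lp:~someone/somecharm/trunk')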
|
CristianCantoro/wikipedia-tags-in-osm
|
extract_templates.py
|
Python
|
gpl-3.0
| 4,620 | 0.000433 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import requests
import wikipedia_template_parser as wtp
from lxml import etree
import re
import json
def templates_including_coords():
LINKSCOORD = "http://it.wikipedia.org/w/index.php?title="\
"Speciale:PuntanoQui/Template:Coord&namespace=10&limit=500"
req = requests.get(LINKSCOORD)
templates = list()
if req.ok:
doc = etree.fromstring(req.text)
tmpl_list = doc.xpath('//div[@id="mw-content-text"]//li/a')
templates = [tmpl.text
for tmpl in tmpl_list
if not tmpl.text.lower().endswith('/man')
]
return templates
def get_parameters(template):
try:
data = wtp.data_from_templates(template+'/man', "it")
except ValueError:
return []
try:
        tabella = [d for d in data
                   if d['name'] == 'TabellaTemplate'][0]
except IndexError:
return []
stringa_parametri = tabella['data']['parametri']
parametri = [p.replace('{{', '').replace('}}', '').split('~')[1]
for p in re.findall("{{[^{}]+}}", stringa_parametri)
]
return parametri
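# Illustrative example of the parsing above (the template parameter names are
# hypothetical): given a 'parametri' string such as
# "{{parametro~lat~opzionale}} {{parametro~lon~opzionale}}", get_parameters()
# would return ['lat', 'lon'], since each {{...}} group is split on '~' and the
# second element is kept.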
def write(outfile, addfile=None):
templates = templates_including_coords()
twparslist = []
for t in templates:
parametri = get_parameters(t)
twpars = {'name': t.replace('Template:', ''),
'parameters': [p for p in parametri
if 'lat' in p.lower() or 'lon' in p.lower()]
}
twparslist.append(twpars)
# with open(args.outfile, 'wb') as output:
# # Pickle the list using the highest protocol available.
# pickle.dump(twpars, output, -1)
addtwp = None
if addfile is not None:
addtwp = read(addfile)
for t in addtwp:
repeat = [(id_, tmp)
for (id_, tmp) in enumerate(twparslist)
if tmp['name'] == t['name']
]
if repeat:
id_ = repeat[0][0]
twparslist[id_] = t
addtwp.remove(t)
twparslist = twparslist + addtwp
with open(outfile, 'w+') as out:
for twp in twparslist:
out.write('{}\n'.format(json.dumps(twp)))
def read(infile):
with open(infile, 'r') as in_:
twp = [json.loads(l.strip()) for l in in_.readlines()]
return twp
def main():
# Options
text = 'Descrizione'
parser = argparse.ArgumentParser(description=text)
parser.add_argument("-f", "--file",
help='Nome del file di output con i dati '
'[default: '
'./data/wikipedia'
'/coords/templates_with_coords.txt]',
default=os.path.join("data",
"wikipedia",
"coords",
"templates_including_coords.txt"
),
action="store"
)
parser.add_argument("-a", "--add",
help='Nome del file con una lista di template '
'(serve per aggiungere alcuni template "a mano" '
'[default: '
'./data/wikipedia'
'/coords/add_templates_with_coords.txt]',
default=os.path.join("data",
"wikipedia",
"coords",
"add_templates"
"_including_coords.txt"
),
action="store"
)
parser.add_argument("--no-add",
help="Non aggiungere la lista
|
dei template",
dest='no_add',
action="store_true"
)
parser.add_argument("-r", "--read",
help="leggi il file invece di scriverlo",
action="store_true"
)
args = parser.parse_args()
if args.read:
read(args.file)
else:
if args.no_add:
write(args.file)
else:
write(args.file, args.add)
if __name__ == '__main__':
import argparse
import os
main()
|
bhansa/fireball
|
pyvenv/Lib/site-packages/pygame/tests/run_tests__tests/infinite_loop/fake_1_test.py
|
Python
|
gpl-3.0
| 977 | 0.008188 |
if __name__ == '__main__':
import sys
import os
pkg_dir = (os.path.split(
os.path.split(
os.path.split(
os.path.abspath(__file__))[0])[0])[0])
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
import unittest
class KeyModuleTest(unittest.TestCase):
def test_get_focused(self):
self.assert_(True)
def test_get_mods(self):
        while True:
            pass
def test_get_pressed(self):
self.assert_(True)
def test_name(self):
self.assert_(True)
def test_set_mods(self):
self.assert_(True)
def test_set_repeat(self):
self.assert_(True)
if __name__ == '__main__':
unittest.main()
|
jgrocha/QGIS
|
tests/src/python/test_qgsvectorlayer.py
|
Python
|
gpl-2.0
| 159,533 | 0.001793 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorLayer.
From build dir, run:
ctest -R PyQgsVectorLayer -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
import os
import tempfile
import shutil
from qgis.PyQt.QtCore import QDate, QDateTime, QVariant, Qt, QTime
from qgis.PyQt.QtGui import QPainter, QColor
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (Qgis,
QgsWkbTypes,
QgsAction,
QgsAuxiliaryStorage,
QgsCoordinateTransformContext,
QgsDataProvider,
QgsDefaultValue,
QgsEditorWidgetSetup,
QgsMapLayer,
QgsVectorLayer,
QgsRectangle,
QgsFeature,
QgsFeatureRequest,
QgsGeometry,
QgsPointXY,
QgsField,
QgsFieldConstraints,
QgsFields,
QgsVectorLayerJoinInfo,
QgsSymbol,
QgsSingleSymbolRenderer,
QgsCoordinateReferenceSystem,
QgsVectorLayerCache,
QgsReadWriteContext,
QgsProject,
QgsUnitTypes,
QgsAggregateCalculator,
QgsPoint,
QgsExpressionContext,
QgsExpressionContextScope,
QgsExpressionContextUtils,
QgsLineSymbol,
QgsMapLayerServerProperties,
QgsMapLayerStyle,
QgsMapLayerDependency,
QgsRenderContext,
QgsPalLayerSettings,
QgsVectorLayerSimpleLabeling,
QgsSingleCategoryDiagramRenderer,
QgsDiagramLayerSettings,
QgsTextFormat,
QgsVectorLayerSelectedFeatureSource,
QgsExpression,
QgsLayerMetadata,
NULL)
from qgis.gui import (QgsAttributeTableModel,
QgsGui
)
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
from featuresourcetestbase import FeatureSourceTestCase
from utilities import unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
start_app()
def createEmptyLayer():
layer = QgsVectorLayer("Point", "addfeat", "memory")
assert layer.featureCount() == 0
return layer
def createEmptyLayerWithFields():
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "addfeat", "memory")
assert layer.featureCount() == 0
return layer
def createLayerWithOnePoint():
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
"addfeat", "memory")
pr = layer.dataProvider()
f = QgsFeature()
f.setAttributes(["test", 123])
f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
assert pr.addFeatures([f])
assert layer.featureCount() == 1
return layer
def createLayerWithTwoPoints():
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
"addfeat", "memory")
pr = layer.dataProvider()
f = QgsFeature()
f.setAttributes(["test", 123])
f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
f2 = QgsFeature()
f2.setAttributes(["test2", 457])
f2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
assert pr.addFeatures([f, f2])
assert layer.featureCount() == 2
return layer
def createLayerWithFivePoints():
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
"addfeat", "memory")
pr = layer.dataProvider()
f = QgsFeature()
f.setAttributes(["test", 123])
f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
f2 = QgsFeature()
f2.setAttributes(["test2", 457])
f2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(200, 200)))
f3 = QgsFeature()
f3.setAttributes(["test2", 888])
f3.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(300, 200)))
f4 = QgsFeature()
f4.setAttributes(["test3", -1])
f4.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(400, 300)))
f5 = QgsFeature()
f5.setAttributes(["test4", 0])
f5.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(0, 0)))
assert pr.addFeatures([f, f2, f3, f4, f5])
assert layer.featureCount() == 5
return layer
def createJoinLayer():
joinLayer = QgsVectorLayer(
"Point?field=x:string&field=y:integer&field=z:integer&field=date:datetime",
"joinlayer", "memory")
pr = joinLayer.dataProvider()
f1 = QgsFeature()
f1.setAttributes(["foo", 123, 321, QDateTime(QDate(2010, 1, 1))])
f1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 1)))
f2 = QgsFeature()
f2.setAttributes(["bar", 456, 654, QDateTime(QDate(2020, 1, 1))])
f2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(2, 2)))
f3 = QgsFeature()
f3.setAttributes(["qar", 457, 111, None])
f3.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(2, 2)))
f4 = QgsFeature()
f4.setAttributes(["a", 458, 19, QDateTime(QDate(2012, 1, 1))])
f4.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(2, 2)))
assert pr.addFeatures([f1, f2, f3, f4])
assert joinLayer.featureCount() == 4
return joinLayer
def dumpFeature(f):
print("--- FEATURE DUMP ---")
print(("valid: %d | id: %d" % (f.isValid(), f.id())))
geom = f.geometry()
if geom:
print(("geometry wkb: %d" % geom.wkbType()))
else:
print("no geometry")
print(("attrs: %s" % str(f.attributes())))
def formatAttributes(attrs):
return repr([str(a) for a in attrs])
def dumpEditBuffer(layer):
editBuffer = layer.editBuffer()
if not editBuffer:
print("NO EDITING!")
return
print("ADDED:")
for fid, f in editBuffer.addedFeatures().items():
print(("%d: %s | %s" % (
f.id(), formatAttributes(f.attributes()),
f.geometry().asWkt())))
print("CHANGED GEOM:")
for fid, geom in editBuffer.changedGeometries().items():
print(("%d | %s" % (f.id(), f.geometry().asWkt())))
class TestQgsVectorLayer(unittest.TestCase, FeatureSourceTestCase):
@classmethod
def getSource(cls):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&field=dt:datetime&field=date:date&field=time:time&key=pk',
'test', 'memory')
assert (vl.isValid())
f1 = QgsFeature()
        f1.setAttributes([5, -200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)), QDate(2020, 5, 2), QTime(12, 13, 1)])
f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
f2 = QgsFeature()
f2.setAttributes([3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL])
f3 = QgsFeature()
        f3.setAttributes([1, 100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)), QDate(2020, 5, 3), QTime(12, 13, 14)])
f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
f4 = QgsFeature()
f4.setAttributes([2, 200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)), QDate(2020, 5, 4), QTime(12, 14, 14)])
f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
f5 = QgsFeature()
f5.setAttributes([4, 400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)), QDate(2021, 5, 4), QTime(1
|
qedsoftware/commcare-hq
|
corehq/messaging/smsbackends/telerivet/migrations/0002_add_index_on_webhook_secret.py
|
Python
|
bsd-3-clause
| 464 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('telerivet', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='incomingrequest',
name='secret',
field=models.CharField(max_length=255, null=True, db_index=True),
preserve_default=True,
),
]
|
CTSNE/NodeDefender
|
NodeDefender/manage/setup/database.py
|
Python
|
mit
| 2,533 | 0.001974 |
from NodeDefender.manage.setup import (manager, print_message, print_topic,
print_info)
from flask_script import prompt
import NodeDefender
@manager.command
def database():
print_topic('Database')
print_info("Database is used to store presistant data.")
print_info("By having it disabled the data will be store in run-time RAM for the\
session")
enabled = None
while enabled is None:
enabled = prompt("Enable Database(Y/N)").upper()
if 'Y' in enabled:
enabled = True
elif 'N' in enabled:
enabled = False
else:
enabled = None
if not enabled:
NodeDefender.config.database.set(enabled = False)
if NodeDefender.config.database.write():
print_info("Database- config successfully written")
return False
supported_databases = ['mysql', 'sqlite']
engine = None
while engine is None:
engine = prompt("Enter DB Engine(SQLite, MySQL)").lower()
        if engine not in supported_databases:
engine = None
host = None
port = None
username = None
password = None
database = None
if engine == "mysql":
while not host:
host = prompt('Enter Server Address')
while not port:
port = prompt('Enter Server Port')
while not username:
username = prompt('Enter Username')
while not password:
password = prompt('Enter Password')
while not database:
database = prompt("Enter Database Name")
filepath = None
if engine == "sqlite":
while not filepath:
print_info("Filename for SQLite Database")
print_info("SQLite will be stored as file in data- folder")
print_info(NodeDefender.config.datafolder)
print_info("Do not use any slashes in the filename")
filepath = prompt("Enter File Path")
NodeDefender.config.database.set(enabled=True,
engine=engine,
host=host,
port=port,
username=username,
password=password,
database=database,
filepath=filepath)
if NodeDefender.config.database.write():
print_info("Database- config successfully written")
return True
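# Illustrative outcome sketch (the file name is hypothetical): answering "Y",
# choosing "sqlite" and entering "nodedefender.db" as the file path ends up in a
# call roughly equivalent to
#
#     NodeDefender.config.database.set(enabled=True, engine='sqlite',
#                                      host=None, port=None, username=None,
#                                      password=None, database=None,
#                                      filepath='nodedefender.db')
#
# followed by NodeDefender.config.database.write().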
|
joakim-hove/ert
|
ert_gui/ertwidgets/validationsupport.py
|
Python
|
gpl-3.0
| 3,928 | 0.001018 |
from qtpy.QtCore import Qt, QPoint, QObject, Signal
from qtpy.QtGui import QColor
from qtpy.QtWidgets import QWidget, QVBoxLayout, QSizePolicy, QFrame, QLabel
import html
class ErrorPopup(QWidget):
error_template = (
"<html>"
"<table style='background-color: #ffdfdf;'width='100%%'>"
"<tr><td style='font-weight: bold; padding-left: 5px;'>Warning:</td></tr>"
"%s"
"</table>"
"</html>"
)
def __init__(self):
QWidget.__init__(self, None, Qt.ToolTip)
self.resize(300, 50)
self.setContentsMargins(0, 0, 0, 0)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self._error_widget = QLabel("")
self._error_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
self._error_widget.setFrameStyle(QFrame.Box)
self._error_widget.setWordWrap(True)
self._error_widget.setScaledContents(True)
# self.warning_widget.setAlignment(Qt.AlignHCenter)
        self._error_widget.setTextFormat(Qt.RichText)
layout.addWidget(self._error_widget)
self.setLayout(layout)
def presentError(self, widget, error):
assert isinstance(widget, QWidget)
self._error_widget.setText(ErrorPopup.error_template % html.escape(error))
self.show()
size_hint = self.sizeHint()
rect = widget.rect()
p = widget.mapToGlobal(QPoint(rect.left(), rect.top()))
self.setGeometry(
p.x(), p.y() - size_hint.height() - 5, size_hint.width(), size_hint.height()
)
self.raise_()
class ValidationSupport(QObject):
STRONG_ERROR_COLOR = QColor(255, 215, 215)
ERROR_COLOR = QColor(255, 235, 235)
INVALID_COLOR = QColor(235, 235, 255)
WARNING = "warning"
EXCLAMATION = "ide/small/exclamation"
validationChanged = Signal(bool)
def __init__(self, validation_target):
"""@type validation_target: QWidget"""
QObject.__init__(self)
self._validation_target = validation_target
self._validation_message = None
self._validation_type = None
self._error_popup = ErrorPopup()
self._originalEnterEvent = validation_target.enterEvent
self._originalLeaveEvent = validation_target.leaveEvent
self._originalHideEvent = validation_target.hideEvent
def enterEvent(event):
self._originalEnterEvent(event)
if not self.isValid():
self._error_popup.presentError(
self._validation_target, self._validation_message
)
validation_target.enterEvent = enterEvent
def leaveEvent(event):
self._originalLeaveEvent(event)
if self._error_popup is not None:
self._error_popup.hide()
validation_target.leaveEvent = leaveEvent
def hideEvent(hide_event):
self._error_popup.hide()
self._originalHideEvent(hide_event)
validation_target.hideEvent = hideEvent
def setValidationMessage(self, message, validation_type=WARNING):
"""Add a warning or information icon to the widget with a tooltip"""
message = message.strip()
if message == "":
self._validation_type = None
self._validation_message = None
self._error_popup.hide()
self.validationChanged.emit(True)
else:
self._validation_type = validation_type
self._validation_message = message
if (
self._validation_target.hasFocus()
or self._validation_target.underMouse()
):
self._error_popup.presentError(
self._validation_target, self._validation_message
)
self.validationChanged.emit(False)
def isValid(self):
return self._validation_message is None
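# Illustrative usage sketch; QLineEdit is only an example target widget (it is
# not imported in this module) and a running Qt application is assumed:
#
#     line_edit = QLineEdit()
#     validation = ValidationSupport(line_edit)
#     validation.setValidationMessage("Value must be a positive integer")
#     ...
#     validation.setValidationMessage("")   # clears the popup, emits validationChanged(True)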
|
woutdenolf/wdncrunch
|
wdncrunch/modulea/tests/test_all.py
|
Python
|
mit
| 1,623 | 0.002465 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Wout De Nolf (wout.de_nolf@esrf.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from . import test_classa
def test_suite():
"""Test suite including all test suites"""
    testSuite = unittest.TestSuite()
testSuite.addTest(test_classa.test_suite())
return testSuite
if __name__ == '__main__':
import sys
    mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
|