repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
godlike64/typhon
|
typhon/views.py
|
Python
|
gpl-3.0
| 468 | 0.004274 |
from django.contrib.auth.models import User
|
from django.views.generic.edit import CreateView, FormView
from django.shortcuts import render
|
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.core.context_processors import csrf
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
def test_page(request):
return render(request, "test.html")
|
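The imports in this row (CreateView, csrf_exempt, method_decorator, login_required) are the usual ingredients for decorating a Django class-based view. A minimal sketch of how they are typically combined follows; the view class, model choice and URL are hypothetical and not part of the typhon repository.

```python
# Hypothetical sketch (not from typhon): decorating a class-based view's dispatch
# with the csrf_exempt and login_required decorators imported above.
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.edit import CreateView


class UserCreateView(CreateView):
    model = User                       # hypothetical model/fields for illustration
    fields = ['username']
    success_url = '/'

    @method_decorator(csrf_exempt)     # outermost: skip CSRF checks for API-style POSTs
    @method_decorator(login_required)  # but still require an authenticated user
    def dispatch(self, request, *args, **kwargs):
        return super(UserCreateView, self).dispatch(request, *args, **kwargs)
```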
cxsjabc/basic
|
python/message.py
|
Python
|
agpl-3.0
| 308 | 0.061688 |
#!/usr/bin/python
def message(to, text):
print "this is ", to, ":\n", text
def add(a, b):
return a + b;
message('xichen', 'eyu')
|
print add(1,2);
def mul(a, b):
return a * b;
|
print mul(2, 3);
print mul('a', 3);
print mul(b=2, a='dd');
print 2 ** 100;
print message;
func = add;
print func(1, 2);
|
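The snippet above is Python 2 (print statements, trailing semicolons). For reference, a hedged Python 3 rendering is sketched below; the behaviour noted in the comments follows directly from the code above.

```python
#!/usr/bin/python3
# Python 3 rendering of the snippet above (for reference only; the original targets Python 2).
def message(to, text):
    print("this is", to, ":\n", text)

def add(a, b):
    return a + b

def mul(a, b):
    return a * b

message('xichen', 'eyu')
print(add(1, 2))          # 3
print(mul(2, 3))          # 6
print(mul('a', 3))        # 'aaa' -- sequence repetition
print(mul(b=2, a='dd'))   # 'dddd'
print(2 ** 100)           # arbitrary-precision integers
print(message)            # the function object itself
func = add
print(func(1, 2))         # 3
```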
minlexx/pyevemon
|
esi_client/models/get_universe_moons_moon_id_position.py
|
Python
|
gpl-3.0
| 4,328 | 0.000231 |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetUniverseMoonsMoonIdPosition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, x=None, y=None, z=None):
"""
GetUniverseMoonsMoonIdPosition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'x': 'float',
'y': 'float',
'z': 'float'
}
self.attribute_map = {
'x': 'x',
'y': 'y',
'z': 'z'
}
self._x = x
self._y = y
self._z = z
@property
def x(self):
"""
Gets the x of this GetUniverseMoonsMoonIdPosition.
x number
:return: The x of this GetUniverseMoonsMoonIdPosition.
:rtype: float
"""
|
return self._x
@x.setter
def x(self, x):
"""
Sets the x of this GetUniverseMoonsMoonIdPosition.
x number
:param x: The x of this GetUniverseMoonsMoonIdPosition.
|
:type: float
"""
if x is None:
raise ValueError("Invalid value for `x`, must not be `None`")
self._x = x
@property
def y(self):
"""
Gets the y of this GetUniverseMoonsMoonIdPosition.
y number
:return: The y of this GetUniverseMoonsMoonIdPosition.
:rtype: float
"""
return self._y
@y.setter
def y(self, y):
"""
Sets the y of this GetUniverseMoonsMoonIdPosition.
y number
:param y: The y of this GetUniverseMoonsMoonIdPosition.
:type: float
"""
if y is None:
raise ValueError("Invalid value for `y`, must not be `None`")
self._y = y
@property
def z(self):
"""
Gets the z of this GetUniverseMoonsMoonIdPosition.
z number
:return: The z of this GetUniverseMoonsMoonIdPosition.
:rtype: float
"""
return self._z
@z.setter
def z(self, z):
"""
Sets the z of this GetUniverseMoonsMoonIdPosition.
z number
:param z: The z of this GetUniverseMoonsMoonIdPosition.
:type: float
"""
if z is None:
raise ValueError("Invalid value for `z`, must not be `None`")
self._z = z
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetUniverseMoonsMoonIdPosition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
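Since the class above is generated boilerplate, a short usage sketch may help: construct the model, let the property setters validate assignments, and serialize with to_dict(). The concrete values below are made up for illustration.

```python
# Minimal usage sketch (assumed, not part of the generated module):
pos = GetUniverseMoonsMoonIdPosition(x=1.0, y=2.0, z=3.0)

# The property setters validate input; None is rejected.
pos.x = 4.5
try:
    pos.y = None
except ValueError as err:
    print(err)                      # Invalid value for `y`, must not be `None`

print(pos.to_dict())                # {'x': 4.5, 'y': 2.0, 'z': 3.0}
print(pos == GetUniverseMoonsMoonIdPosition(x=4.5, y=2.0, z=3.0))  # True
```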
noirbizarre/bumpr
|
bumpr/hooks.py
|
Python
|
lgpl-3.0
| 6,499 | 0.001077 |
from __future__ import annotations
import logging
from os.path import exists
from typing import TYPE_CHECKING
from .helpers import BumprError, execute
if TYPE_CHECKING:
from typing import Optional
logger = logging.getLogger(__name__)
__all__ = (
"Hook",
"ReadTheDocHook",
"ChangelogHook",
"CommandHook",
"ReplaceHook",
"HOOKS",
)
class Hook:
key: str = ""
defaults: dict[str, Optional[str]] = {}
def __init__(self, releaser):
self.releaser = releaser
self.verbose = releaser.config.verbose
self.dryrun = releaser.config.dryrun
self.config = releaser.config[self.key]
self.validate()
def validate(self):
"""Override this method to implement initial validation"""
def bump(self, replacements):
pass
|
def prepare(self, replacements):
pass
class ReadTheDocHook(Hook):
"""
This hook sets the readthedocs URL corresponding to the version
"""
key = "readthedoc"
defaults = {
"id": None,
"url": "https://{id}.readthedocs.io/en/{tag}",
"badge": "https://readthedocs.org/projects/{id}/badge/?version={tag}",
"bump": "{version}",
"prepare": "latest",
}
def url(self, tag):
return self.config.url.format(id=self.config.id, tag=tag)
|
def badge(self, tag):
return self.config.badge.format(id=self.config.id, tag=tag)
def bump(self, replacements):
replacements.insert(0, (self.badge("latest"), self.badge(self.releaser.tag_label)))
replacements.insert(0, (self.url("latest"), self.url(self.releaser.tag_label)))
def prepare(self, replacements):
replacements.insert(0, (self.badge(self.releaser.tag_label), self.badge("latest")))
replacements.insert(0, (self.url(self.releaser.tag_label), self.url("latest")))
class ChangelogHook(Hook):
"""
This hook bumps the changelog version header and prepares a new section for the next release.
"""
key = "changelog"
defaults = {
"file": None,
"separator": "-",
"bump": "{version} ({date:%Y-%m-%d})",
"prepare": "Current",
"empty": "Nothing yet",
}
def validate(self):
if not self.config.get("file"):
raise BumprError("Changelog file has not been specified")
elif not exists(self.config.file):
raise BumprError("Changelog file does not exists")
def bump(self, replacements):
with open(self.config.file, "r", encoding=self.releaser.config.encoding) as changelog_file:
before = changelog_file.read()
after = before.replace(self.dev_header(), self.bumped_header())
self.releaser.perform(self.config.file, before, after)
def prepare(self, replacements):
next_header = "\n".join(
(
self.dev_header(),
"",
"- {0}".format(self.config.empty),
"",
self.bumped_header(),
)
)
with open(self.config.file, "r", encoding=self.releaser.config.encoding) as changelog_file:
before = changelog_file.read()
after = before.replace(self.bumped_header(), next_header)
self.releaser.perform(self.config.file, before, after)
def dev_header(self):
return self.underline(self.config.prepare)
def bumped_header(self):
title = self.config.bump.format(
version=self.releaser.version,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
)
return self.underline(title)
def underline(self, text):
if self.config.separator:
return "\n".join((text, len(text) * self.config.separator))
else:
return text
class CommandsHook(Hook):
"""
This hook executes commands
"""
key = "commands"
defaults = {
"bump": None,
"prepare": None,
}
def bump(self, replacements):
if self.config.bump:
replacements = dict(
version=self.releaser.version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
)
execute(
self.config.bump,
replacements=replacements,
verbose=self.verbose,
dryrun=self.dryrun,
)
def prepare(self, replacements):
if self.config.prepare:
replacements = dict(
version=self.releaser.next_version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.next_version.__dict__,
)
execute(
self.config.prepare,
replacements=replacements,
verbose=self.verbose,
dryrun=self.dryrun,
)
class ReplaceHook(Hook):
"""
This hook performs replacements in files
"""
key = "replace"
defaults: dict[str, Optional[str]] = {}
def bump(self, replacements):
replacements.insert(
0,
(
self.config.dev.format(
version=self.releaser.prev_version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.prev_version.__dict__,
),
self.config.stable.format(
version=self.releaser.version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
),
),
)
def prepare(self, replacements):
replacements.insert(
0,
(
self.config.stable.format(
version=self.releaser.version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
),
self.config.dev.format(
version=self.releaser.next_version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.next_version.__dict__,
),
),
)
HOOKS = (ReadTheDocHook, ChangelogHook, CommandsHook, ReplaceHook)
|
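The Hook base class above defines a small contract: a key naming the config section, a defaults dict, an optional validate(), and bump()/prepare() methods that push (search, replace) pairs onto the shared replacements list. Below is a hedged sketch of a hypothetical extra hook written against that contract; BadgeHook and its 'pattern' option are inventions for illustration, not part of bumpr.

```python
# Hypothetical custom hook following the contract above (a sketch, not part of bumpr):
class BadgeHook(Hook):
    key = "badge"
    defaults = {
        "pattern": "badge/v{version}.svg",
    }

    def validate(self):
        if not self.config.get("pattern"):
            raise BumprError("badge hook requires a 'pattern'")

    def bump(self, replacements):
        # Rewrite the previously released version to the new one.
        replacements.insert(0, (
            self.config.pattern.format(version=self.releaser.prev_version),
            self.config.pattern.format(version=self.releaser.version),
        ))

    def prepare(self, replacements):
        # Point the badge at the upcoming development version.
        replacements.insert(0, (
            self.config.pattern.format(version=self.releaser.version),
            self.config.pattern.format(version=self.releaser.next_version),
        ))
```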
andrewhanlon/QCD_scripts
|
sigmond/channel.py
|
Python
|
gpl-3.0
| 7,175 | 0.012683 |
import abc
import subprocess
import logging
from observables import BLOperator, MCObservable
from data import BLDataChannel, GIDataChannel
import util
class Channel(metaclass=abc.ABCMeta):
ISOSPIN_MAP = {
'singlet': "0",
'doublet': "1h",
'triplet': "1",
'quartet': "3h",
'quintet': "2",
'sextet': "5h"
}
def __init__(self, *, particle_type=None, isospin, strangeness=None, laph_query="laph_query",
sigmond_query="sigmond_query"):
self.particle_type = particle_type
self.strangeness = strangeness
self.isospin = isospin
self.laph_query = laph_query
self.sigmond_query = sigmond_query
# @ADH - I think I am going to have the DataHandler deal with these in the future
self.raw_data_channels = list()
@staticmethod
def initialize(*, data_file, laph_query="laph_query", sigmond_query="sigmond_query",
is_basic_laph=True):
if is_basic_laph:
query_result = subprocess.check_output([laph_query, '-i', data_file]).decode()
laph_xml = util.queryToXML(query_result)
operator = BLOperator.createFromXML(laph_xml.find(".//Operator"))
if 'special' in data_file.split('/'):
return SpecialChannel(particle_type=operator.particle_type, isospin=operator.isospin,
|
strangeness=operator.strangeness, flavor=operator.flavor,
|
laph_query=laph_query, sigmond_query=sigmond_query)
elif operator.psq > 0:
return MovingChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, psq=operator.psq,
lg_irrep=operator.lg_irrep, laph_query=laph_query,
sigmond_query=sigmond_query)
else:
return AtRestChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, lg_irrep=operator.lg_irrep,
laph_query=laph_query, sigmond_query=sigmond_query)
else:
query_result = subprocess.check_output([sigmond_query, '-k', data_file]).decode()
try:
records = query_result.split('Record')
observable = MCObservable.createFromXML(util.queryToXML(records[1]))
if observable.psq > 0:
return MovingChannel(isospin=observable.isospin, psq=observable.psq,
lg_irrep=observable.lg_irrep, laph_query=laph_query,
sigmond_query=sigmond_query)
else:
return AtRestChannel(isospin=observable.isospin, lg_irrep=observable.lg_irrep,
laph_query=laph_query, sigmond_query=sigmond_query)
except IndexError:
logging.warning("%s contains no records", data_file)
except AttributeError:
logging.warning("%s contains Observables", data_file)
return None
def addRawDataChannel(self, path, is_basic_laph=True):
if is_basic_laph:
self.raw_data_channels.append(BLDataChannel(path, self.laph_query))
else:
self.raw_data_channels.append(GIDataChannel(path, self.sigmond_query))
@property
@abc.abstractmethod
def channel_string(self):
pass
@property
def is_special(self):
return isinstance(self, SpecialChannel)
@property
def is_atrest(self):
return isinstance(self, AtRestChannel)
@property
def is_moving(self):
return isinstance(self, MovingChannel)
def __hash__(self):
return hash(self.__repr__())
def __str__(self):
return self.channel_string
# @ADH - Should be checking that 'other' is an instance of an object
# derived from Channel. I'm not sure how to best do that right now.
# So, this will suffice for the moment.
def __eq__(self, other):
return self.__repr__() == other.__repr__()
def __ne__(self, other):
return self.__repr__() != other.__repr__()
def __lt__(self, other):
return self.__repr__() < other.__repr__()
def __gt__(self, other):
return self.__repr__() > other.__repr__()
def __le__(self, other):
return self.__repr__() <= other.__repr__()
def __ge__(self, other):
return self.__repr__() >= other.__repr__()
class SpecialChannel(Channel):
def __init__(self, *, particle_type, isospin, strangeness, flavor, laph_query="laph_query",
sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.flavor = flavor
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B"
elif self.particle_type == "fermion":
particle_type = "F"
strangeness = str(self.strangeness).replace('-', 'm')
return "{p_type}_{flavor}_I{isospin}_S{strangeness}_special".format(
p_type=particle_type, flavor=self.flavor, isospin=self.ISOSPIN_MAP[self.isospin],
strangeness=strangeness)
def __repr__(self):
return "SP_{}".format(self.channel_string)
class AtRestChannel(Channel):
def __init__(self, *, particle_type=None, isospin, strangeness=None, lg_irrep,
laph_query="laph_query", sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.psq = 0
self.lg_irrep = lg_irrep
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B_"
elif self.particle_type == "fermion":
particle_type = "F_"
else:
particle_type = ""
if self.strangeness is not None:
strangeness = "S{}_".format(self.strangeness).replace('-', 'm')
else:
strangeness = ""
return "{p_type}I{isospin}_{strangeness}P0_{irrep}".format(
p_type=particle_type, isospin=self.ISOSPIN_MAP[self.isospin], strangeness=strangeness,
irrep=self.lg_irrep)
def __repr__(self):
return "AR_{}".format(self.channel_string)
class MovingChannel(Channel):
def __init__(self, *, particle_type=None, isospin, strangeness=None, psq, lg_irrep,
laph_query="laph_query", sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.psq = psq
self.lg_irrep = lg_irrep
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B_"
elif self.particle_type == "fermion":
particle_type = "F_"
else:
particle_type = ""
if self.strangeness is not None:
strangeness = "S{}_".format(self.strangeness).replace('-', 'm')
else:
strangeness = ""
return "{p_type}I{isospin}_{strangeness}PSQ{psq}_{irrep}".format(
p_type=particle_type, isospin=self.ISOSPIN_MAP[self.isospin], strangeness=strangeness,
psq=self.psq, irrep=self.lg_irrep)
def __repr__(self):
return "MV_{}".format(self.channel_string)
|
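channel_string above encodes a naming convention built from the particle type, ISOSPIN_MAP, strangeness and irrep. A small sketch constructing an AtRestChannel directly (bypassing Channel.initialize and its external laph_query/sigmond_query calls) shows the resulting strings; the argument values are hypothetical.

```python
# Hypothetical construction, bypassing Channel.initialize and its external queries:
chan = AtRestChannel(particle_type="boson", isospin="triplet", strangeness=-1,
                     lg_irrep="A1g")
print(chan.channel_string)   # "B_I1_Sm1_P0_A1g"
print(repr(chan))            # "AR_B_I1_Sm1_P0_A1g"
print(chan.is_atrest)        # True
```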
lukas-bednar/jenkins-job-builder
|
tests/cmd/subcommands/test_delete.py
|
Python
|
apache-2.0
| 1,878 | 0 |
import os
from jenkins_jobs import cmd
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
@mock.patch('jenkins_jobs.builder.Jenkins.get_plugins_info', mock.MagicMock)
class DeleteTests(CmdTestsBase):
@mock.patch('jenkins_jobs.cmd.Builder.delete_job')
def test_delete_single_job(self, delete_job_mock):
"""
Test handling the deletion of a single Jenkins job.
"""
args = self.parser.parse_args(['delete', 'test_job'])
cmd.execute(args, self.config) # passes if executed without error
@mock.patch('jenkins_jobs.cmd.Builder.delete_job')
def test_delete_multiple_jobs(self, delete_job_mock):
"""
Test handling the deletion of multiple Jenkins jobs.
"""
|
args = self.parser.parse_args(['delete', 'test_job1', 'test_job2'])
cmd.execute(args, self.config) # passes if executed without error
@mock.patch('jenkins_jobs.builder.Jenkins.delete_job')
def test_delete_using_glob_params(self, delete_job_mock):
"""
|
Test handling the deletion of multiple Jenkins jobs using the glob
parameters feature.
"""
args = self.parser.parse_args(['delete',
'--path',
os.path.join(self.fixtures_path,
'cmd-002.yaml'),
'*bar*'])
cmd.execute(args, self.config)
calls = [mock.call('bar001'), mock.call('bar002')]
delete_job_mock.assert_has_calls(calls, any_order=True)
self.assertEqual(delete_job_mock.call_count, len(calls),
"Jenkins.delete_job() was called '%s' times when "
"expected '%s'" % (delete_job_mock.call_count,
len(calls)))
|
donce/django-cms
|
cms/admin/pageadmin.py
|
Python
|
bsd-3-clause
| 67,669 | 0.002941 |
# -*- coding: utf-8 -*-
import copy
from functools import wraps
import json
import sys
import django
from django.contrib.admin.helpers import AdminForm
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import get_deleted_objects
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist, ValidationError
from django.db import router, transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.template.defaultfilters import escape
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from cms.admin.change_list import CMSChangeList
from cms.admin.dialog.views import get_copy_dialog
from cms.admin.forms import (PageForm, AdvancedSettingsForm, PagePermissionForm,
PublicationDatesForm)
from cms.admin.permissionadmin import (PERMISSION_ADMIN_INLINES, PagePermissionInlineAdmin, ViewRestrictionInlineAdmin)
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from cms.admin.views import revert_plugins
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_PENDING
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, StaticPlaceholder
from cms.models.managers import PagePermissionsPermissionManager
from cms.plugin_pool import plugin_pool
from cms.toolbar_pool import toolbar_pool
from cms.utils import helpers, permissions, get_language_from_request, admin as admin_utils, copy_plugins
from cms.utils.i18n import get_language_list, get_language_tuple, get_language_object, force_language
from cms.utils.admin import jsonify_request
from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import find_placeholder_relation, current_site
from cms.utils.permissions import has_global_page_permission, has_generic_permission
from cms.utils.urlutils import add_url_parameters, admin_reverse
require_POST = method_decorator(require_POST)
if is_installed('reversion'):
from reversion.admin import VersionAdmin as ModelAdmin
from reversion import create_revision
else: # pragma: no cover
from django.contrib.admin import ModelAdmin
class ReversionContext(object):
def __enter__(self):
yield
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __call__(self, func):
"""Allows this revision context to be used as a decorator."""
@wraps(func)
def do_revision_context(*args, **kwargs):
self.__enter__()
exception = False
try:
|
try:
return func(*args, **kwargs)
except:
exception = True
if not self.__exit__(*sys.exc_info()):
raise
|
finally:
if not exception:
self.__exit__(None, None, None)
return do_revision_context
def create_revision():
return ReversionContext()
PUBLISH_COMMENT = "Publish"
INITIAL_COMMENT = "Initial version."
class PageAdmin(PlaceholderAdminMixin, ModelAdmin):
form = PageForm
search_fields = ('=id', 'title_set__slug', 'title_set__title', 'reverse_id')
revision_form_template = "admin/cms/page/history/revision_header.html"
recover_form_template = "admin/cms/page/history/recover_header.html"
add_general_fields = ['title', 'slug', 'language', 'template']
change_list_template = "admin/cms/page/tree/base.html"
list_filter = ['in_navigation', 'template', 'changed_by', 'soft_root']
title_frontend_editable_fields = ['title', 'menu_title', 'page_title']
inlines = PERMISSION_ADMIN_INLINES
def get_urls(self):
"""Get the admin urls
"""
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = [
pat(r'^([0-9]+)/advanced-settings/$', self.advanced),
pat(r'^([0-9]+)/dates/$', self.dates),
pat(r'^([0-9]+)/permission-settings/$', self.permissions),
pat(r'^([0-9]+)/delete-translation/$', self.delete_translation),
pat(r'^([0-9]+)/move-page/$', self.move_page),
pat(r'^([0-9]+)/copy-page/$', self.copy_page),
pat(r'^([0-9]+)/copy-language/$', self.copy_language),
pat(r'^([0-9]+)/dialog/copy/$', get_copy_dialog), # copy dialog
pat(r'^([0-9]+)/change-navigation/$', self.change_innavigation),
pat(r'^([0-9]+)/permissions/$', self.get_permissions),
pat(r'^([0-9]+)/undo/$', self.undo),
pat(r'^([0-9]+)/redo/$', self.redo),
pat(r'^([0-9]+)/change_template/$', self.change_template),
pat(r'^([0-9]+)/([a-z\-]+)/descendants/$', self.descendants), # menu html for page descendants
pat(r'^([0-9]+)/([a-z\-]+)/edit-field/$', self.edit_title_fields),
pat(r'^([0-9]+)/([a-z\-]+)/publish/$', self.publish_page),
pat(r'^([0-9]+)/([a-z\-]+)/unpublish/$', self.unpublish),
pat(r'^([0-9]+)/([a-z\-]+)/revert/$', self.revert_page),
pat(r'^([0-9]+)/([a-z\-]+)/preview/$', self.preview_page),
pat(r'^add-page-type/$', self.add_page_type),
pat(r'^published-pages/$', self.get_published_pagelist),
url(r'^resolve/$', self.resolve, name="cms_page_resolve"),
]
if plugin_pool.get_all_plugins():
url_patterns += plugin_pool.get_patterns()
url_patterns += super(PageAdmin, self).get_urls()
return url_patterns
def get_revision_instances(self, request, object):
"""Returns all the instances to be used in the object's revision."""
if isinstance(object, Title):
object = object.page
if isinstance(object, Page) and not object.publisher_is_draft:
object = object.publisher_public
placeholder_relation = find_placeholder_relation(object)
data = [object]
filters = {'placeholder__%s' % placeholder_relation: object}
for plugin in CMSPlugin.objects.filter(**filters):
data.append(plugin)
plugin_instance, admin = plugin.get_plugin_instance()
if plugin_instance:
data.append(plugin_instance)
if isinstance(object, Page):
titles = object.title_set.all()
for title in titles:
title.publisher_public = None
data.append(title)
return data
def save_model(self, request, obj, form, change):
"""
Move the page in the tree if necessary and save every placeholder
Content object.
"""
target = request.GET.get('target', None)
position = request.GET.get('position', None)
if 'recover' in request.path_info:
pk = obj.pk
if obj.parent_id:
try:
parent = Page.objects.get(pk=obj.parent_id)
except Page.DoesNotExist:
parent = None
else:
parent = None
obj.pk = None
obj.path = None
obj.numchild = 0
obj.depth = 0
if parent:
saved_obj = parent.add_child(instance=obj)
else:
|
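The fallback ReversionContext above is a no-op context manager that can also wrap a function, which is what its __call__/do_revision_context machinery provides when django-reversion is not installed. A brief usage sketch of the two forms on that fallback path:

```python
# Usage sketch for the fallback create_revision() defined above:
with create_revision():
    pass                      # __enter__/__exit__ are effectively no-ops here

@create_revision()            # the context object doubles as a decorator
def publish_page():
    return "published"

publish_page()                # runs through do_revision_context unchanged
```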
Sarsate/compute-image-packages
|
google_compute_engine/instance_setup/tests/instance_setup_test.py
|
Python
|
apache-2.0
| 14,219 | 0.004009 |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for instance_setup.py module."""
import subprocess
from google_compute_engine.instance_setup import instance_setup
from google_compute_engine.test_compat import mock
from google_compute_engine.test_compat import unittest
class InstanceSetupTest(unittest.TestCase):
def setUp(self):
self.mock_instance_config = mock.Mock()
self.mock_logger = mock.Mock()
self.mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
self.mock_setup.debug = False
self.mock_setup.instance_config = self.mock_instance_config
self.mock_setup.logger = self.mock_logger
@mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
@mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
@mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
def testInstanceSetup(self, mock_logger, mock_watcher, mock_config):
mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
mocks = mock.Mock()
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mocks.attach_mock(mock_config, 'config')
mocks.attach_mock(mock_setup, 'setup')
mock_logger_instance = mock.Mock()
mock_logger.Logger.return_value = mock_logger_instance
mock_watcher_instance = mock.Mock()
mock_watcher_instance.GetMetadata.return_value = {'hello': 'world'}
mock_watcher.MetadataWatcher.return_value = mock_watcher_instance
mock_config_instance = mock.Mock()
mock_config_instance.GetOptionBool.return_value = True
mock_config.InstanceConfig.return_value = mock_config_instance
instance_setup.InstanceSetup.__init__(mock_setup)
expected_calls = [
# Setup and reading the configuration file.
mock.call.logger.Logger(
name=mock.ANY, debug=False, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
mock.call.config.InstanceConfig(),
# Setup for local SSD.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'),
mock.call.setup._RunScript('optimize_local_ssd'),
# Setup for multiqueue virtio driver.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_multiqueue'),
mock.call.setup._RunScript('set_multiqueue'),
# Check network access for reaching the metadata server.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'network_enabled'),
mock.call.watcher.MetadataWatcher().GetMetadata(),
# Setup for SSH host keys if necessary.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_host_keys'),
mock.call.setup._SetSshHostKeys(),
# Setup for the boto config if necessary.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_boto_config'),
mock.call.setup._SetupBotoConfig(),
# Write the updated config file.
mock.call.config.InstanceConfig().WriteConfig(),
]
self.assertEqual(mocks.mock_calls, expected_calls)
self.assertEqual(mock_setup.metadata_dict, {'hello': 'world'})
@mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
@mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
@mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
def testInstanceSetupException(self, mock_logger, mock_watcher, mock_config):
mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
mocks = mock.Mock()
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mocks.attach_mock(mock_config, 'config')
mocks.attach_mock(mock_setup, 'setup')
mock_logger_instance = mock.Mock()
mock_logger.Logger.return_value = mock_logger_instance
mock_config_instance = mock.Mock()
mock_config_instance.GetOptionBool.return_value = False
mock_config_instance.WriteConfig.side_effect = IOError('Test Error')
mock_config.InstanceConfig.return_value = mock_config_instance
instance_setup.InstanceSetup.__init__(mock_setup)
expected_calls = [
mock.call.logger.Logger(
name=mock.ANY, debug=False, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
mock.call.config.InstanceConfig(),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_multiqueue'),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'network_enabled'),
mock.call.config.InstanceConfig().WriteConfig(),
mock.call.logger.Logger().warning('Test Error'),
]
self.assertEqual(mocks.mock_calls, expected_calls)
self.assertIsNone(mock_setup.metadata_dict)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess')
def testRunScript(self, mock_subprocess):
mock_readline = mock.Mock()
mock_readline.side_effect = [bytes(b'a\n'), bytes(b'b\n'), bytes(b'')]
mock_stdout = mock.Mock()
mock_stdout.readline = mock_readline
mock_process = mock.Mock()
mock_process.poll.return_value = 0
mock_process.stdout = mock_stdout
mock_subprocess.Popen.return_value = mock_process
script = '/tmp/script.py'
instance_setup.InstanceSetup._RunScript(self.mock_setup, script)
expected_calls = [mock.call('a'), mock.call('b')]
self.assertEqual(self.mock_logger.info.mock_calls, expected_calls)
mock_subprocess.Popen.assert_called_once_with(
script, shell=True, stderr=mock_subprocess.STDOUT,
stdout=mock_subprocess.PIPE)
|
mock_process.poll.assert_called_once_with()
|
def testGetInstanceId(self):
self.mock_setup.metadata_dict = {'instance': {'attributes': {}, 'id': 123}}
self.assertEqual(
instance_setup.InstanceSetup._GetInstanceId(self.mock_setup), '123')
self.mock_logger.warning.assert_not_called()
def testGetInstanceIdNotFound(self):
self.mock_setup.metadata_dict = {'instance': {'attributes': {}}}
self.assertIsNone(
instance_setup.InstanceSetup._GetInstanceId(self.mock_setup))
self.assertEqual(self.mock_logger.warning.call_count, 1)
@mock.patch('google_compute_engine.instance_setup.instance_setup.file_utils.SetPermissions')
@mock.patch('google_compute_engine.instance_setup.instance_setup.shutil.move')
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.tempfile.NamedTemporaryFile')
def testGenerateSshKey(
self, mock_tempfile, mock_call, mock_move, mock_permissions):
mocks = mock.Mock()
mocks.attach_mock(mock_tempfile, 'tempfile')
mocks.attach_mock(mock_call, 'call')
mocks.attach_mock(mock_move, 'move')
mocks.attach_mock(mock_permissions, 'permissions')
mocks.attach_mock(self.mock_logger, 'logger')
key_type = 'key-type'
key_dest = '/key/dest'
temp_dest = '/tmp/dest'
mock_tempfile.return_value = mock_tempfile
mock_tempfile.__enter__.return_value.name = temp_dest
instance_setup.InstanceSetup._GenerateSshKey(
self.mock_setup, key_type, key_dest)
expected_calls = [
mock.call.tempfile(prefix=key_type, delete=True),
mock.call.tempfile.__enter__(),
mock.call.tempfile.__exit__(None, None,
|
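These tests rely on mock.create_autospec plus Mock.attach_mock so that calls made on several collaborators are recorded, in order, on one parent mock and can be compared against an expected_calls list. A self-contained sketch of that pattern, with hypothetical names:

```python
# Self-contained sketch of the attach_mock ordering pattern used above
# (names here are hypothetical, not from the module under test):
try:
    from unittest import mock
except ImportError:            # Python 2 fallback, as provided by test_compat above
    import mock

def boot(logger, watcher):
    logger.info('starting')
    watcher.poll()

parent = mock.Mock()
logger, watcher = mock.Mock(), mock.Mock()
parent.attach_mock(logger, 'logger')
parent.attach_mock(watcher, 'watcher')

boot(logger, watcher)

# All calls are recorded on the shared parent, in order:
assert parent.mock_calls == [
    mock.call.logger.info('starting'),
    mock.call.watcher.poll(),
]
```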
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/traits-4.3.0-py2.7-macosx-10.10-x86_64.egg/traits/trait_base.py
|
Python
|
gpl-2.0
| 19,017 | 0.018089 |
#------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 06/21/2002
#
# Refactored into a separate module: 07/04/2003
#
#------------------------------------------------------------------------------
""" Defines common, low-level capabilities
|
needed by the Traits package.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
import os
import sys
from os import getcwd
from os.path import dirname, exists, join
from string import lowercase, uppercase
from types import (ListType, TupleType, DictType, StringType, UnicodeType,
|
IntType, LongType, FloatType, ComplexType, ClassType, TypeType)
# Set the Python version being used:
vi = sys.version_info
python_version = vi[0] + (float( vi[1] ) / 10.0)
try:
from traits.etsconfig.api import ETSConfig
except:
# If the ETSConfig package is not available, fake it:
class ETSConfig ( object ):
#-----------------------------------------------------------------------
# 'object' interface:
#-----------------------------------------------------------------------
def __init__ ( self ):
""" Constructor.
Note that this constructor can only ever be called from within
this module, since we don't expose the class.
"""
# Shadow attributes for properties:
self._application_data = None
self._toolkit = None
return
#-----------------------------------------------------------------------
# 'ETSConfig' interface:
#-----------------------------------------------------------------------
#-- Property Implementations -------------------------------------------
def _get_application_data ( self ):
""" Property getter.
This is a directory that applications and packages can safely
write non-user accessible data to i.e. configuration
information, preferences etc.
Do not put anything in here that the user might want to navigate
to (e.g. projects, user data files, etc).
The actual location differs between operating systems.
"""
if self._application_data is None:
self._application_data = self._initialize_application_data()
return self._application_data
def _set_application_data ( self, application_data ):
""" Property setter.
"""
self._application_data = application_data
application_data = property( _get_application_data,
_set_application_data )
def _get_toolkit ( self ):
"""
Property getter for the GUI toolkit. The value returned is, in
order of preference: the value set by the application; the value
passed on the command line using the '-toolkit' option; the value
specified by the 'ETS_TOOLKIT' environment variable; otherwise the
empty string.
"""
if self._toolkit is None:
self._toolkit = self._initialize_toolkit()
return self._toolkit
def _set_toolkit ( self, toolkit ):
"""
Property setter for the GUI toolkit. The toolkit can be set more
than once, but only if it is the same one each time. An application
that is written for a particular toolkit can explicitly set it
before any other module that gets the value is imported.
"""
if self._toolkit and (self._toolkit != toolkit):
raise ValueError( 'Cannot set toolkit to %s because it has '
'already been set to %s' % ( toolkit, self._toolkit ) )
self._toolkit = toolkit
return
toolkit = property( _get_toolkit, _set_toolkit )
#-- Private Methods ----------------------------------------------------
def _initialize_application_data ( self ):
""" Initializes the (default) application data directory.
"""
if sys.platform == 'win32':
environment_variable = 'APPDATA'
directory_name = 'Enthought'
else:
environment_variable = 'HOME'
directory_name = '.enthought'
# Lookup the environment variable:
parent_directory = os.environ.get( environment_variable, None )
if parent_directory is None:
raise ValueError( 'Environment variable "%s" not set' %
environment_variable )
application_data = os.path.join( parent_directory, directory_name )
# If a file already exists with this name then make sure that it is
# a directory!
if os.path.exists( application_data ):
if not os.path.isdir( application_data ):
raise ValueError( 'File "%s" already exists' %
application_data )
# Otherwise, create the directory:
else:
os.makedirs( application_data )
return application_data
def _initialize_toolkit ( self ):
""" Initializes the toolkit.
"""
# We handle the command line option even though it doesn't have the
# highest precedence because we always want to remove it from the
# command line:
if '-toolkit' in sys.argv:
opt_idx = sys.argv.index( '-toolkit' )
try:
opt_toolkit = sys.argv[ opt_idx + 1 ]
except IndexError:
raise ValueError( 'The -toolkit command line argument must '
'be followed by a toolkit name' )
# Remove the option:
del sys.argv[ opt_idx: opt_idx + 1 ]
else:
opt_toolkit = None
if self._toolkit is not None:
toolkit = self._toolkit
elif opt_toolkit is not None:
toolkit = opt_toolkit
else:
toolkit = os.environ.get( 'ETS_TOOLKIT', '' )
return toolkit
ETSConfig = ETSConfig()
#-------------------------------------------------------------------------------
# Provide Python 2.3+ compatible definitions (if necessary):
#-------------------------------------------------------------------------------
try:
from types import BooleanType
except ImportError:
BooleanType = IntType
def _enumerate ( seq ):
for i in xrange( len( seq) ):
yield i, seq[i]
try:
enumerate = enumerate
except:
enumerate = _enumerate
del _enumerate
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
ClassTypes = ( ClassType, TypeType )
SequenceTypes = ( ListType, TupleType )
ComplexTypes = ( float, int )
TypeTypes = ( StringType, UnicodeType, IntType, LongType, FloatType,
ComplexType, ListType, TupleType, DictType, BooleanType )
TraitNotifier = '__trait_notifier__'
# The standard Traits property cache prefix:
TraitsCache = '_traits_cache_'
#-------------------------------------------------------------------------------
# Singleton 'Uninitialized' object:
#---------------------------------------
|
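The fallback ETSConfig above resolves the GUI toolkit with a documented precedence: a value already set by the application, then a -toolkit command-line option, then the ETS_TOOLKIT environment variable, and finally the empty string. A small sketch of that behaviour on the fallback object (the 'qt4' value is only an example):

```python
# Sketch of the precedence described above, using the fallback ETSConfig instance
# (the 'qt4' value is a hypothetical example):
import os
os.environ['ETS_TOOLKIT'] = 'qt4'

print(ETSConfig.toolkit)      # 'qt4' -- picked up from the environment variable
ETSConfig.toolkit = 'qt4'     # setting the same value again is allowed
# ETSConfig.toolkit = 'wx'    # would raise ValueError: already set to 'qt4'
```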
hjanime/VisTrails
|
vistrails/core/modules/basic_modules.py
|
Python
|
bsd-3-clause
| 69,112 | 0.002879 |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""basic_modules defines basic VisTrails Modules that are used in most
pipelines."""
from __future__ import division
import vistrails.core.cache.hasher
from vistrails.core.debug import format_exception
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.vistrails_module import Module, new_module, \
Converter, NotCacheable, ModuleError
from vistrails.core.modules.config import ConstantWidgetConfig, \
QueryWidgetConfig, ParamExpWidgetConfig, ModuleSettings, IPort, OPort, \
CIPort
import vistrails.core.system
from vistrails.core.utils import InstanceObject
from vistrails.core import debug
from abc import ABCMeta
from ast import literal_eval
from itertools import izip
import mimetypes
import os
import pickle
import re
import shutil
import zipfile
import urllib
try:
import hashlib
sha_hash = hashlib.sha1
except ImportError:
import sha
sha_hash = sha.new
###############################################################################
version = '2.1.1'
name = 'Basic Modules'
identifier = 'org.vistrails.vistrails.basic'
old_identifiers = ['edu.utah.sci.vistrails.basic']
constant_config_path = "vistrails.gui.modules.constant_configuration"
query_config_path = "vistrails.gui.modules.query_configuration"
paramexp_config_path = "vistrails.gui.modules.paramexplore"
def get_port_name(port):
if hasattr(port, 'name'):
return port.name
else:
return port[0]
class meta_add_value_ports(type):
def __new__(cls, name, bases, dct):
"""This metaclass adds the 'value' input
|
and output ports.
"""
mod = type.__new__(cls, name, bases, dct)
|
if '_input_ports' in mod.__dict__:
input_ports = mod._input_ports
if not any(get_port_name(port_info) == 'value'
for port_info in input_ports):
mod._input_ports = [('value', mod)]
mod._input_ports.extend(input_ports)
else:
mod._input_ports = [('value', mod)]
if '_output_ports' in mod.__dict__:
output_ports = mod._output_ports
if not any(get_port_name(port_info) == 'value'
for port_info in output_ports):
mod._output_ports = [('value', mod)]
mod._output_ports.extend(output_ports)
else:
mod._output_ports = [('value', mod)]
return mod
class Constant(Module):
"""Base class for all Modules that represent a constant value of
some type.
When implementing your own constant, you have to adhere to the
following interface:
Implement the following methods:
translate_to_python(x): Given a string, translate_to_python
must return a python value that will be the value seen by the
execution modules.
For example, translate_to_python called on a float parameter
with value '3.15' will return float('3.15').
translate_to_string(): Return a string representation of the
current constant, which will eventually be passed to
translate_to_python.
validate(v): return True if given python value is a plausible
value for the constant. It should be implemented such that
validate(translate_to_python(x)) == True for all valid x
A constant must also expose its default value, through the field
default_value.
There are fields you are not allowed to use in your constant classes.
These are: 'id', 'interpreter', 'logging' and 'change_parameter'
You can also define the constant's own GUI widget.
See core/modules/constant_configuration.py for details.
"""
_settings = ModuleSettings(abstract=True)
_output_ports = [OPort("value_as_string", "String")]
__metaclass__ = meta_add_value_ports
@staticmethod
def validate(x):
raise NotImplementedError
@staticmethod
def translate_to_python(x):
raise NotImplementedError
def compute(self):
"""Constant.compute() only checks validity (and presence) of
input value."""
v = self.get_input("value")
b = self.validate(v)
if not b:
raise ModuleError(self, "Internal Error: Constant failed validation")
self.set_output("value", v)
self.set_output("value_as_string", self.translate_to_string(v))
def setValue(self, v):
self.set_output("value", self.translate_to_python(v))
self.upToDate = True
@staticmethod
def translate_to_string(v):
return str(v)
@staticmethod
def get_widget_class():
# return StandardConstantWidget
return None
@staticmethod
def query_compute(value_a, value_b, query_method):
if query_method == '==' or query_method is None:
return (value_a == value_b)
elif query_method == '!=':
return (value_a != value_b)
return False
def new_constant(name, py_conversion=None, default_value=None, validation=None,
widget_type=None,
str_conversion=None, base_class=Constant,
compute=None, query_compute=None):
"""new_constant(name: str,
py_conversion: callable,
default_value: python_type,
validation: callable,
widget_type: (path, name) tuple or QWidget type,
str_conversion: callable,
base_class: class,
compute: callable,
query_compute: static callable) -> Module
new_constant dynamically creates a new Module derived from
Constant with given py_conversion and str_conversion functions, a
corresponding python type and a widget type. py_conversion is a
python callable that takes a string and returns a python value of
the type that the class should hold. str_conversion does the reverse.
This is the quickest way to create new Constant Modules."""
d = {}
if py_conversion is not None:
d["translate_to_python"] = py_conversion
elif base_class == Constant:
raise ValueError("Must specify translate_to_python for constant")
if validation is not None:
d["validate"]
|
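The Constant docstring and the new_constant factory above describe the interface a constant must implement: translate_to_python, translate_to_string, validate, and a default_value. Below is a hedged sketch of a hand-written constant following that documented contract; FloatPair is an invention for illustration, not one of VisTrails' built-in modules.

```python
# Hypothetical constant following the documented interface (a sketch only):
class FloatPair(Constant):
    """A constant holding a pair of floats, entered as 'x,y'."""
    default_value = (0.0, 0.0)

    @staticmethod
    def translate_to_python(x):
        a, b = x.split(',')
        return (float(a), float(b))

    @staticmethod
    def translate_to_string(v):
        return '%r,%r' % (v[0], v[1])

    @staticmethod
    def validate(v):
        return (isinstance(v, tuple) and len(v) == 2 and
                all(isinstance(c, float) for c in v))

# Per the docstring, validate(translate_to_python(x)) should hold for all valid x:
assert FloatPair.validate(FloatPair.translate_to_python('3.0,4.5'))
```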
xcyan/models
|
ptn/model_ptn.py
|
Python
|
apache-2.0
| 8,258 | 0.004481 |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations for Im2Vox PTN (NIPS16) model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import losses
import metrics
import model_voxel_generation
import utils
from nets import im2vox_factory
slim = tf.contrib.slim
class model_PTN(model_voxel_generation.Im2Vox): # pylint:disable=invalid-name
"""Inherits the generic Im2Vox model class and implements the functions."""
def __init__(self, params):
super(model_PTN, self).__init__(params)
# For testing, this selects all views in input
def preprocess_with_all_views(self, raw_inputs):
(quantity, num_views) = raw_inputs['images'].get_shape().as_list()[:2]
inputs = dict()
inputs['voxels'] = []
inputs['images_1'] = []
for k in xrange(num_views):
inputs['matrix_%d' % (k + 1)] = []
inputs['matrix_1'] = []
for n in xrange(quantity):
for k in xrange(num_views):
inputs['images_1'].append(raw_inputs['images'][n, k, :, :, :])
inputs['voxels'].append(raw_inputs['voxels'][n, :, :, :, :])
tf_matrix = self.get_transform_matrix(k)
inputs['matrix_%d' % (k + 1)].append(tf_matrix)
inputs['images_1'] = tf.stack(inputs['images_1'])
inputs['voxels'] = tf.stack(inputs['voxels'])
for k in xrange(num_views):
inputs['matrix_%d' % (k + 1)] = tf.stack(inputs['matrix_%d' % (k + 1)])
return inputs
def get_model_fn(self, is_training=True, reuse=False, run_projection=True):
return im2vox_factory.get(self._params, is_training, reuse, run_projection)
def get_regularization_loss(self, scopes):
return losses.regularization_loss(scopes, self._params)
def get_loss(self, inputs, outputs):
"""Computes the loss used for PTN paper (projection + volume loss)."""
g_loss = tf.zeros(dtype=tf.float32, shape=[])
if self._params.proj_weight:
g_loss += losses.add_volume_proj_loss(
inputs, outputs, self._params.step_size, self._params.proj_weight)
if self._params.volume_weight:
g_loss += losses.add_volume_loss(inputs, outputs, 1,
self._params.volume_weight)
slim.summaries.add_scalar_summary(g_loss, 'im2vox_loss', prefix='losses')
return g_loss
def get_metrics(self, inputs, outputs):
"""Aggregate the metrics for voxel generation model.
Args:
inputs: Input dictionary of the voxel generation model.
outputs: Output dictionary returned by the voxel generation model.
Returns:
names_to_values: metrics->values (dict).
names_to_updates: metrics->ops (dict).
"""
names_to_values = dict()
names_to_updates = dict()
tmp_values, tmp_updates = metrics.add_volume_iou_metrics(inputs, outputs)
names_to_values.update(tmp_values)
names_to_updates.update(tmp_updates)
for name, value in names_to_values.iteritems():
slim.summaries.add_scalar_summary(
value, name, prefix='eval', print_summary=True)
return names_to_values, names_to_updates
def write_disk_grid(self,
global_step,
log_dir,
input_images,
gt_projs,
pred_projs,
input_voxels=None,
output_voxels=None):
"""Function called by TF to save the prediction periodically."""
summary_freq = self._params.save_every
def write_grid(input_images, gt_projs, pred_projs, global_step,
input_voxels, output_voxels):
"""Native python function to call for writing images to files."""
grid = _build_image_grid(
input_images,
gt_projs,
pred_projs,
input_voxels=input_voxels,
output_voxels=output_voxels)
if global_step % summary_freq == 0:
|
img_path = os.path.join(log_dir, '%s.jpg' % str(global_step))
|
utils.save_image(grid, img_path)
return grid
save_op = tf.py_func(write_grid, [
input_images, gt_projs, pred_projs, global_step, input_voxels,
output_voxels
], [tf.uint8], 'write_grid')[0]
slim.summaries.add_image_summary(
tf.expand_dims(save_op, axis=0), name='grid_vis')
return save_op
def get_transform_matrix(self, view_out):
"""Get the 4x4 Perspective Transfromation matrix used for PTN."""
num_views = self._params.num_views
focal_length = self._params.focal_length
focal_range = self._params.focal_range
phi = 30
theta_interval = 360.0 / num_views
theta = theta_interval * view_out
# pylint: disable=invalid-name
camera_matrix = np.zeros((4, 4), dtype=np.float32)
intrinsic_matrix = np.eye(4, dtype=np.float32)
extrinsic_matrix = np.eye(4, dtype=np.float32)
sin_phi = np.sin(float(phi) / 180.0 * np.pi)
cos_phi = np.cos(float(phi) / 180.0 * np.pi)
sin_theta = np.sin(float(-theta) / 180.0 * np.pi)
cos_theta = np.cos(float(-theta) / 180.0 * np.pi)
rotation_azimuth = np.zeros((3, 3), dtype=np.float32)
rotation_azimuth[0, 0] = cos_theta
rotation_azimuth[2, 2] = cos_theta
rotation_azimuth[0, 2] = -sin_theta
rotation_azimuth[2, 0] = sin_theta
rotation_azimuth[1, 1] = 1.0
## rotation axis -- x
rotation_elevation = np.zeros((3, 3), dtype=np.float32)
rotation_elevation[0, 0] = cos_phi
rotation_elevation[0, 1] = sin_phi
rotation_elevation[1, 0] = -sin_phi
rotation_elevation[1, 1] = cos_phi
rotation_elevation[2, 2] = 1.0
rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)
displacement = np.zeros((3, 1), dtype=np.float32)
displacement[0, 0] = float(focal_length) + float(focal_range) / 2.0
displacement = np.matmul(rotation_matrix, displacement)
extrinsic_matrix[0:3, 0:3] = rotation_matrix
extrinsic_matrix[0:3, 3:4] = -displacement
intrinsic_matrix[2, 2] = 1.0 / float(focal_length)
intrinsic_matrix[1, 1] = 1.0 / float(focal_length)
camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)
return camera_matrix
def _build_image_grid(input_images,
gt_projs,
pred_projs,
input_voxels,
output_voxels,
vis_size=128):
"""Builds a grid image by concatenating the input images."""
quantity = input_images.shape[0]
for row in xrange(int(quantity / 3)):
for col in xrange(3):
index = row * 3 + col
input_img_ = utils.resize_image(input_images[index, :, :, :], vis_size,
vis_size)
gt_proj_ = utils.resize_image(gt_projs[index, :, :, :], vis_size,
vis_size)
pred_proj_ = utils.resize_image(pred_projs[index, :, :, :], vis_size,
vis_size)
gt_voxel_vis = utils.resize_image(
utils.display_voxel(input_voxels[index, :, :, :, 0]), vis_size,
vis_size)
pred_voxel_vis = utils.resize_image(
utils.display_voxel(output_voxels[index, :, :, :, 0]), vis_size,
vis_size)
if col == 0:
tmp_ = np.concatenate(
[input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis], 1)
else:
tmp_ = np.concatenate([
tmp_, input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis
], 1)
if row == 0:
out_grid = tmp_
|
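get_transform_matrix above composes an extrinsic rotation (azimuth about the vertical axis, then elevation about x, per the inline comments), a displacement along the rotated viewing direction, and an intrinsic scaling by 1/focal_length. The self-contained numpy sketch below repeats the same construction to make the composition concrete; the parameter values (num_views=24, focal_length=0.866, focal_range=1.732, phi=30) are assumptions for illustration, not values taken from this repository.

```python
# Standalone numpy sketch of the camera-matrix construction above
# (parameter values are hypothetical):
import numpy as np

def transform_matrix(view_out, num_views=24, focal_length=0.866,
                     focal_range=1.732, phi=30.0):
    theta = (360.0 / num_views) * view_out
    sin_phi, cos_phi = np.sin(np.deg2rad(phi)), np.cos(np.deg2rad(phi))
    sin_theta, cos_theta = np.sin(np.deg2rad(-theta)), np.cos(np.deg2rad(-theta))

    # Rotation about the vertical axis (azimuth), then about x (elevation).
    rot_az = np.array([[cos_theta, 0.0, -sin_theta],
                       [0.0,       1.0,  0.0],
                       [sin_theta, 0.0,  cos_theta]], dtype=np.float32)
    rot_el = np.array([[cos_phi,  sin_phi, 0.0],
                       [-sin_phi, cos_phi, 0.0],
                       [0.0,      0.0,     1.0]], dtype=np.float32)
    rotation = np.matmul(rot_az, rot_el)

    # Camera displacement along the rotated viewing direction.
    displacement = np.matmul(rotation, np.array(
        [[focal_length + focal_range / 2.0], [0.0], [0.0]], dtype=np.float32))

    extrinsic = np.eye(4, dtype=np.float32)
    extrinsic[0:3, 0:3] = rotation
    extrinsic[0:3, 3:4] = -displacement

    intrinsic = np.eye(4, dtype=np.float32)
    intrinsic[1, 1] = intrinsic[2, 2] = 1.0 / focal_length

    return np.matmul(extrinsic, intrinsic)

print(transform_matrix(view_out=0))
```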
prior/webinars
|
webinars_web/webinars/views/hub_syncs.py
|
Python
|
apache-2.0
| 1,234 | 0.010535 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from webinars_web.webinars.views import syncs
def show(request, sync_id):
from webinars_web.webinars import models as wm
return syncs._show(request, 'hub', wm.HubSync.objects.select_related('hub').get(pk=sync_id))
def new(request, hub_id):
from webinars_web.webinars import models as wm
return syncs._new(request, 'hub', wm.Hub.objects.get(pk=hub_id))
def interrupt(request, hub_id):
|
from webinars_web.webinars import models as wm
return syncs._interrupt(request, 'hub', wm.Hub.objects.get(pk=hub_id))
def list(request, hub_id):
from webinars_web.webinars import models as wm
hub = wm.Hub.objects.get(pk=hub_id)
hub_syncs = wm.HubSync.objects.filter(hub=hub).order_by('-started_at')
|
account_syncs = wm.AccountSync.objects.filter(account__hub=hub, parent__isnull=True).order_by('-started_at')
event_syncs = wm.EventSync.objects.filter(event__account__hub=hub, parent__isnull=True).order_by('-started_at')
return render_to_response('hub_syncs/list.djml', {'hub':hub, 'hub_syncs':hub_syncs, 'account_syncs':account_syncs, 'event_syncs':event_syncs}, context_instance=RequestContext(request))
|
apoorva-sharma/deep-frame-interpolation
|
tensorflow_tutorials-master/python/09_convolutional_autoencoder.py
|
Python
|
mit
| 5,033 | 0.000199 |
"""Tutorial on how to create a convolutional autoencoder w/ Tensorflow.
Parag K. Mital, Jan 2016
"""
import tensorflow as tf
import numpy as np
import math
from libs.activations import lrelu
from libs.utils import corrupt
# %%
def autoencoder(input_shape=[None, 784],
n_filters=[1, 10, 10, 10],
filter_sizes=[3, 3, 3, 3],
corruption=False):
"""Build a deep denoising autoencoder w/ tied weights.
Parameters
----------
input_shape : list, optional
Description
n_filters : list, optional
Description
filter_sizes : list, optional
Description
Returns
-------
x : Tensor
Input placeholder to the network
z : Tensor
Inner-most latent representation
y : Tensor
Output reconstruction of the input
cost : Tensor
Overall cost to use for training
Raises
------
ValueError
Description
"""
# %%
# input to the network
x = tf.placeholder(
tf.float32, input_shape, name='x')
# %%
# ensure 2-d is converted to square tensor.
if len(x.get_shape()) == 2:
x_dim = np.sqrt(x.get_shape().as_list()[1])
if x_dim != int(x_dim):
raise ValueError('Unsupported input dimensions')
x_dim = int(x_dim)
x_tensor = tf.reshape(
x, [-1, x_dim, x_dim, n_filters[0]])
elif len(x.get_shape()) == 4:
x_tensor = x
else:
raise ValueError('Unsupported input dimensions')
current_input = x_tensor
# %%
# Optionally apply denoising autoencoder
if corruption:
current_input = corrupt(current_input)
# %%
# Build the encoder
encoder = []
shapes = []
for layer_i, n_output in enumerate(n_filters[1:]):
n_input = current_input.get_shape().as_list()[3]
shapes.append(current_input.get_shape().as_list())
W = tf.Variable(
tf.random_uniform([
filter_sizes[layer_i],
filter_sizes[layer_i],
n_input, n_output],
-1.0 / math.sqrt(n_input),
1.0 / math.sqrt(n_input)))
b = tf.Variable(tf.zeros([n_output]))
encoder.append(W)
output = lrelu(
tf.add(tf.nn.conv2d(
current_input, W, strides=[1, 2, 2, 1], padding='SAME'), b))
current_input = output
# %%
# store the latent representation
z = current_input
encoder.reverse()
shapes.reverse()
# %%
# Build the decoder using the same weights
for layer_i, shape in enumerate(shapes):
W = encoder[layer_i]
b = tf.Variable(tf.zeros([W.get_shape().as_list()[2]]))
output = lrelu(tf.add(
tf.nn.conv2d_transpose(
current_input, W,
tf.pack([tf.shape(x)[0], shape[1], shape[2], shape[3]]),
strides=[1, 2, 2, 1], padding='SAME'), b))
|
current_input = output
# %%
# now have the reconstruction through the network
y = current_input
# cost function measures pixel-wise difference
cost = tf.reduce_sum(tf.square(y - x_tensor))
# %%
return {'x': x, 'z': z, 'y': y, 'cost': cost}
# %%
def test_mnist():
"""Test the convolutional autoencder using MNIST."""
# %%
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
|
import matplotlib.pyplot as plt
# %%
# load MNIST as before
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
mean_img = np.mean(mnist.train.images, axis=0)
ae = autoencoder()
# %%
learning_rate = 0.01
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])
# %%
# We create a session to use the graph
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %%
# Fit all training data
batch_size = 100
n_epochs = 10
for epoch_i in range(n_epochs):
for batch_i in range(mnist.train.num_examples // batch_size):
batch_xs, _ = mnist.train.next_batch(batch_size)
train = np.array([img - mean_img for img in batch_xs])
sess.run(optimizer, feed_dict={ae['x']: train})
print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))
# %%
# Plot example reconstructions
n_examples = 10
test_xs, _ = mnist.test.next_batch(n_examples)
test_xs_norm = np.array([img - mean_img for img in test_xs])
recon = sess.run(ae['y'], feed_dict={ae['x']: test_xs_norm})
print(recon.shape)
fig, axs = plt.subplots(2, n_examples, figsize=(10, 2))
for example_i in range(n_examples):
axs[0][example_i].imshow(
np.reshape(test_xs[example_i, :], (28, 28)))
axs[1][example_i].imshow(
np.reshape(
np.reshape(recon[example_i, ...], (784,)) + mean_img,
(28, 28)))
fig.show()
plt.draw()
plt.waitforbuttonpress()
# %%
if __name__ == '__main__':
test_mnist()
|
dwillmer/numpy
|
numpy/distutils/system_info.py
|
Python
|
bsd-3-clause
| 85,113 | 0.000881 |
#!/bin/env python
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Currently, the following
classes are available:
atlas_info
atlas_threads_info
atlas_blas_info
atlas_blas_threads_info
lapack_atlas_info
lapack_atlas_threads_info
atlas_3_10_info
atlas_3_10_threads_info
atlas_3_10_blas_info,
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
blas_info
lapack_info
openblas_info
blis_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
x11_info
lapack_src_info
blas_src_info
numpy_info
numarray_info
numpy_info
boost_python_info
agg2_info
wx_info
gdk_pixbuf_xlib_2_info
gdk_pixbuf_2_info
gdk_x11_2_info
gtkp_x11_2_info
gtkp_2_info
xft_info
freetype2_info
umfpack_info
Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
asked resource is not available (system_info could not find it).
Several *_info classes specify an environment variable that points to the
locations of software. When the corresponding environment variable is set
to 'None', the software is ignored, even when it is available in the
system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
in precedence to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options. The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL has options that are the default for each section. The
available sections are fftw, atlas, and x11. Appropriate defaults are
used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. ALL section in site.cfg
Only the first complete match is returned.
Example:
----------
[ALL]
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
fftw_libs = rfftw, fftw
fftw_opt_libs = rfftw_threaded, fftw_threaded
# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
atlas_libs = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Authors:
Pearu Peterson <pearu@cens.ioc.ee>, February 2002
David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import copy
import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError
from ConfigParser import RawConfigParser as ConfigParser
else:
from configparser import NoOptionError
from configparser import RawConfigParser as ConfigParser
# It seems that some people are importing ConfigParser from here so it is
# good to keep its class name. Use of RawConfigParser is needed in
# order to be able to load path names with percent in them, like
# `feature%2Fcool` which is common on git flow branch names.
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import \
find_executable, exec_command, get_pythonexe
from numpy.distutils.misc_util import is_sequence, is_string, \
get_shared_lib_extension
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
import distutils.ccompiler
import tempfile
import shutil
# Determine number of bits
import platform
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
Inputs:
paths : sequence
A sequence of strings (typically paths)
bits : int
An integer, the only valid values are 32 or 64. A ValueError exception
is raised otherwise.
Examples:
Consider a list of directories
>>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
For a 32-bit platform, this is already valid:
>>> np.distutils.system_info.libpaths(paths,32)
['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
On 64 bits, we prepend the '64' postfix
>>> np.distutils.system_info.libpaths(paths,64)
['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
'/usr/lib64', '/usr/lib']
"""
if bits not in (32, 64):
raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
# Handle 32bit case
if bits == 32:
return paths
# Handle 64bit case
out = []
for p in paths:
out.extend([p + '64', p])
return out
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
os.path.join(distutils.sysconfig.EXEC_PREFIX,
'libs')]
default_runtime_dirs = []
default_include_dirs = []
default_src_dirs = ['.']
default_x11_lib_dirs = []
default_x11_include_dirs = []
else:
default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
'/opt/local/lib', '/sw/lib'], platform_bits)
default_runtime_dirs = []
    default_include_dirs = ['/usr/local/include',
'/opt/include', '/usr/include',
# path of umfpack under macports
'/opt/local/include/ufsparse',
'/opt/local/include', '/sw/include',
'/usr/include/suitesparse']
default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
'/usr/lib'], platform_bits)
default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
'/usr/include']
if os.path.exists('/usr/lib/X11'):
globbed_x11_dir = glob('/usr/lib/*/libX11.so')
if globbed_x11_dir:
x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
import subprocess as sp
tmp = None
try:
# Explicitly open/close file to avoid ResourceWarning when
# tests are run in debug mode Python 3.
tmp = open(os.devnull, 'w')
p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
stderr=tmp)
except (OSError, DistutilsError):
# OSError if gcc is not installed, or SandboxViolation (DistutilsError
# subcla
|
gklyne/annalist
|
src/annalist_root/annalist/tests/test_recordview.py
|
Python
|
mit
| 56,603 | 0.010918 |
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Tests for RecordView module and view
Note: this module tests for rendering specifically for RecordView values, using
view description sitedata files, and as such duplicates some tests covered by
module test_entitygenericedit.
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import json
import unittest
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from django.http import QueryDict
from django.contrib.auth.models import User
from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions
from django.test.client import Client
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist import message
from annalist.models.site import Site
from annalist.models.sitedata import SiteData
from annalist.models.collection import Collection
from annalist.models.recordview import RecordView
from annalist.models.recordfield import RecordField
from annalist.views.uri_builder import uri_with_params
from annalist.views.recordviewdelete import RecordViewDeleteConfirmedView
from annalist.views.form_utils.fieldchoice import FieldChoice
from .AnnalistTestCase import AnnalistTestCase
from .tests import (
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .init_tests import (
init_annalist_test_site,
init_annalist_test_coll,
install_annalist_named_coll,
create_test_coll_inheriting,
init_annalist_named_test_coll,
resetSitedata
)
from .entity_testutils import (
make_message, make_quoted_message,
site_dir, collection_dir,
site_view_url, collection_edit_url,
collection_entity_view_url,
collection_create_values,
render_select_options,
create_test_user,
context_field_map,
context_view_field,
context_bind_fields,
check_context_field, check_context_field_value,
)
from .entity_testviewdata import (
recordview_dir,
recordview_coll_url, recordview_url, recordview_edit_url,
recordview_value_keys, recordview_load_keys,
recordview_create_values, recordview_values, recordview_read_values,
view_view_context_data,
default_view_fields_list, view_view_fields_list,
view_view_form_data,
recordview_delete_confirm_form_data
)
from .entity_testentitydata import (
entity_url, entitydata_edit_url, entitydata_list_type_url,
default_fields, default_label, default_comment, error_label,
layout_classes
)
from .entity_testsitedata import (
make_field_choices, no_selection,
get_site_default_entity_fields_sorted,
get_site_bibentry_fields_sorted
)
# -----------------------------------------------------------------------------
#
# RecordView tests
#
# -----------------------------------------------------------------------------
class RecordViewTest(AnnalistTestCase):
def setUp(self):
init_annalist_test_site()
self.testsite = Site(TestBaseUri, TestBaseDir)
self.sitedata = SiteData(self.testsite)
self.testcoll = Collection(self.testsite, "testcoll")
self.layout = (
{ 'enum_field_placement_id': layout.ENUM_FIELD_PLACEMENT_ID
, 'enum_list_type_id': layout.ENUM_LIST_TYPE_ID
, 'enum_render_type_id': layout.ENUM_RENDER_TYPE_ID
, 'enum_value_type_id': layout.ENUM_VALUE_TYPE_ID
, 'enum_value_mode_id': layout.ENUM_VALUE_MODE_ID
, 'field_typeid': layout.FIELD_TYPEID
, 'group_typeid': layout.GROUP_TYPEID
, 'list_typeid': layout.LIST_TYPEID
, 'type_typeid': layout.TYPE_TYPEID
, 'user_typeid': layout.USER_TYPEID
, 'view_typeid': layout.VIEW_TYPEID
, 'vocab_typeid': layout.VOCAB_TYPEID
, 'field_dir': layout.FIELD_DIR
, 'group_dir': layout.GROUP_DIR
, 'list_dir': layout.LIST_DIR
, 'type_dir': layout.TYPE_DIR
, 'user_dir': layout.USER_DIR
, 'view_dir': layout.VIEW_DIR
, 'vocab_dir': layout.VOCAB_DIR
})
return
def tearDown(self):
return
@classmethod
def setUpClass(cls):
super(RecordViewTest, cls).setUpClass()
return
@classmethod
def tearDownClass(cls):
super(RecordViewTest, cls).tearDownClass()
resetSitedata(scope="collections")
return
def test_RecordViewTest(self):
self.assertEqual(Collection.__name__, "Collection", "Check Collection class name")
return
def test_recordview_init(self):
t = RecordView(self.testcoll, "testview")
u = recordview_coll_url(self.testsite, coll_id="testcoll", view_id="testview")
self.assertEqual(t._entitytype, ANNAL.CURIE.View)
self.assertEqual(t._entityfile, layout.VIEW_META_FILE)
self.assertEqual(t._entityref, layout.COLL_BASE_VIEW_REF%{'id': "testview"})
self.assertEqual(t._entityid, "testview")
self.assertEqual(t._entityurl, u)
self.assertEqual(t._entitydir, recordview_dir(view_id="testview"))
self.assertEqual(t._values, None)
return
def test_recordview1_data(self):
t = RecordView(self.testcoll, "view1")
self.assertEqual(t.get_id(), "view1")
self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
self.assertIn(
"/c/testcoll/d/%(view_dir)s/view1/"%self.layout,
t.get_url()
)
self.assertEqual(
TestBaseUri + "/c/testcoll/d/%(view_typeid)s/view1/"%self.layout,
t.get_view_url()
)
t.set_values(recordview_create_values(view_id="view1"))
td = t.get_values()
self.assertEqual(set(td.keys()), set(recordview_value_keys()))
v = recordview_values(view_id="view1")
self.assertDictionaryMatch(td, v)
return
def test_recordview2_data(self):
t = RecordView(self.testcoll, "view2")
self.assertEqual(t.get_id(), "view2")
self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
self.assertIn(
"/c/testcoll/d/%(view_dir)s/view2/"%self.layout,
t.get_url()
)
self.assertEqual(
TestBaseUri + "/c/testcoll/d/%(view_typeid)s/view2/"%self.layout,
t.get_view_url()
)
t.set_values(recordview_create_values(view_id="view2"))
td = t.get_values()
self.assertEqual(set(td.keys()), set(recordview_value_keys()))
v = recordview_values(view_id="view2")
self.assertDictionaryMatch(td, v)
return
def test_recordview_create_load(self):
t = RecordView.create(self.testcoll, "view1", recordview_create_values(view_id="view1"))
td = RecordView.load(self.testcoll, "view1").get_values()
v = recordview_read_values(view_id="view1")
self.assertKeysMatch(td, v)
self.assertDictionaryMatch(td, v)
return
def test_recordview_default_data(self):
t = RecordView.load(self.testcoll, "Default_view", altscope="all")
self.assertEqual(t.get_id(), "Default_view")
self.assertIn(
"/c/_annalist_site/d/%(view_dir)s/Default_view"%self.layout,
t.get_url()
)
self.assertIn(
"/c/testcoll/d/%(view_typeid)s/Default_view"%self.layout,
t.ge
|
rcosnita/fantastico
|
fantastico/oauth2/models/tests/__init__.py
|
Python
|
mit
| 1,175 | 0.00766 |
'''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.oauth2.models.tests
'''
|
BitWriters/Zenith_project
|
zango/lib/python3.5/site-packages/django/middleware/security.py
|
Python
|
mit
| 1,753 | 0.001141 |
import re
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
class SecurityMiddleware(object):
def __init__(self):
self.sts_seconds = settings.SECURE_HSTS_SECONDS
self.sts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS
self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF
self.xss_filter = settings.SECURE_BROWSER_XSS_FILTER
self.redirect = settings.SECURE_SSL_REDIRECT
self.redirect_host = settings.SECURE_SSL_HOST
self.redirect_exempt = [re.compile(r) for r in settings.SECURE_REDIRECT_EXEMPT]
def process_request(self, request):
path = request.path.lstrip("/")
if (self.redirect and not request.is_secure() and
not any(pattern.search(path)
for pattern in self.redirect_exempt)):
host = self.redirect_host or request.get_host()
return HttpResponsePermanentRedirect(
"https://%s%s" % (host, request.get_full_path())
)
def process_response(self, request, response):
if (self.sts_seconds and request.is_secure() and
'strict-transport-security' not in response):
            sts_header = "max-age=%s" % self.sts_seconds
if self.sts_include_subdomains:
sts_header = sts_header + "; includeSubDomains"
response["strict-transport-security"] = sts_header
if self.content_type_nosniff and 'x-content-type-options' not in response:
response["x-content-type-options"] = "nosniff"
if self.xss_filter and 'x-xss-protection' not in response:
response["x-xss-protection"] = "1; mode=block"
return response
|
taikoa/wevolver-server
|
wevolve/projects/migrations/0006_auto__add_field_userproject_drive_auth.py
|
Python
|
agpl-3.0
| 8,173 | 0.007708 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProject.drive_auth'
db.add_column(u'user_project', 'drive_auth',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProject.drive_auth'
db.delete_column(u'user_project', 'drive_auth')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'home.category': {
'Meta': {'object_name': 'Category', 'db_table': "u'category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'})
},
'projects.project': {
'Meta': {'object_name': 'Project', 'db_table': "u'project'"},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['home.Category']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'image_original_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'licence': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'type_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'db_column': "'type'", 'blank': 'True'})
},
'projects.projectpart': {
'Meta': {'object_name': 'ProjectPart', 'db_table': "u'project_part'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projectpart_created_user'", 'to': "orm['auth.User']"}),
'drive_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'projectpart_modified_user'", 'null': 'True', 'to': "orm['auth.User']"}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'project_part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.ProjectPart']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'projects.userproject': {
'Meta': {'object_name': 'UserProject', 'db_table': "u'user_project'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userproject_created_user'", 'to': "orm['auth.User']"}),
'drive_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'userproject_modified_user'", 'null': 'True', 'to': "orm['auth.User']"}),
'permission': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'db_column': "'project_id'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['projects']
|
zbanks/shmooze
|
setup.py
|
Python
|
mit
| 1,029 | 0.013605 |
#!/usr/bin/env python
import fnmatch
import glob
import os
import sys
from setuptools import setup
with open("requirements.txt") as f:
required = f.read().splitlines()
VERSION = "1.2.4"
setup(
name='shmooze',
version=VERSION,
description='Framework for processed-backed web applications',
author='Zach Banks',
author_email='zbanks@mit.edu',
    url='https://github.com/zbanks/shmooze',
packages=[
'shmooze',
'shmooze.wsgi',
'shmooze.modules',
'shmooze.lib',
],
download_url="https://github.com/zbanks/shmooze/tarball/{}".format(VERSION),
zip_safe=False,
install_requires=required,
scripts=[
"bin/sh
|
mooze",
"bin/shmz",
],
package_dir = {
},
package_data={
'musicazoo': [
"../supervisord.conf",
"../requirements.txt",
"../settings.json",
'../static/settings.json',
'../static/*.js',
'../static/*.html',
],
},
)
|
prasannav7/ggrc-core
|
src/ggrc/fulltext/mysql.py
|
Python
|
apache-2.0
| 16,137 | 0.00471 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models import all_models
from ggrc.models.object_person import ObjectPerson
from ggrc.models.object_owner import ObjectOwner
from ggrc.models.relationship import Relationship
from ggrc_basic_permissions.models import UserRole
from ggrc_basic_permissions import objects_via_assignable_query
from ggrc_basic_permissions import program_relationship_query
from ggrc.rbac import permissions, context_query_filter
from sqlalchemy import \
event, and_, or_, literal, union, alias, case, func, distinct
from sqlalchemy.sql import false
from sqlalchemy.schema import DDL
from sqlalchemy.ext.declarative import declared_attr
from .sql import SqlIndexer
class MysqlRecordProperty(db.Model):
__tablename__ = 'fulltext_record_properties'
key = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(64), primary_key=True)
context_id = db.Column(db.Integer)
tags = db.Column(db.String)
  property = db.Column(db.String(64), primary_key=True)
content = db.Column(db.Text)
@declared_attr
def __table_args__(self):
return (
# NOTE
# This is here to prevent Alembic from wanting to drop the index, but
# the DDL below or a similar Alembic migration should be used to create
# the index.
db.Index('{}_text_idx'.format(self.__tablename__), 'content'),
# These are real indexes
db.Index('ix_{}_key'.format(self.__tablename__), 'key'),
db.Index('ix_{}_type'.format(self.__tablename__), 'type'),
db.Index('ix_{}_tags'.format(self.__tablename__), 'tags'),
db.Index('ix_{}_context_id'.format(self.__tablename__), 'context_id'),
# Only MyISAM supports fulltext indexes until newer MySQL/MariaDB
{'mysql_engine': 'myisam'},
)
event.listen(
MysqlRecordProperty.__table__,
'after_create',
DDL('ALTER TABLE {tablename} ADD FULLTEXT INDEX {tablename}_text_idx '
'(content)'.format(tablename=MysqlRecordProperty.__tablename__))
)
class MysqlIndexer(SqlIndexer):
record_type = MysqlRecordProperty
def _get_type_query(self, model_names, permission_type='read',
permission_model=None):
type_queries = []
for model_name in model_names:
type_query = None
if permission_type == 'read':
contexts = permissions.read_contexts_for(
permission_model or model_name)
resources = permissions.read_resources_for(
permission_model or model_name)
elif permission_type == 'create':
contexts = permissions.create_contexts_for(
permission_model or model_name)
resources = permissions.create_resources_for(
permission_model or model_name)
elif permission_type == 'update':
contexts = permissions.update_contexts_for(
permission_model or model_name)
resources = permissions.update_resources_for(
permission_model or model_name)
elif permission_type == 'delete':
contexts = permissions.delete_contexts_for(
permission_model or model_name)
resources = permissions.delete_resources_for(
permission_model or model_name)
if permission_model and contexts:
contexts = set(contexts) & set(
permissions.read_contexts_for(model_name))
if contexts is not None:
# Don't filter out None contexts here
if None not in contexts and permission_type == "read":
contexts.append(None)
if resources:
resource_sql = and_(
MysqlRecordProperty.type == model_name,
MysqlRecordProperty.key.in_(resources))
else:
resource_sql = false()
type_query = or_(
and_(
MysqlRecordProperty.type == model_name,
context_query_filter(MysqlRecordProperty.context_id, contexts)
),
resource_sql)
type_queries.append(type_query)
else:
type_queries.append(MysqlRecordProperty.type == model_name)
return and_(
MysqlRecordProperty.type.in_(model_names),
or_(*type_queries))
def _get_filter_query(self, terms):
whitelist = or_(
# Because property values for custom attributes are
# `attribute_value_<id>`
MysqlRecordProperty.property.contains('attribute_value'),
MysqlRecordProperty.property.in_(
['title', 'name', 'email', 'notes', 'description', 'slug'])
)
if not terms:
return whitelist
elif terms:
return and_(whitelist, MysqlRecordProperty.content.contains(terms))
# FIXME: Temporary (slow) fix for words shorter than MySQL default limit
# elif len(terms) < 4:
# return MysqlRecordProperty.content.contains(terms)
# else:
# return MysqlRecordProperty.content.match(terms)
def _get_type_select_column(self, model):
mapper = model._sa_class_manager.mapper
if mapper.polymorphic_on is None:
type_column = literal(mapper.class_.__name__)
else:
# Handle polymorphic types with CASE
type_column = case(
value=mapper.polymorphic_on,
whens={
val: m.class_.__name__
for val, m in mapper.polymorphic_map.items()
})
return type_column
def _types_to_type_models(self, types):
if types is None:
return all_models.all_models
return [m for m in all_models.all_models if m.__name__ in types]
# filters by "myview" for a given person
def _add_owner_query(self, query, types=None, contact_id=None): # noqa
'''
Finds all objects which might appear on a user's Profile or Dashboard
pages, including:
Objects mapped via ObjectPerson
Objects owned via ObjectOwner
Objects in private contexts via UserRole (e.g. for Private Programs)
Objects for which the user is the "contact"
Objects for which the user is the "primary_assessor" or
"secondary_assessor"
Objects to which the user is mapped via a custom attribute
Assignable objects for which the user is an assignee
This method only *limits* the result set -- Contexts and Roles will still
filter out forbidden objects.
'''
# Check if the user has Creator role
current_user = get_current_user()
my_objects = contact_id is not None
if current_user.system_wide_role == "Creator":
contact_id = current_user.id
if not contact_id:
return query
type_models = self._types_to_type_models(types)
model_names = [model.__name__ for model in type_models]
models = []
for model in type_models:
base_model = model._sa_class_manager.mapper.primary_base_mapper.class_
if base_model not in models:
models.append(base_model)
models = [(model, self._get_type_select_column(model)) for model in models]
type_union_queries = []
all_people = db.session.query(
all_models.Person.id.label('id'),
literal(all_models.Person.__name__).label('type'),
literal(None).label('context_id')
)
type_union_queries.append(all_people)
# Objects to which the user is "mapped"
# We don't return mapped objects for the Creator because being mapped
# does not give the Creator necessary permissions to view the object.
if current_user.system_wide_role != "Creator":
object_people_query = db.session.query(
ObjectPerson.personable_id.label('id'),
ObjectPerson.personable_type.label('type'),
literal(None).label('context_id')
).filter(
and_(
ObjectPerson.person_id == contact_id,
ObjectPerson.personable_type.in_(model_names)
)
)
type_union_queries.append(object_people_query)
# Objects for which the user is an "owner"
object_owners_query = db.session.query(
ObjectOwner.ownable_id.label('id'),
|
joelstanner/electrum
|
plugins/trezor/qt_generic.py
|
Python
|
gpl-3.0
| 24,003 | 0.00025 |
from functools import partial
import threading
from PIL import Image
from PyQt4.Qt import Qt
from PyQt4.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt4.Qt import QVBoxLayout, QLabel, SIGNAL
from electrum_gui.qt.main_window import StatusBarButton
from electrum_gui.qt.password_dialog import PasswordDialog
from electrum_gui.qt.util import *
from .plugin import TrezorCompatiblePlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
from electrum.i18n import _
from electrum.plugins import hook, DeviceMgr
from electrum.util import PrintError
from electrum.wallet import Wallet, BIP44_Wallet
from electrum.wizard import UserCancelled
# By far the trickiest thing about this handler is the window stack;
# MacOSX is very fussy that the modal dialogs are perfectly parented
class QtHandler(PrintError):
'''An interface between the GUI (here, QT) and the device handling
logic for handling I/O. This is a generic implementation of the
Trezor protocol; derived classes can customize it.'''
def __init__(self, win, pin_matrix_widget_class, device):
win.connect(win, SIGNAL('clear_dialog'), self.clear_dialog)
win.connect(win, SIGNAL('error_dialog'), self.error_dialog)
win.connect(win, SIGNAL('message_dialog'), self.message_dialog)
win.connect(win, SIGNAL('pin_dialog'), self.pin_dialog)
win.connect(win, SIGNAL('passphrase_dialog'), self.passphrase_dialog)
win.connect(win, SIGNAL('word_dialog'), self.word_dialog)
self.win = win
self.pin_matrix_widget_class = pin_matrix_widget_class
self.device = device
self.dialog = None
self.done = threading.Event()
def top_level_window(self):
return self.win.top_level_window()
def watching_only_changed(self):
self.win.emit(SIGNAL('watching_only_changed'))
def show_message(self, msg, cancel_callback=None):
self.win.emit(SIGNAL('message_dialog'), msg, cancel_callback)
def show_error(self, msg):
self.win.emit(SIGNAL('error_dialog'), msg)
def finished(self):
self.win.emit(SIGNAL('clear_dialog'))
def get_pin(self, msg):
self.done.clear()
self.win.emit(SIGNAL('pin_dialog'), msg)
self.done.wait()
return self.response
def get_word(self, msg):
self.done.clear()
self.win.emit(SIGNAL('word_dialog'), msg)
self.done.wait()
return self.word
def get_passphrase(self, msg):
self.done.clear()
self.win.emit(SIGNAL('passphrase_dialog'), msg)
self.done.wait()
return self.passphrase
def pin_dialog(self, msg):
# Needed e.g. when resetting a device
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
matrix = self.pin_matrix_widget_class()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
self.done.set()
def passphrase_dialog(self, msg):
d = PasswordDialog(self.top_level_window(), None, msg,
PasswordDialog.PW_PASSPHRASE)
confirmed, p, passphrase = d.run()
if confirmed:
passphrase = BIP44_Wallet.normalize_passphrase(passphrase)
self.passphrase = passphrase
self.done.set()
def word_dialog(self, msg):
dialog = WindowModalDialog(self.top_level_window(), "")
hbox = QHBoxLayout(dialog)
hbox.addWidget(QLabel(msg))
text = QLineEdit()
text.setMaximumWidth(100)
text.returnPressed.connect(dialog.accept)
hbox.addWidget(text)
hbox.addStretch(1)
if not dialog.exec_():
return None
self.word = unicode(text.text())
self.done.set()
def message_dialog(self, msg, cancel_callback):
# Called more than once during signing, to confirm output and fee
self.clear_dialog()
title = _('Please check your %s device') % self.device
self.dialog = dialog = WindowModalDialog(self.top_level_window(), title)
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
if cancel_callback:
vbox.addLayout(Buttons(CancelButton(dialog)))
dialog.connect(dialog, SIGNAL('rejected()'), cancel_callback)
vbox.addWidget(l)
dialog.show()
def error_dialog(self, msg):
self.win.show_error(msg, parent=self.top_level_window())
def clear_dialog(self):
if self.dialog:
self.dialog.accept()
self.dialog = None
def query_choice(self, msg, labels):
return self.win.query_choice(msg, labels)
def request_trezor_init_settings(self, method, device):
wizard = self.win
vbox = QVBoxLayout()
main_label = QLabel(_("Initialization settings for your %s:") % device)
vbox.addWidget(main_label)
OK_button = OkButton(wizard, _('Next'))
def clean_text(widget):
text = unicode(widget.toPlainText()).strip()
return ' '.join(text.split())
if method in [TIM_NEW, TIM_RECOVER]:
gb = QGroupBox()
vbox1 = QVBoxLayout()
gb.setLayout(vbox1)
vbox.addWidget(gb)
gb.setTitle(_("Select your seed length:"))
choices = [
_("12 words"),
_("18
|
words"),
_("24 words"),
]
bg = QButtonGroup()
for i, choice in enumerate(choices):
rb = QRadioButton(gb)
rb.setText(choice)
bg.addButton(rb)
bg.setId(rb, i)
vbox1.addWidget(rb)
rb.setChecked(True)
cb_pin = QCheckBox(_('Enable PIN protection'))
cb_pin.setChecked(True)
else:
text = QTextEdit()
text.setMaximumHeight(60)
if method == TIM_MNEMONIC:
msg = _("Enter your BIP39 mnemonic:")
else:
msg = _("Enter the master private key beginning with xprv:")
def set_enabled():
OK_button.setEnabled(Wallet.is_xprv(clean_text(text)))
text.textChanged.connect(set_enabled)
OK_button.setEnabled(False)
vbox.addWidget(QLabel(msg))
vbox.addWidget(text)
pin = QLineEdit()
pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,10}')))
pin.setMaximumWidth(100)
hbox_pin = QHBoxLayout()
hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
hbox_pin.addWidget(pin)
hbox_pin.addStretch(1)
label = QLabel(_("Enter a label to name your device:"))
name = QLineEdit()
hl = QHBoxLayout()
hl.addWidget(label)
hl.addWidget(name)
hl.addStretch(1)
vbox.addLayout(hl)
if method in [TIM_NEW, TIM_RECOVER]:
vbox.addWidget(cb_pin)
else:
vbox.addLayout(hbox_pin)
cb_phrase = QCheckBox(_('Enable Passphrase protection'))
cb_phrase.setChecked(False)
vbox.addWidget(cb_phrase)
vbox.addStretch(1)
vbox.addLayout(Buttons(CancelButton(wizard), OK_button))
wizard.set_layout(vbox)
if not wizard.exec_():
raise UserCancelled
if method in [TIM_NEW, TIM_RECOVER]:
item = bg.checkedId()
pin = cb_pin.isChecked()
else:
item = ' '.join(str(clean_text(text)).split())
pin = str(pin.text())
return (item, unicode(name.text()), pin, cb_phrase.isChecked())
def qt_plugin_class(base_plugin_class):
class QtPlugin(base_plugin_class):
# Derived classes must provide the following class-static variables:
# icon_file
# pin_matrix_widget_class
def create_handler(self, window):
return QtHandler(window, self.pin_matrix_widget_class(), self.device)
@hook
def load_wallet(self, wallet, windo
|
felipenaselva/repo.felipe
|
plugin.video.salts/scrapers/onlinedizi_scraper.py
|
Python
|
gpl-2.0
| 4,998 | 0.005002 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import urllib
import kodi
import log_utils
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://onlinedizi.co'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'OnlineDizi'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
sources = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=.25)
fragment = dom_parser.parse_dom(html, 'ul', {'class': 'dropdown-menu'})
if fragment:
match = re.search('''href=['"]([^'"]+)[^>]*>(?:Altyaz.{1,3}s.{1,3}z)<''', fragment[0])
if match:
option_url = urlparse.urljoin(self.base_url, match.group(1))
html = self._http_get(option_url, cache_limit=2)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'video-player'})
if fragment:
iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], cache_limit=.25)
iframe_url = dom_parser.parse_dom(html, 'iframe', {'id': 'ifr'}, ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], allow_redirect=False, method='HEAD', cache_limit=.25)
if html.startswith('http'):
sources.append(html)
for match in re.finditer('"((?:\\\\x[A-Fa-f0-9]+)+)"', html):
s = match.group(1).replace('\\x', '').decode('hex')
if s.startswith('http'):
s = urllib.unquote(s)
match = re.search('videoPlayerMetadata&mid=(\d+)', s)
if match:
s = 'http://ok.ru/video/%s' % (match.group(1))
sources.append(s)
for stream_url in sources:
host = urlparse.urlparse(stream_url).hostname
quality = QUALITIES.HIGH
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': False}
hosters.append(hoster)
return hosters
def _get_episode_url(self, show_url, video):
episode_pattern = '''href=['"]([^'"]+-%s-sezon-%s-bolum[^'"]*)''' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
def search(self, video_type, title, year, season=''):
html = self._http_get(self.base_url, cache_limit=48)
results = []
seen_urls = {}
norm_title = scraper_utils.normalize_title(title)
for fragment in dom_parser.parse_dom(html, 'ul', {'class': '[^"]*all-series-list[^"]*'}):
for match in re.finditer('''href=["']([^'"]+)[^>]+>([^<]+)''', fragment):
url, match_title = match.groups()
if url not in seen_urls:
seen_urls[url] = True
if norm_title in scraper_utils.normalize_title(match_title):
result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
results.append(result)
return results
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_interpolate.py
|
Python
|
mit
| 102,313 | 0.000958 |
from __future__ import division, print_function, absolute_import
import itertools
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose)
from pytest import raises as assert_raises
from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
import numpy as np
from scipy._lib.six import xrange
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator,
RectBivariateSpline, interpn, NdPPoly, BSpline)
from scipy.special import poch, gamma
from scipy.interpolate import _ppoly
from scipy._lib._gcutils import assert_deallocated
from scipy.integrate import nquad
from scipy.special import binom
class TestInterp2D(object):
def test_interp2d(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x+0.5*y)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
v,u = ogrid[0:2:24j, 0:pi:25j]
assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
def test_interp2d_meshgrid_input(self):
# Ticket #703
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
def test_interp2d_meshgrid_input_unsorted(self):
np.random.seed(1234)
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
z = sin(x[None,:] + y[:,None]/2.)
ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
np.random.shuffle(y)
z = sin(x[None,:] + y[:,None]/2.)
ip3 = interp2d(x, y, z, kind='cubic')
x = linspace(0, 2, 31)
y = linspace(0, pi, 30)
assert_equal(ip1(x, y), ip2(x, y))
assert_equal(ip1(x, y), ip3(x, y))
def test_interp2d_eval_unsorted(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x + 0.5*y)
func = interp2d(x, y, z)
xe = np.array([3, 4, 5])
ye = np.array([5.3, 7.1])
assert_allclose(func(xe, ye), func(xe, ye[::-1]))
assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
def test_interp2d_linear(self):
# Ticket #898
a = np.zeros([5, 5])
a[2, 2] = 1.0
x = y = np.arange(5)
b = interp2d(x, y, a, 'linear')
assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
def test_interp2d_bounds(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 2, 7)
z = x[None, :]**2 + y[:, None]
ix = np.linspace(-1, 3, 31)
iy = np.linspace(-1, 3, 33)
b = interp2d(x, y, z, bounds_error=True)
assert_raises(ValueError, b, ix, iy)
b = interp2d(x, y, z, fill_value=np.nan)
iz = b(ix, iy)
mx = (ix < 0) | (ix > 1)
my = (iy < 0) | (iy > 2)
assert_(np.isnan(iz[my,:]).all())
assert_(np.isnan(iz[:,mx]).all())
assert_(np.isfinite(iz[~my,:][:,~mx]).all())
class TestInterp1D(object):
def setup_method(self):
self.x5 = np.arange(5.)
self.x10 = np.arange(10.)
self.y10 = np.arange(10.)
self.x25 = self.x10.reshape((2,5))
self.x2 = np.arange(2.)
self.y2 = np.arange(2.)
self.x1 = np.array([0.])
self.y1 = np.array([0.])
self.y210 = np.arange(20.).reshape((2, 10))
self.y102 = np.arange(20.).reshape((10, 2))
self.y225 = np.arange(20.).reshape((2, 2, 5))
self.y25 = np.arange(10.).reshape((2, 5))
self.y235 = np.arange(30.).reshape((2, 3, 5))
self.y325 = np.arange(30.).reshape((3, 2, 5))
self.fill_value = -100.0
def test_validation(self):
# Make sure that appropriate exceptions are raised when invalid values
# are given to the constructor.
# These should all work.
for kind in ('nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'):
interp1d(self.x10, self.y10, kind=kind)
interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
interp1d(self.x10, self.y10, kind='linear',
fill_value=np.array([-1]))
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1,))
interp1d(self.x10, self.y10, kind='linear',
fill_value=-1)
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1, -1))
interp1d(self.x10, self.y10, kind=0)
interp1d(self.x10, self.y10, kind=1)
interp1d(self.x10, self.y10, kind=2)
interp1d(self.x10, self.y10, kind=3)
interp1d(self.x10, self.y210, kind='linear', axis=-1,
fill_value=(-1, -1))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=np.ones(10))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), np.ones(10)))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), -1))
# x array must be 1D.
assert_raises(ValueError, interp1d, self.x25, self.y10)
# y array cannot be a scalar.
assert_raises(ValueError, interp1d, self.x10, np.array(0))
# Check for x and y arrays having the same length.
assert_raises(ValueError, interp1d, self.x10, self.y2)
assert_raises(ValueError, interp1d, self.x2, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y102)
interp1d(self.x10, self.y210)
interp1d(self.x10, self.y102, axis=0)
# Check for x and y having at least 1 element.
assert_raises(ValueError, interp1d, self.x1, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y1)
assert_raises(ValueError, interp1d, self.x1, self.y1)
# Bad fill values
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=(-1, -1, -1)) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array((-1, -1, -1))) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[[-1]]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array([])) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=()) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=(0., [-1, -1])) # above doesn't bc
def test_init(self):
# Check that the attributes are initialized appropriately by the
# constructor.
assert_(interp1d(self.x10, self.y10).copy)
assert_(not interp1d(self.x10, self.y10, copy=False).copy)
assert_(interp1d(self.x10, self.y10).bounds_error)
assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
assert_equal(in
|
wh-acmer/minixalpha-acm
|
LeetCode/Python/best_time_to_buy_and_sell_stock.py
|
Python
|
mit
| 708 | 0.008475 |
#!/usr/bin/env python
#coding: utf-8
class Solution:
# @param prices, a list of integer
# @return an integer
    def maxProfit(self, prices):
if not prices: return 0
n = len(prices)
min_p = prices[0]
        max_profit = 0
        for i in range(1, n):
            if prices[i] < min_p:
                min_p = prices[i]
            cur_profit = prices[i] - min_p
            if cur_profit > max_profit:
                max_profit = cur_profit
        return max_profit
if __name__ == '__main__':
s = Solution()
assert 0 == s.maxProfit([1])
assert 1 == s.maxProfit([1, 2])
assert 0 == s.maxProfit([2, 1])
assert 8 == s.maxProfit([1,3,9])
|
caktus/rapidsms-decisiontree-app
|
decisiontree/utils.py
|
Python
|
bsd-3-clause
| 4,043 | 0 |
from django.utils.encoding import force_text
from .models import Tree
def get_survey(trigger, connection):
"""Returns a survey only if it matches the connection's tenant."""
from decisiontree.multitenancy.utils import multitenancy_enabled
queryset = Tree.objects.filter(trigger__iexact=trigger)
if multitenancy_enabled():
tenant = connection.backend.tenantlink.tenant
queryset = queryset.filter(tenantlink__tenant=tenant)
return queryset.first()
def parse_tags(tagstring):
"""
Parses tag input, with multiple word input being activated and
delineated by commas and double quotes. Quotes take precedence, so
they may contain commas.
Returns a sorted list of unique tag names.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
if not tagstring:
return []
tagstring = force_text(tagstring)
# Special case - if there are no commas or double quotes in the
# input, we don't *do* a recall... I mean, we know we only need to
# split on spaces.
if u',' not in tagstring and u'"' not in tagstring:
words = list(set(split_strip(tagstring, u' ')))
words.sort()
return words
words = []
buffer = []
# Defer splitting of non-quoted sections until we know if there are
# any unquoted commas.
to_be_split = []
saw_loose_comma = False
open_quote = False
i = iter(tagstring)
try:
while True:
c = i.next()
if c == u'"':
if buffer:
                    to_be_split.append(u''.join(buffer))
buffer = []
# Find the matching quote
open_quote = True
c = i.next()
while c != u'"':
buffer.append(c)
                    c = i.next()
if buffer:
word = u''.join(buffer).strip()
if word:
words.append(word)
buffer = []
open_quote = False
else:
if not saw_loose_comma and c == u',':
saw_loose_comma = True
buffer.append(c)
except StopIteration:
# If we were parsing an open quote which was never closed treat
# the buffer as unquoted.
if buffer:
if open_quote and u',' in buffer:
saw_loose_comma = True
to_be_split.append(u''.join(buffer))
if to_be_split:
if saw_loose_comma:
delimiter = u','
else:
delimiter = u' '
for chunk in to_be_split:
words.extend(split_strip(chunk, delimiter))
words = list(set(words))
words.sort()
return words
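# Illustrative behaviour, inferred from the docstring above (these examples
# are not part of the original module):
#
#     parse_tags('apple banana cherry')      -> ['apple', 'banana', 'cherry']
#     parse_tags('apple, banana cherry')     -> ['apple', 'banana cherry']
#     parse_tags('"banana cherry", apple')   -> ['apple', 'banana cherry']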
def split_strip(string, delimiter=u','):
"""
Splits ``string`` on ``delimiter``, stripping each resulting string
and returning a list of non-empty strings.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
if not string:
return []
words = [w.strip() for w in string.split(delimiter)]
return [w for w in words if w]
def edit_string_for_tags(tags):
"""
Given list of ``Tag`` instances, creates a string representation of
the list suitable for editing by the user, such that submitting the
given string representation back without changing it will give the
same list of tags.
Tag names which contain commas will be double quoted.
If any tag name which isn't being quoted contains whitespace, the
resulting string of tag names will be comma-delimited, otherwise
it will be space-delimited.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
names = []
for tag in tags:
name = tag.name
if u',' in name or u' ' in name:
names.append('"%s"' % name)
else:
names.append(name)
return u', '.join(sorted(names))
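# Round-trip sketch (hypothetical objects with a ``name`` attribute standing
# in for real Tag instances; not part of the original module):
#
#     edit_string_for_tags([Tag(name='apple'), Tag(name='banana cherry')])
#     -> u'"banana cherry", apple'
#
# parse_tags() turns that string back into ['apple', 'banana cherry'].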
|
agry/NGECore2
|
scripts/mobiles/corellia/greck_mugger.py
|
Python
|
lgpl-3.0
| 1,920 | 0.022917 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('greck_mugger')
mobileTemplate.setLevel(31)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("olag greck")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(128)
templates = Vector()
templates.add('object/mobile/shared_greck_thug_f_01.iff')
templates.add('object/mobile/shared_greck_thug_f_02.iff')
templates.add('object/mobile/shared_greck_thug_f_03.iff')
templates.add('object/mobile/shared_greck_thug_m_01.iff')
templates.add('object/mobile/shared_greck_thug_m_02.iff')
	templates.add('object/mobile/shared_greck_thug_m_03.iff')
templates.add('object/mobile/shared_greck_thug_m_04.iff')
templates.add('object/mobile/shared_greck_thug_m_05.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
	mobileTemplate.setDefaultAttack('meleeHit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('greck_mugger', mobileTemplate)
return
|
iScienceLuvr/PPP-CAS
|
ppp_cas/mathematicaTree.py
|
Python
|
mit
| 6,347 | 0.019064 |
class BinaryOperator:
def __init__(self, left, right):
self.left = left
self.right = right
def __str__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.left, self.right)
def toCalchas(self, op):
return '(%s%s%s)' % (self.left.toCalchas(), op, self.right.toCalchas())
class Plus(BinaryOperator):
def toCalchas(self):
return super().toCalchas('+')
class Divide(BinaryOperator):
def toCalchas(self):
return super().toCalchas('/')
class Times(BinaryOperator):
def toCalchas(self):
return super().toCalchas('*')
class Minus(BinaryOperator):
def toCalchas(self):
return super().toCalchas('-')
class Pow(BinaryOperator):
def toCalchas(self):
return super().toCalchas('**')
class Arrow(BinaryOperator):
def toCalchas(self):
return '%s,%s' % (self.left.toCalchas(), self.right.toCalchas())
class UnaryOperator:
def __init__(self, val):
self.val = val
def __str__(self):
return '%s(%s)' % (self.__class__.__name__, self.val)
class Opp(UnaryOperator):
def toCalchas(self):
return '('+ '-' + self.val.toCalchas() +')'
class Fact(UnaryOperator):
def toCalchas(self):
return '(' + self.val.toCalchas() +'!)'
class Diff:
def __init__(self, val, nb):
self.val = val
self.nb=nb
def __str__(self):
return 'Diff('+str(self.val)+','+str(self.nb)+')'
def toCalchas(self):
return 'diff('+self.val.toCalchas()+','+self.val.args[0].toCalchas()+','+str(self.nb)+')'
class List:
def __init__(self, l):
self.list = l
def __str__(self):
if len(self.list)==0:
return 'List([])'
s = 'List(['+str(self.list[0])
for e in self.list[1:]:
s = s + ', ' + str(e)
return s+'])'
def __getitem__(self,index):
        return self.list[index]
def __add__(self, other):
return List(self.list+other.list)
def __len__(self):
return len(self.list)
def getList(self):
return self.list
def toCalchas(self):
if len(self.list)==0:
return ''
        s = self.list[0].toCalchas()
for e in self.list[1:]:
s = s + ', ' + e.toCalchas()
return s
class FunctionCall:
def __init__(self, function, args):
self.function = function
self.args = args
def __str__(self):
return 'FunctionCall('+str(self.function)+','+str(self.args)+')'
def toCalchas(self):
if type(self.function)==Id:
return self.translate(self.function.toCalchas(), self.args)
def translate(self, function, args):
def bigoppTranslation(functionName, args):
if len(args)==0:
return ''
if len(args)==1:
return args[0].toCalchas()
if isinstance(args[-1], List):
return '%s(%s, %s, %s, %s)'%(functionName, bigoppTranslation(functionName, args[0:-1]),args[-1][0].toCalchas(),args[-1][1].toCalchas(),args[-1][2].toCalchas())
return '%s(%s, %s)'%(functionName, bigoppTranslation(functionName, args[0:-1]),args[-1].toCalchas())
mathematicatoCalchas={'Sqrt' : (lambda a: 'sqrt('+a[0].toCalchas()+')'),
'Sin' : (lambda a: 'sin('+a[0].toCalchas()+')'),
'Cos' : (lambda a: 'cos('+a[0].toCalchas()+')'),
'Tan' : (lambda a: 'tan('+a[0].toCalchas()+')'),
'Arccos' : (lambda a: 'acos('+a[0].toCalchas()+')'),
'Arcsin' : (lambda a: 'asin('+a[0].toCalchas()+')'),
'Arctan' : (lambda a: 'atan('+a[0].toCalchas()+')'),
'Sum' : (lambda a: bigoppTranslation("sum", a)),
'Integrate' : (lambda a: bigoppTranslation("int", [a[0]]+list(reversed(a[1:])))),
'N' : (lambda a: 'N('+a.toCalchas()+')'),
'D' : (lambda a: 'diff('+a[0].toCalchas()+', '+', '.join([l.toCalchas() for l in a[1:]])+')'),
'Exp' : (lambda a: 'exp('+a.toCalchas()+')'),
'Simplify' : (lambda a: 'simplify('+a.toCalchas()+')'),
'Power' : (lambda a: 'Pow('+a.toCalchas()+')'),
'Log' : (lambda a: 'log('+List(list(reversed(a.getList()))).toCalchas()+')'),
'Log10' : (lambda a: 'lg('+a[0].toCalchas()+')'),
'Log2' : (lambda a: 'lb('+a[0].toCalchas()+')'),
'Factorial' : (lambda a: '('+a[0].toCalchas()+'!)'),
'Abs' : (lambda a: 'Abs('+a[0].toCalchas()+')'),
'Ceiling' : (lambda a: 'ceiling('+a[0].toCalchas()+')'),
'Floor' : (lambda a: 'floor('+a[0].toCalchas()+')'),
'Limit' : (lambda a: 'limit('+a[0].toCalchas() +','+ a[1].toCalchas()+')'),
'Solve' : (lambda a: 'solve(['+a[0].toCalchas() +'],['+ a[1].toCalchas()+'])'),
'Expand' : (lambda a: 'expand('+a.toCalchas()+')'),
'Factor' : (lambda a: 'factor('+a.toCalchas()+')'),
'Prime' : (lambda a: 'prime('+a.toCalchas()+')'),
'PrimeQ' : (lambda a: 'isprime('+a.toCalchas()+')'),
}
for name in mathematicatoCalchas.keys():
if name == function:
return '('+mathematicatoCalchas[name](args)+')'
return '('+function+'('+ self.args.toCalchas() +')'+')'
class Id:
def __init__(self, id):
self.id=id
def __str__(self):
return 'Id(\''+str(self.id)+'\')'
def toCalchas(self):
return self.translateId(self.id)
def translateId(self, id):
mathematicatoCalchas={'Infinity' : 'oo',
'I' : 'I',
'Pi' : 'pi',
'GoldenRatio' : 'GoldenRatio',
'EulerGamma' : 'EulerGamma',
}
if id in mathematicatoCalchas.keys():
return mathematicatoCalchas[id]
return str(id)
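# Minimal usage sketch (illustrative only, not part of the original module):
if __name__ == '__main__':
    # Build a tiny AST by hand and convert it to Calchas syntax.
    expr = Plus(Id('x'), Times(Id('Pi'), Id('y')))
    print(expr.toCalchas())  # expected output: (x+(pi*y))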
|
pombredanne/Rusthon
|
src/runtime/builtins_webworker.py
|
Python
|
bsd-3-clause
| 10,276 | 0.041845 |
class __WorkerPool__:
def create_webworker(self, cpuid):
## this is lazy because if the blob is created when the js is first executed,
## then it will pick all functions of `window` but they will be `undefined`
## if their definition comes after the construction of this singleton.
print 'creating blob'
## having the worker report back the current time to the main thread allows
## some gauge of its CPU load, this can be average over time, and the user
## could call something like `worker.how_busy()` which is some relative value.
header = [
'setInterval(',
' function(){',
' self.postMessage({time_update:(new Date()).getTime()});',
' }, 100',
');',
## TODO other builtins prototype hacks. see above.
'Array.prototype.append = function(a) {this.push(a);};',
]
## this is something extra stuff injected from NW.js
## that should not be inserted into the webworker.
nwjs_skip = ('Buffer', 'AppView', 'WebView')
for name in dir(window):
if name in nwjs_skip:
continue
ob = window[name]
if ob is undefined:
print 'WARNING: object in toplevel namespace window is undefined ->' + name
elif typeof(ob) == 'function':
## should actually check function code for `[ native code ]` and skip those.
header.append( 'var ' + name + '=' + ob.toString() + ';\n' )
for subname in dir(ob.prototype):
sob = ob.prototype[subname]
header.append(name + '.prototype.' +subname + '=' + sob.toString() + ';\n' )
#elif typeof(ob) == 'object':
# header.append( 'var ' + name + '=' + ob.toString() + ';\n' )
xlibs = []
for name in self.extras:
if '.' in name:
print 'import webworker submodule: ' + name
mod = name.split('.')[0]
xname = name.split('.')[1]
ob = eval(name)
if typeof(ob) == 'object': ## copy objects with static methods
print 'import object: ' + xname
header.append( name + '= {' )
for sname in Object.keys(ob):
subob = ob[sname]
ok = True
try:
tmp = eval("("+subob+")")
except:
ok = False
if ok:
print 'import->: ' + sname
header.append( '"'+sname + '":(' + ob[sname] +')' )
header.append(',\n')
header.pop()
header.append('};\n')
#if mod not in xlibs:
# print 'new module: '+mod
# header.append('var ' + mod + '= {};' )
# xlibs.append(mod)
else:
print 'import webworker module: ' + name
header.append( 'var ' + name + '= {};\n' )
modulemain = window[name]
for xname in dir(modulemain):
ob = modulemain[xname]
if typeof(ob) == 'function':
print 'import class: ' + xname
header.append( name + '.' + xname + '=' + ob.toString() + ';\n' )
if ob.prototype: ## copy methods
#for method_name in dir(ob.prototype):
for method_name in Object.keys(ob.prototype):
if method_name == 'constructor': continue
ok = True
try:
## getting some properties can throw deprecation errors
sub = ob.prototype[method_name]
except:
ok = False
if ok and typeof(sub) == 'function':
print 'import method: ' + method_name
header.append(name + '.' + xname + '.prototype.' + method_name + '=' + sub.toString() + ';' )
#header.append(name + '.' + xname + '.' + method_name + '=' + ob.toString() + ';' )
## Web Worker ##
header.extend( self.source )
blob = new(Blob(header, type='application/javascript'))
url = URL.createObjectURL(blob)
ww = new(Worker(url))
#self.thread = ww ## temp, TODO multiple threads
#self.thread.onmessage = self.update.bind(this)
ww._cpuid = cpuid
ww._last_time_update = 0
ww._stream_callbacks = {}
ww._stream_triggers = {}
ww._get_callback = None ## this should actually be a stack of callbacks, right now it assumes its synced
ww._call_callback = None ## this should actually be a stack of callbacks.
ww._callmeth_callback = None ## TODO also should be a stack
## if worker has not sent a time update in awhile ##
ww.busy = lambda : ww._last_time_update - (new(Date())).getTime() < 200
ww.how_busy = lambda : 100.0 / (ww._last_time_update - (new(Date())).getTime())
@bind(ww.spawn_class)
def _spawn_class(cfg):
sid = cfg['spawn']
print '_spawn_class:' + ww._cpuid + '|' + sid
ww._stream_callbacks[sid] = []
ww._stream_triggers[sid] = []
ww.postMessage(cfg)
def onmessage_update(evt):
if self._binrecv:
#print 'got binary....'
id = self._binrecv['id']
btype = self._binrecv['type']
self._binrecv = None
msg = None
switch btype:
case "Float32Array":
msg = new Float32Array(evt.data)
case "Float64Array":
msg = new Float64Array(evt.data)
case "Int32Array":
msg = new Int32Array(evt.data)
if id in ww._stream_callbacks: ## channels
callbacks = ww._stream_callbacks[id]
if len(callbacks):
cb = callbacks.pop()
cb( msg )
else:
ww._stream_triggers[id].push( msg )
else:
raise WebWorkerError('invalid id:' + id)
elif evt.data.time_update: ## the worker uses setInterval to report the time, see `worker.busy()`
ww._last_time_update = evt.data.time_update
elif evt.data.debug:
console.warn( ww._cpuid + '|' + evt.data.debug)
else:
ww._last_time_update = (new(Date())).getTime()
msg = evt.data.message
                ## restore object class if `proto` was given (user static return type)
if evt.data.proto: msg.__proto__ = eval(evt.data.proto + '.prototype')
if evt.data.GET:
ww._get_callback( msg )
elif evt.data.CALL:
ww._call_callback( msg )
elif evt.data.CALLMETH:
ww._callmeth_callback( msg )
else:
id = evt.data.id
if evt.data.bin:
self._binrecv = {'id':id, 'type':evt.data.bin}
elif id in ww._stream_callbacks: ## channels
                    callbacks = ww._stream_callbacks[id]
if len(callbacks):
cb = callbacks.pop()
cb( msg )
else:
ww._stream_triggers[id].push( msg )
else:
raise WebWorkerError('invalid id:' + id)
ww.onmessage = onmessage_update
return ww
def __init__(self, src, extras):
## note: src is an array
## note: thread-ids = `cpu-id:spawned-id`
self.source = src
self.extras = extras
## each worker in this pool runs on its own CPU core
## how to get number of CPU cores in JS?
self.pool = {}
self.num_spawned = 1 ## must be 1, not zero
def spawn(self, cfg, options):
cpu = 0
autoscale = True
if options is not undefined:
print 'using CPU:'+options.cpu
cpu = options.cpu
autoscale = False
id = str(cpu) + '|' + str(self.num_spawned)
cfg['spawn'] = self.num_spawned
self.num_spawned += 1
if cpu in self.pool:
## this thread could be busy, spawn into it anyways.
print 'reusing cpu already in pool'
self.pool[cpu].spawn_class(cfg)
elif autoscale:
print 'spawn auto scale up'
## first check if any of the other threads are not busy
readythread = None
cpu = len(self.pool.keys())
for cid in self.pool.keys():
thread = self.pool[ cid ]
if not thread.busy():
print 'reusing thread is not busy:' + cid
readythread = thread
cpu = cid
break
if not readythread:
assert cpu not in self.pool.keys()
readythread = self.create_webworker(cpu)
self.pool[cpu] = readythread
readythread.spawn_class(cfg)
else:
## user defined CPU ##
print 'spawn user defined cpu:' + cpu
assert cpu not in self.pool.keys()
readythread = self.create_webworker(cpu)
self.pool[cpu] = readythread
self.pool[cpu].spawn_class(cfg)
return id
def send(self, id=None, message=None):
tid, sid = id.split('|')
if tid not in self.pool:
raise RuntimeError('send: invalid cpu id')
if __is_typed_array(message): ## transferable buffers (no copy, moves data into worker)
bspec = {'send_binary':sid}
if instanceof(message, Float32Array):
bspec['type'] = 'Float32Array'
elif instanceof(message, Float64Array):
bspec['type'] = 'Float64Array'
elif instanceof( ob, Int32Array ):
bspec['type'] = 'Int32Array'
elif instanceof( ob, Int16Array ):
bspec['typ
|
w1ll1am23/home-assistant
|
homeassistant/util/dt.py
|
Python
|
apache-2.0
| 12,636 | 0.000554 |
"""Helper methods to handle the time in Home Assistant."""
from __future__ import annotations
from contextlib import suppress
import datetime as dt
import re
from typing import Any, cast
import ciso8601
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
NATIVE_UTC = dt.timezone.utc
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
"""Set a default time zone to be used when none is specified.
Async friendly.
"""
global DEFAULT_TIME_ZONE # pylint: disable=global-statement
# NOTE: Remove in the future in favour of typing
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> dt.tzinfo | None:
"""Get time zone from string. Return None if unable to determine.
Async friendly.
"""
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None
def utcnow() -> dt.datetime:
"""Get now in UTC time."""
return dt.datetime.now(NATIVE_UTC)
def now(time_zone: dt.tzinfo | None = None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
if dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim) # type: ignore
return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: dt.datetime | None = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
if dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
def start_of_local_day(dt_or_d: dt.date | dt.datetime | None = None) -> dt.datetime:
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
date: dt.date = now().date()
elif isinstance(dt_or_d, dt.datetime):
date = dt_or_d.date()
else:
date = dt_or_d
return DEFAULT_TIME_ZONE.localize( # type: ignore
dt.datetime.combine(date, dt.time())
)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> dt.datetime | None:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
with suppress(ValueError, IndexError):
return ciso8601.parse_datetime(dt_str)
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: dt.tzinfo | None = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
def parse_date(dt_str: str) -> dt.date | None:
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str: str) -> dt.time | None:
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(":")
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
delta = (now() - date).total_seconds()
rounded_delta = round(delta)
units = ["second", "minute", "hour", "day", "month"]
factors = [60, 60, 24, 30, 12]
selected_unit = "year"
for i, next_factor in enumerate(factors):
if rounded_delta < next_factor:
selected_unit = units[i]
break
delta /= next_factor
rounded_delta = round(delta)
return formatn(rounded_delta, selected_unit)
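# Worked example (follows directly from the loop above): for a date 3 hours
# in the past, delta is roughly 10800 s; dividing by the factors 60 and 60
# leaves 3, which is below the next factor 24, so get_age returns "3 hours".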
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> list[int]:
"""Parse the time expression part and return a list of times to match."""
if parameter is None or parameter == MATCH_ALL:
res = list(range(min_value, max_value + 1))
elif isinstance(parameter, str):
if parameter.startswith("/"):
parameter = int(parameter[1:])
res = [x for x in range(min_value, max_value + 1) if x % parameter == 0]
else:
res = [int(parameter)]
elif not hasattr(parameter, "__iter__"):
res = [int(parameter)]
else:
res = sorted(int(x) for x in parameter)
for val in res:
if val < min_value or val > max_value:
raise ValueError(
f"Time expression '{parameter}': parameter {val} out of range "
f"({min_value} to {max_value})"
)
return res
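# Illustrative results derived from the logic above (not part of the module):
#   parse_time_expression("/15", 0, 59)  -> [0, 15, 30, 45]
#   parse_time_expression(None, 0, 2)    -> [0, 1, 2]
#   parse_time_expression([2, 1], 0, 59) -> [1, 2]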
def find_next_time_expression_time(
now: dt.datetime, # pylint: disable=redefined-outer-name
seconds: list[int],
minutes: list[int],
hours: list[int],
) -> dt.datetime:
"""Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time.
"""
if not seconds or not minutes or not hours:
raise ValueError("Cannot find a next time: Time expression never matches!")
def _lo
|
apple/swift-lldb
|
packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-skip-summary/TestDataFormatterSkipSummary.py
|
Python
|
apache-2.0
| 7,621 | 0.002099 |
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SkipSummaryDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr20548 fails to build on lab.llvm.org buildbot")
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr24462, Data formatters have problems on Windows")
def test_with_run_command(self):
"""Test data formatter commands."""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
def data_formatter_commands(self):
"""Test that that file and class static variables display correctly."""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
#import lldbsuite.test.lldbutil as lldbutil
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Setup the summaries for this scenario
#self.runCmd("type summary add --summary-string \"${var._M_dataplus._M_p}\" std::string")
self.runCmd(
"type summary add --summary-string \"Level 1\" \"DeepData_1\"")
self.runCmd(
"type summary add --summary-string \"Level 2\" \"DeepData_2\" -e")
self.runCmd(
"type summary add --summary-string \"Level 3\" \"DeepData_3\"")
self.runCmd(
"type summary add --summary-string \"Level 4\" \"DeepData_4\"")
self.runCmd(
"type summary add --summary-string \"Level 5\" \"DeepData_5\"")
# Default case, just print out summaries
self.expect('frame variable',
substrs=['(DeepData_1) data1 = Level 1',
'(DeepData_2) data2 = Level 2 {',
'm_child1 = Level 3',
'm_child2 = Level 3',
'm_child3 = Level 3',
'm_child4 = Level 3',
'}'])
# Skip the default (should be 1) levels of summaries
self.expect('frame variable --no-summary-depth',
substrs=['(DeepData_1) data1 = {',
'm_child1 = 0x',
'}',
'(DeepData_2) data2 = {',
'm_child1 = Level 3',
'm_child2 = Level 3',
'm_child3 = Level 3',
|
'm_child4 = Level 3',
'}'])
# Now skip 2 levels of summaries
self.expect('frame variable --no-summary-depth=2',
                    substrs=['(DeepData_1) data1 = {',
'm_child1 = 0x',
'}',
'(DeepData_2) data2 = {',
'm_child1 = {',
'm_child1 = 0x',
'Level 4',
'm_child2 = {',
'm_child3 = {',
'}'])
# Check that no "Level 3" comes out
self.expect(
'frame variable data1.m_child1 --no-summary-depth=2',
matching=False,
substrs=['Level 3'])
# Now expand a pointer with 2 level of skipped summaries
self.expect('frame variable data1.m_child1 --no-summary-depth=2',
substrs=['(DeepData_2 *) data1.m_child1 = 0x'])
# Deref and expand said pointer
self.expect('frame variable *data1.m_child1 --no-summary-depth=2',
substrs=['(DeepData_2) *data1.m_child1 = {',
'm_child2 = {',
'm_child1 = 0x',
'Level 4',
'}'])
# Expand an expression, skipping 2 layers of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --no-summary-depth=2',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_child2 = {',
'm_child1 = Level 5',
'm_child2 = Level 5',
'm_child3 = Level 5',
'}'])
# Expand same expression, skipping only 1 layer of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --no-summary-depth=1',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_child1 = 0x',
'Level 4',
'm_child2 = Level 4',
'}'])
# Bad debugging info on SnowLeopard gcc (Apple Inc. build 5666).
# Skip the following tests if the condition is met.
if self.getCompiler().endswith('gcc') and not self.getCompiler().endswith('llvm-gcc'):
import re
gcc_version_output = system(
[[lldbutil.which(self.getCompiler()), "-v"]])[1]
#print("my output:", gcc_version_output)
for line in gcc_version_output.split(os.linesep):
m = re.search('\(Apple Inc\. build ([0-9]+)\)', line)
#print("line:", line)
if m:
gcc_build = int(m.group(1))
#print("gcc build:", gcc_build)
if gcc_build >= 5666:
# rdar://problem/9804600"
self.skipTest(
"rdar://problem/9804600 wrong namespace for std::string in debug info")
# Expand same expression, skipping 3 layers of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --show-types --no-summary-depth=3',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_some_text = "Just a test"',
'm_child2 = {',
'm_some_text = "Just a test"'])
# Change summary and expand, first without --no-summary-depth then with
# --no-summary-depth
self.runCmd(
"type summary add --summary-string \"${var.m_some_text}\" DeepData_5")
self.expect('fr var data2.m_child4.m_child2.m_child2', substrs=[
'(DeepData_5) data2.m_child4.m_child2.m_child2 = "Just a test"'])
self.expect(
'fr var data2.m_child4.m_child2.m_child2 --no-summary-depth',
substrs=[
'(DeepData_5) data2.m_child4.m_child2.m_child2 = {',
'm_some_text = "Just a test"',
'}'])
|
Diego999/Social-Recommendation-System
|
event_analyse/event_analysis.py
|
Python
|
mit
| 2,010 | 0.002985 |
from tf_idf import *
class EventAnalysis:
"""
    Class that contains all the necessary operations to perform a text analysis
"""
@staticmethod
def get_id_website(id_doc, is_website):
"""
Apply the processing to have a website id
"""
return id_doc if not is_website else id_doc + '_'
def __init__(self):
self.corpus = Corpus()
self.is_corpus_complete = False
self.tf_idf = None
def add_document_in_corpus(self, text, id_doc):
"""
        The id is as follows:
- A description : Event's id
- A website : Event's id + "_"
"""
self.corpus.add_document(Document(text, id_doc))
def set_corpus_complete(self):
"""
Define the corpus as complete to proceed to the next step with tf-idf
"""
self.is_corpus_complete = True
        self.tf_idf = TfIdf(self.corpus)
def compute_tf_idf(self, term, id_doc):
"""
        The id is as follows:
        - A description : Event's id
- A website : Event's id + "_"
"""
return self.tf_idf.get_tf_idf(term, id_doc)
def get_tf_idf_the_k_most_important(self, k, id_doc):
"""
        Return an OrderedDict that contains the k most important terms (sorted by frequency).
        If there are fewer than k terms, all of them are returned.
"""
if not self.is_corpus_complete:
raise Exception("The corpus is not complete ! Please call set_corpus_complete when you've filled it.")
if k <= 0:
raise Exception("The k is <= 0 !")
from itertools import islice
from collections import OrderedDict
#Transform OrderedDict(key, tuple(double1, double2)) in OrderedDict(key, double2)
return OrderedDict((x[0], (x[1][0], x[1][1], x[1][2])) for x in
islice(self.tf_idf.get_all_tf_idf_sorted(id_doc).items(), 0, k))
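# Minimal usage sketch (assumes the Corpus/Document/TfIdf API imported from
# tf_idf above; the texts and ids are made up for illustration):
#
#     analysis = EventAnalysis()
#     analysis.add_document_in_corpus("Concert in the park", "42")  # description
#     analysis.add_document_in_corpus("Programme and ticket details",
#                                     EventAnalysis.get_id_website("42", True))  # website
#     analysis.set_corpus_complete()
#     top_terms = analysis.get_tf_idf_the_k_most_important(5, "42")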
|
psionin/smartcoin
|
qa/rpc-tests/pruning.py
|
Python
|
mit
| 21,037 | 0.005371 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
RESCAN_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 6
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
# Create nodes 5 to test wallet in prune mode, but do not connect
self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"]))
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
print("Success")
print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
print("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
print("Success")
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
print("Current block height:", height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
print("Invalidating block at height:",invalidheight,badhash)
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously-mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
print("New best height", self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
print("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
print("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
print("Verify height on node 2:",self.nodes[2].getblockcount())
print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning
|
pollitosabroson/pycarribean
|
src/books/models.py
|
Python
|
apache-2.0
| 515 | 0 |
from django.db import models
# Create your models here.
class Autor(models.Model):
nombre = models.TextField(max_length=100)
apellido = models.TextField(max_length=100)
class Libro(models.Model):
nombre = models.TextField(max_length=100)
    editorial = models.TextField(max_length=100)
genero = models.TextField(max_length=100)
descripcion = models.TextField()
autor = models.ForeignKey(
Autor,
null=True
)
def __unicode__(self):
        return self.editorial
|
Apogaea/voldb
|
volunteer/core/views.py
|
Python
|
gpl-3.0
| 481 | 0 |
from django.views.generic import TemplateView
from django.conf import settings
from volunteer.apps.events.utils import get_active_event
class SiteIndexView(TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(SiteIndexView, self).get_context_data(**kwargs)
context['current_event'] = get_active_event(self.request.session)
        context['support_email'] = settings.DEFAULT_FROM_EMAIL
return context
|
|
tedlaz/pyted
|
pymiles/pyMiles2.old/pymiles/sqlite/diasql.py
|
Python
|
gpl-3.0
| 5,065 | 0.001579 |
# PyDia SQL.py : SQL dump.
# Copy it to /usr/share/dia/python
import dia
# import sys
# import os
import string
import re
import datetime
class SQLRenderer:
def __init__(self):
self.f = None
|
def begin_render(self, data, filename):
self.f = open(filename, "w")
# name = os.path.split(filename)[1]
self.f.write('''BEGIN TRANSACTION;\n''')
for layer in data.layers:
self.WriteTables(layer)
    def WriteTables(self, layer):
tables = {}
appdata = 'appdata'
priority = {'fields': 0, 'foreign_keys': 100}
# value for id
z = ["INSERT INTO zf VALUES ('id', 'No', 'INTEGER', '1');"]
z.append("INSERT INTO z VALUES('diadate', '%s');" % datetime.date.today().isoformat())
zsql = "INSERT INTO z VALUES('%s', '%s');"
zfsql = "INSERT INTO zf VALUES ('%s', '%s', '%s', '%s');"
ztsql = "INSERT INTO zt VALUES ('%s', '%s', '%s', '%s');"
for o in layer.objects:
if o.type.name == 'Database - Table':
if "name" in o.properties.keys():
table = o.properties["name"].value
elif "text" in o.properties.keys():
table = o.properties["text"].value.text
else:
continue
if len(table) == 0 or string.find(table, " ") >= 0:
continue
if table not in tables.keys():
tables[table] = ''
if table == appdata:
attrs = o.properties['attributes'].value
for attr in attrs:
z.append(zsql % (attr[0], attr[1]))
continue
# zt.append(comment)
# first line is label
# second line is label plural
# third line is rpr
clst = o.properties['comment'].value.split('\n')
if len(clst) >= 3:
z.append(ztsql % (table, clst[0], clst[1], clst[2]))
atributes = o.properties['attributes'].value
for i in range(0, len(atributes)):
a = atributes[i]
if a[0] == 'id':
tables[table] = '%0.3d\tid INTEGER PRIMARY KEY\n' %\
(priority['fields'] + i)
continue
if len(a[0]) > 4:
if a[0][-3:] == '_id':
nnul = ''
if a[4] == 0:
nnul = ' NOT NULL'
tables[table] += '%0.3d\t%s INTEGER%s REFERENCES %s(id)\n' % (priority['fields'] + i, a[0], nnul, a[0][:-3])
continue
tipo = ''
if re.match('.*enum\(.*', a[1], re.I):
tipo = a[1]
else:
tipo = a[1].upper()
if tipo == '':
tipo = 'TEXT'
tables[table] += '%0.3d\t%s %s' % (priority['fields'] + i, a[0], tipo)
if a[3] == 1:
tables[table] += ' PRIMARY KEY'
if a[4] == 0:
if a[3] != 1:
tables[table] += ' NOT NULL'
notnull = 1
else:
tables[table] += ''
notnull = 0
if a[5] == 1:
if a[3] != 1:
tables[table] += ' UNIQUE'
# Create insert for table zflbl
if (len(a[2]) > 0):
z.append(zfsql % (a[0], a[2], tipo, notnull))
tables[table] += '\n'
elif o.type.name == 'Database - Reference':
continue
for k in sorted(tables.keys()):
# self.f.write('\n-- %s --\nDROP TABLE IF EXISTS `%s`;\n' % (k,k) )
if k != appdata:
self.f.write('CREATE TABLE IF NOT EXISTS %s (\n' % k)
sentences = sorted(tables[k].split('\n'))
sentences = [str(s[3:]) for s in sentences if len(s) > 4]
sentences = ",\n".join(sentences)
self.f.write('%s\n' % sentences)
self.f.write(');\n')
self.f.write('CREATE TABLE IF NOT EXISTS z (key TEXT PRIMARY KEY, val TEXT NOT NULL);\n')
self.f.write('CREATE TABLE IF NOT EXISTS zt (tbl TEXT PRIMARY KEY, tlbl TEXT NOT NULL UNIQUE, tlblp TEXT NOT NULL UNIQUE, rpr TEXT NOT NULL);\n')
self.f.write('CREATE TABLE IF NOT EXISTS zf (fld TEXT PRIMARY KEY, flbl TEXT NOT NULL UNIQUE, typos TEXT NOT NULL, nonull INTEGER NOT NULL DEFAULT 1);\n')
self.f.write('\n'.join(sorted(z)))
self.f.write('\n')
def end_render(self):
self.f.write('COMMIT;\n')
self.f.close()
# reference
dia.register_export("PyDia SQL generator", "sql", SQLRenderer())
|
janelia-flyem/neuroglancer
|
python/neuroglancer/tool/filter_bodies.py
|
Python
|
apache-2.0
| 6,932 | 0.001587 |
from __future__ import division
import json
import os
import copy
import collections
import argparse
import csv
import neuroglancer
import neuroglancer.cli
import numpy as np
class State(object):
def __init__(self, path):
self.path = path
self.body_labels = collections.OrderedDict()
def load(self):
if os.path.exists(self.path):
with open(self.path, 'r') as f:
self.body_labels = collections.OrderedDict(json.load(f))
def save(self):
tmp_path = self.path + '.tmp'
with open(tmp_path, 'w') as f:
f.write(json.dumps(self.body_labels.items()))
os.rename(tmp_path, self.path)
Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
class Tool(object):
def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
self.state = State(state_path)
self.num_to_prefetch = num_to_prefetch
self.viewer = neuroglancer.Viewer()
self.bodies = bodies
self.state.load()
self.total_voxels = sum(x.num_voxels for x in bodies)
self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])
with self.viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
s.show_slices = False
s.concurrent_downloads = 256
s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
s.layout = '3d'
key_bindings = [
['bracketleft', 'prev-index'],
['bracketright', 'next-index'],
['home', 'first-index'],
['end', 'last-index'],
['control+keys', 'save'],
]
label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
for label, label_key in zip(labels, label_keys):
key_bindings.append([label_key, 'label-%s' % label])
def label_func(s, label=label):
self.set_label(s, label)
self.viewer.actions.add('label-%s' % label, label_func)
self.viewer.actions.add('prev-index', self._prev_index)
self.viewer.actions.add('next-index', self._next_index)
self.viewer.actions.add('first-index', self._first_index)
self.viewer.actions.add('last-index', self._last_index)
self.viewer.actions.add('save', self.save)
with self.viewer.config_state.txn() as s:
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
for key, command in key_bindings))
self.index = -1
self.set_index(self._find_one_after_last_labeled_index())
def _find_one_after_last_labeled_index(self):
body_index = 0
while self.bodies[body_index].segment_id in self.state.body_labels:
body_index += 1
return body_index
def set_index(self, index):
if index == self.index:
return
body = self.bodies[index]
self.index = index
def modify_state_for_body(s, body):
s.layers['segmentation'].segments = frozenset([body.segment_id])
s.voxel_coordinates = body.bbox_start + body.bbox_size // 2
with self.viewer.txn() as s:
modify_state_for_body(s, body)
prefetch_states = []
for i in range(self.num_to_prefetch):
prefetch_index = self.index + i + 1
if prefetch_index >= len(self.bodies):
break
prefetch_state = copy.deepcopy(self.viewer.state)
prefetch_state.layout = '3d'
modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
prefetch_states.append(prefetch_state)
with self.viewer.config_state.txn() as s:
s.prefetch = [
neuroglancer.PrefetchState(state=prefetch_state, priority=-i)
for i, prefetch_state in enumerate(prefetch_states)
]
label = self.state.body_labels.get(body.segment_id, '')
with self.viewer.config_state.txn() as s:
s.status_messages['status'] = (
'[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
(index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
self.cumulative_voxels[index] / self.total_voxels, label))
def save(self, s):
self.state.save()
def set_label(self, s, label):
self.state.body_labels[self.bodies[self.index].segment_id] = label
self.set_index(self.index + 1)
def _first_index(self, s):
self.set_index(0)
def _last_index(self, s):
self.set_index(max(0, self._find_one_after_last_labeled_index() - 1))
def _next_index(self, s):
self.set_index(self.index + 1)
def _prev_index(self, s):
self.set_index(max(0, self.index - 1))
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
ap.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
ap.add_argument('--segmentation-url',
required=True,
help='Neuroglancer data source URL for segmentation')
ap.add_argument('--state', required=True, help='Path to proofreading state file')
    ap.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
    ap.add_argument('--labels', nargs='+', help='Labels to use')
ap.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
bodies = []
with open(args.bodies, 'r') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
bodies.append(
Body(
segment_id=int(row['id']),
num_voxels=int(row['num_voxels']),
bbox_start=np.array([
int(row['bbox.start.x']),
int(row['bbox.start.y']),
int(row['bbox.start.z'])
],
dtype=np.int64),
bbox_size=np.array(
[int(row['bbox.size.x']),
int(row['bbox.size.y']),
int(row['bbox.size.z'])],
dtype=np.int64),
))
tool = Tool(
state_path=args.state,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
labels=args.labels,
bodies=bodies,
num_to_prefetch=args.prefetch,
)
print(tool.viewer)
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/third_party/tlslite/tlslite/__init__.py
|
Python
|
mit
| 904 | 0.002212 |
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite is a free python library that implements SSL and TLS. TLS Lite
supports RSA and SRP ciphersuites. TLS Lite is pure python, however it can use
other libraries for faster crypto operations. TLS Lite integrates with several
stdlib networking libraries.
API documentation is available in the 'docs' directory.
If you have questions or feedback, feel free to contact me.
To use, do::
from tlslite import TLSConnection, ...
If you want to import the most useful objects, the cleanest way is:
from tlslite.api import *
Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket.
(Or, use one of the integration classes in L{tlslite.integration}).
@version: 0.4.8
"""
from tlslite.api import *
from tlslite.api import __version__ # Unsure why this is needed, but it is
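# Hedged client-side usage sketch (hostname and handshake options are
# illustrative; the exact API surface depends on the installed version):
#
#     import socket
#     from tlslite import TLSConnection
#     sock = socket.create_connection(("example.com", 443))
#     connection = TLSConnection(sock)
#     connection.handshakeClientCert()           # plain handshake, no client cert
#     connection.send(b"GET / HTTP/1.0\r\n\r\n")
#     connection.close()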
|
anderson89marques/PyFixedFlatFile
|
pyFixedFlatFile/exceptions.py
|
Python
|
mit
| 476 | 0.002101 |
class ParamsException(Exception):
"""Exception raised when tp, fmt
|
and size values are wrongs"""
pass
class LineSizeException(Exception):
"""Exception raised when line size is bigger then specified"""
pass
class LineIdentifierException(Exception):
"""Exception raised when line indentifier rased from the
file is different to the line identifier used in the specification
obs: line identifier is defined using .eq() fu
|
nction
"""
pass
|
huran2014/huran.github.io
|
wot_gateway/usr/lib/python2.7/posixfile.py
|
Python
|
gpl-2.0
| 8,003 | 0.004748 |
"""Extended file operations available in POSIX.
f = posixfile.open(filename, [mode, [bufsize]])
will create a new posixfile object
f = posixfile.fileopen(fileobject)
will create a posixfile object from a builtin file object
f.file()
will return the original builtin file object
f.dup()
will return a new file object based on a new filedescriptor
f.dup2(fd)
will return a new file object based on the given filedescriptor
f.flags(mode)
will turn on the associated flag (merge)
mode can contain the following characters:
(character representing a flag)
a append only flag
c close on exec flag
n no delay flag
s synchronization flag
(modifiers)
! turn flags 'off' instead of default 'on'
= copy flags 'as is' instead of default 'merge'
? return a string in which the characters represent the flags
that are set
note: - the '!' and '=' modifiers are mutually exclusive.
- the '?' modifier will return the status of the flags after they
have been changed by other characters in the mode string
f.lock(mode [, len [, start [, whence]]])
will (un)lock a region
mode can contain the following characters:
(character representing type of lock)
u unlock
r read lock
w write lock
(modifiers)
| wait until the lock can be granted
? return the first lock conflicting with the requested lock
or 'None' if there is no conflict. The lock returned is in the
format (mode, len, start, whence, pid) where mode is a
character representing the type of lock ('r' or 'w')
note: - the '?' modifier prevents a region from being locked; it is
query only
"""
import warnings
warnings.warn("The posixfile module is deprecated; "
"fcntl.lockf() provides better locking", DeprecationWarning, 2)
class _posixfile_:
"""File wrapper class that provides extra POSIX file routines."""
states = ['open', 'closed']
#
# Internal routines
#
def __repr__(self):
file = self._file_
return "<%s posixfile '%s', mode '%s' at %s>" % \
(self.states[file.closed], file.name, file.mode, \
hex(id(self))[2:])
#
# Initialization routines
#
def open(self, name, mode='r', bufsize=-1):
import __builtin__
        return self.fileopen(__builtin__.open(name, mode, bufsize))
def fileopen(self, file):
import types
if repr(type(file)) != "<type 'file'>":
raise TypeError, 'posixfile.fileopen() arg must be file object'
        self._file_ = file
# Copy basic file methods
for maybemethod in dir(file):
if not maybemethod.startswith('_'):
attr = getattr(file, maybemethod)
if isinstance(attr, types.BuiltinMethodType):
setattr(self, maybemethod, attr)
return self
#
# New methods
#
def file(self):
return self._file_
def dup(self):
import posix
if not hasattr(posix, 'fdopen'):
raise AttributeError, 'dup() method unavailable'
return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
def dup2(self, fd):
import posix
if not hasattr(posix, 'fdopen'):
raise AttributeError, 'dup() method unavailable'
posix.dup2(self._file_.fileno(), fd)
return posix.fdopen(fd, self._file_.mode)
def flags(self, *which):
import fcntl, os
if which:
if len(which) > 1:
raise TypeError, 'Too many arguments'
which = which[0]
else: which = '?'
l_flags = 0
if 'n' in which: l_flags = l_flags | os.O_NDELAY
if 'a' in which: l_flags = l_flags | os.O_APPEND
if 's' in which: l_flags = l_flags | os.O_SYNC
file = self._file_
if '=' not in which:
cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
if '!' in which: l_flags = cur_fl & ~ l_flags
else: l_flags = cur_fl | l_flags
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags)
if 'c' in which:
arg = ('!' not in which) # 0 is don't, 1 is do close on exec
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg)
if '?' in which:
which = '' # Return current flags
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
if os.O_APPEND & l_flags: which = which + 'a'
if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1:
which = which + 'c'
if os.O_NDELAY & l_flags: which = which + 'n'
if os.O_SYNC & l_flags: which = which + 's'
return which
def lock(self, how, *args):
import struct, fcntl
if 'w' in how: l_type = fcntl.F_WRLCK
elif 'r' in how: l_type = fcntl.F_RDLCK
elif 'u' in how: l_type = fcntl.F_UNLCK
else: raise TypeError, 'no type of lock specified'
if '|' in how: cmd = fcntl.F_SETLKW
elif '?' in how: cmd = fcntl.F_GETLK
else: cmd = fcntl.F_SETLK
l_whence = 0
l_start = 0
l_len = 0
if len(args) == 1:
l_len = args[0]
elif len(args) == 2:
l_len, l_start = args
elif len(args) == 3:
l_len, l_start, l_whence = args
elif len(args) > 3:
raise TypeError, 'too many arguments'
# Hack by davem@magnet.com to get locking to go on freebsd;
# additions for AIX by Vladimir.Marangozov@imag.fr
import sys, os
if sys.platform in ('netbsd1',
'openbsd2',
'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
'freebsd6', 'freebsd7', 'freebsd8',
'bsdos2', 'bsdos3', 'bsdos4'):
flock = struct.pack('lxxxxlxxxxlhh', \
l_start, l_len, os.getpid(), l_type, l_whence)
elif sys.platform in ('aix3', 'aix4'):
flock = struct.pack('hhlllii', \
l_type, l_whence, l_start, l_len, 0, 0, 0)
else:
flock = struct.pack('hhllhh', \
l_type, l_whence, l_start, l_len, 0, 0)
flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
if '?' in how:
if sys.platform in ('netbsd1',
'openbsd2',
'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
'bsdos2', 'bsdos3', 'bsdos4'):
l_start, l_len, l_pid, l_type, l_whence = \
struct.unpack('lxxxxlxxxxlhh', flock)
elif sys.platform in ('aix3', 'aix4'):
l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
struct.unpack('hhlllii', flock)
elif sys.platform == "linux2":
l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
struct.unpack('hhllhh', flock)
else:
l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
struct.unpack('hhllhh', flock)
if l_type != fcntl.F_UNLCK:
if l_type == fcntl.F_RDLCK:
return 'r', l_len, l_start, l_whence, l_pid
else:
return 'w', l_len, l_start, l_whence, l_pid
def open(name, mode='r', bufsize=-1):
"""Public routine to open a file as a posixfile object."""
return _posixfile_().open(name, mode, bufsize)
def fileopen(file):
"""Public routine to get a posixfile object from a Python file object."""
return _posixfile_().fileopen(file)
#
# Constants
#
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#
# End of posixfile.py
#
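# Usage sketch based on the docstring above (illustrative only):
#
#     import posixfile
#     f = posixfile.open('/tmp/example.dat', 'w')
#     f.lock('w|')   # write-lock the whole file, waiting until it can be granted
#     f.flags('a')   # merge in the append-only flag
#     f.lock('u')    # release the lock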
|
teroc/Otero
|
tests/run_tests.py
|
Python
|
mit
| 1,499 | 0.002668 |
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Tampere University of Technology,
# Intel Corporation,
# OptoFidelity,
# and authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable = C0103, C0111, C0302, C0326
# pylint: disable = R0902, R0903, R0904, R0911, R0912, R0913, R0914, R0915
# pylint: disable = W0212
import unittest
_testloader = unittest.TestLoader()
_testsuite = _testloader.discover(".")
_testresult = unittest.TextTestRunner(verbosity = 2).run(_testsuite)
|
IEEEDTU/CMS
|
manage.py
|
Python
|
mit
| 246 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE"
|
, "CMS.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
miurahr/seahub
|
seahub/file_tags/migrations/0002_remove_filetags_parent_folder_uuid.py
|
Python
|
apache-2.0
| 360 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-03-01 02:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('file_tags', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='filetags',
name='parent_folder_uuid',
),
]
|
davidshepherd7/Landau-Lifshitz-Gilbert-ODE-model
|
llg/mallinson.py
|
Python
|
gpl-3.0
| 4,251 | 0.000235 |
"""Calculate exact solutions for the zero dimensional LLG as given by
[Mallinson2000]
"""
from __future__ import division
from __future__ import absolute_import
from math import sin, cos, tan, log, atan2, acos, pi, sqrt
import scipy as sp
import matplotlib.pyplot as plt
import functools as ft
import simpleode.core.utils as utils
def calculate_switching_time(magnetic_parameters, p_start, p_now):
"""Calculate the time taken to switch from polar angle p_start to p_now
with the magnetic parameters given.
"""
# Should never quite get to pi/2
# if p_now >= pi/2:
# return sp.inf
# Cache some things to simplify the expressions later
H = magnetic_parameters.H(None)
Hk = magnetic_parameters.Hk()
alpha = magnetic_parameters.alpha
gamma = magnetic_parameters.gamma
# Calculate the various parts of the expression
prefactor = ((alpha**2 + 1)/(gamma * alpha)) \
* (1.0 / (H**2 - Hk**2))
a = H * log(tan(p_now/2) / tan(p_start/2))
b = Hk * log((H - Hk*cos(p_start)) /
(H - Hk*cos(p_now)))
c = Hk * log(sin(p_now) / sin(p_start))
# Put everything together
return prefactor * (a + b + c)
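# Reading aid only: the return value above is the closed-form Mallinson
# switching time
#   t = (1 + alpha^2) / (gamma * alpha * (H^2 - Hk^2))
#       * [ H*ln(tan(p/2)/tan(p0/2))
#           + Hk*ln((H - Hk*cos(p0)) / (H - Hk*cos(p)))
#           + Hk*ln(sin(p)/sin(p0)) ]
# with p0 = p_start and p = p_now.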
def calculate_azimuthal(magnetic_parameters, p_start, p_now):
"""Calculate the azimuthal angle corresponding to switching from
p_start to p_now with the magnetic parameters given.
"""
def azi_into_range(azi):
a = azi % (2*pi)
if a < 0:
a += 2*pi
return a
alpha = magnetic_parameters.alpha
no_range_azi = (-1/alpha) * log(tan(p_now/2) / tan(p_start/2))
return azi_into_range(no_range_azi)
def generate_dynamics(magnetic_parameters,
start_angle=pi/18,
end_angle=17*pi/18,
steps=1000):
"""Generate a list of polar angles then return a list of corresponding
m directions (in spherical polar coordinates) and switching times.
"""
mag_params = magnetic_parameters
# Construct a set of solution positions
pols = sp.linspace(start_angle, end_angle, steps)
azis = [calculate_azimuthal(mag_params, start_angle, p) for p in pols]
    sphs = [utils.SphPoint(1.0, azi, pol) for azi, pol in zip(azis, pols)]
# Calculate switching times for these positions
times = [calculate_switching_time(mag_params, start_angle, p)
for p in pols]
return (sphs, times)
|
def plot_dynamics(magnetic_parameters,
start_angle=pi/18,
end_angle=17*pi/18,
steps=1000):
"""Plot exact positions given start/finish angles and magnetic
parameters.
"""
sphs, times = generate_dynamics(magnetic_parameters, start_angle,
end_angle, steps)
sphstitle = "Path of m for " + str(magnetic_parameters) \
+ "\n (starting point is marked)."
utils.plot_sph_points(sphs, title=sphstitle)
timestitle = "Polar angle vs time for " + str(magnetic_parameters)
utils.plot_polar_vs_time(sphs, times, title=timestitle)
plt.show()
def calculate_equivalent_dynamics(magnetic_parameters, polars):
"""Given a list of polar angles (and some magnetic parameters)
calculate what the corresponding azimuthal angles and switching times
(from the first angle) should be.
"""
start_angle = polars[0]
f_times = ft.partial(calculate_switching_time, magnetic_parameters,
start_angle)
exact_times = [f_times(p) for p in polars]
f_azi = ft.partial(calculate_azimuthal, magnetic_parameters, start_angle)
exact_azis = [f_azi(p) for p in polars]
return exact_times, exact_azis
def plot_vs_exact(magnetic_parameters, ts, ms):
# Extract lists of the polar coordinates
m_as_sph_points = map(utils.array2sph, ms)
pols = [m.pol for m in m_as_sph_points]
azis = [m.azi for m in m_as_sph_points]
# Calculate the corresponding exact dynamics
exact_times, exact_azis = \
calculate_equivalent_dynamics(magnetic_parameters, pols)
# Plot
plt.figure()
plt.plot(ts, pols, '--',
exact_times, pols)
plt.figure()
plt.plot(pols, azis, '--',
pols, exact_azis)
plt.show()
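# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how the exact Mallinson solution above might be evaluated. The stand-in
# parameters class is an assumption: the real object only needs to expose
# H(t), Hk(), alpha and gamma, as used by calculate_switching_time().
def _demo_switching_time():
    class _DemoMagParameters(object):
        alpha = 0.5
        gamma = 1.0
        def H(self, t):
            return 2.0   # illustrative applied-field magnitude
        def Hk(self):
            return 0.0   # no anisotropy field, to keep the example simple
    mag_params = _DemoMagParameters()
    # Time to switch from just below one pole to just above the other.
    return calculate_switching_time(mag_params, pi/18, 17*pi/18)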
|
pgmmpk/cslavonic
|
cslavonic/ucs_decode.py
|
Python
|
mit
| 9,501 | 0.001053 |
'''
Created on Feb 4, 2016
Decoding tables taken from https://github.com/typiconman/Perl-Lingua-CU
@author: mike kroutikov
'''
from __future__ import print_function, unicode_literals
import codecs
def ucs_decode(input_, errors='strict'):
return ''.join(decoding_table[x] for x in input_), len(input_)
def ucs_encode(input_, errors):
raise NotImplementedError('encoding to UCS is not implemented')
### Decoding Table
decoding_table = (
'\x00',
'\x01',
'\x02',
'\x03',
'\x04',
'\x05',
'\x06',
'\x07',
'\x08',
'\t',
'\n',
'\x0b',
'\x0c',
'\r',
'\x0e',
'\x0f',
'\x10',
'\x11',
'\x12',
'\x13',
'\x14',
'\x15',
'\x16',
'\x17',
'\x18',
'\x19',
'\x1a',
'\x1b',
'\x1c',
'\x1d',
'\x1e',
'\x1f',
' ',
'!',
'"',
'\u0486',
'\u0486\u0301',
'\u0486\u0300',
'\u0483',
"'",
'(',
')',
'\ua673',
'\u2de1\u0487', # combining VE
',',
'-',
'.',
'/',
'\u043e\u0301',
'\u0301',
'\u0300',
'\u0486',
'\u0486\u0301',
'\u0486\u0300',
'\u0311', # combining inverted breve
'\u0483', # titlo
'\u033e', # combining vertical tilde
'\u0436\u0483', # zhe with titlo above
':',
';',
'\u2def', # combining HA
'\u2de9\u0487', # combining EN
'\u2dec\u0487', # combining ER
'\u2df1\u0487', # combining CHE
'\u0300',
'\u0430\u0300', # latin A maps to AZ with grave accent
'\u0463\u0311', # latin B maps to Yat' with inverted breve
'\u2ded\u0487', # combining ES
'\u0434\u2ded\u0487',
'\u0435\u0300', # latin E maps to e with grave accent
'\u0472', # F maps to THETA
'\u0433\u0483', # G maps to ge with TITLO
'\u0461\u0301', # latin H maps to omega with acute accent
'\u0406',
'\u0456\u0300',
'\ua656\u0486', # YA with psili
'\u043b\u2de3', # el with cobining de
'\u0476', # capital IZHITSA with kendema
'\u047a\u0486', # capital WIDE ON with psili
'\u047a', # just capital WIDE ON
'\u0470', # capital PSI
'\u047c', # capital omega with great apostrophe
'\u0440\u0483', # lowercase re with titlo
'\u0467\u0300', # lowercase small yus with grave
'\u047e', # capital OT
'\u041e\u0443', # diagraph capital UK
'\u0474', # capital IZHITSA
'\u0460', # capital OMEGA
'\u046e', # capital XI
'\ua64b\u0300', # monograph uk with grave
'\u0466', # capital SMALL YUS
'[',
'\u0483', # yet another titlo
']',
'\u0311', # combining inverted breve
'\u033e', # yet another yerik
'`',
'\u0430\u0301', # latin A maps to AZ with acute accent
'\u2dea\u0487', # combining ON
'\u2ded\u0487', # combining ES
'\u2de3', # combining DE
'\u0435\u0301', # latin E maps to e with acute accent
'\u0473', # lowercase theta
'\u2de2\u0487', # combining ge
'\u044b\u0301', # ery with acute accent
'\u0456',
'\u0456\u0301', # i with acute accent
'\ua657\u0486', # iotaed a with psili
'\u043b\u0483', # el with titlo
'\u0477', # izhitsa with izhe titlo
'\u047b\u0486', # wide on with psili
'\u047b', # wide on
'\u0471', # lowercase psi
'\u047d', # lowercase omega with great apostrophe
'\u0440\u2ded\u0487', # lowercase er with combining es
'\u0467\u0301', # lowercase small yus with acute accent
'\u047f', # lowercase ot
'\u1c82\u0443', # diagraph uk
'\u0475', # lowercase izhitsa
'\u0461', # lowercase omega
'\u046f', # lowercase xi
'\ua64b\u0301', # monograph uk with acute accent
'\u0467', # lowercase small yus
'\ua64b\u0311', # monograph uk with inverted breve
'\u0467\u0486\u0300', # lowercase small yus with apostroph
'\u0438\u0483', # the numeral eight
'\u0301', # yet another acute accent
'\x7f',
'\u0475\u0301', # lowercase izhitsa with acute
'\u0410\u0486\u0301', # uppercase A with psili and acute
'\u201a',
'\u0430\u0486\u0301', # lowercase A with psili and acute
'\u201e',
'\u046f\u0483', # the numberal sixty
'\u0430\u0311', # lowercase a with inverted breve
'\u0456\u0311', # lowercase i with inverted breve
'\u2de5', # combining ze
'\u0467\u0311', # lowercase small yus with inverted breve
'\u0466\u0486', # upercase small yus with psili
'\u0456\u0483', # the numeral ten
'\u0460\u0486', # capital OMEGA with psili
'\u041e\u0443\u0486\u0301', # diagraph uk with apostroph
'\ua656\u0486\u0301', # uppercase Iotated A with apostroph
'\u047a\u0486\u0301', # uppercase Round O with apostroph
'\u0475\u2de2\u0487', # lowercase izhitsa with combining ge
'\u2018',
'\u2019',
'\u201c',
'\u201d',
'\u2de4', # combining zhe
'\u2013',
'\u2014',
'\ufffe',
'\u0442\u0483',
'\u0467\u0486', # lowercase small yus with psili
'\u0475\u0311', # izhitsa with inverted breve
'\u0461\u0486', # lowercase omega with psili
'\u1c82\u0443\u0486\u0301', # diagraph uk with apostroph
'\ua657\u0486\u0301', # lowercase iotaed a with apostroph
'\u047b\u0486\u0301', # lowercase Round O with apostroph
'\xa0',
'\u041e\u0443\u0486', # Capital Diagraph Uk with psili
'\u1c82\u0443\u0486', # lowercase of the above
'\u0406\u0486\u0301', # Uppercase I with apostroph
'\u0482', # cyrillic thousands sign
'\u0410\u0486', # capital A with psili
'\u0445\u0483', # lowercase kha with titlo
'\u0447\u0483', # the numeral ninety
'\u0463\u0300', # lowecase yat with grave accent
'\u0441\u0483', # the numeral two hundred
'\u0404',
'\xab',
'\xac',
'\xad',
'\u0440\u2de3', # lowercase er with dobro titlo
'\u0406\u0486',
'\ua67e', # kavyka
'\ua657\u0486\u0300',
'\u0406',
'\u0456\u0308',
'\u0430\u0486',
'\u0443', # small letter u (why encoded at the micro sign?!)
'\xb6',
'\xb7',
'\u0463\u0301', # lowercase yat with acute accent
'\u0430\u0483', # the numeral one
'\u0454', # wide E
'\xbb',
'\u0456\u0486\u0301', # lowercase i with apostroph
'\u0405',
'\u0455',
'\u0456\u0486', # lowercase i with psili
'\u0410',
'\u0411',
'\u0412',
'\u0413',
'\u0414',
'\u0415',
'\u0416',
'\u0417',
'\u0418',
'\u0419',
'\u041a',
'\u041b',
'\u041c',
'\u041d',
'\u041e',
'\u041f',
'\u0420',
'\u0421',
'\u0422',
'\ua64a',
'\u0424',
'\u0425',
'\u0426',
'\u0427',
'\u0428',
'\u0429',
'\u042a',
'\u042b',
'\u042c',
'\u0462', # capital yat
'\u042e',
'\ua656', # capital Iotified A
'\u0430',
'\u0431',
'\u0432',
'\u0433',
'\u0434',
'\u0435',
'\u0436',
'\u0437',
'\u0438',
'\u0439',
'\u043a',
'\u043b',
'\u043c',
'\u043d',
'\u043e',
'\u043f',
'\u0440',
'\u0441',
'\u0442',
'\ua64b', # monograph Uk (why?!)
'\u0444',
'\u0445',
'\u0446',
'\u0447',
'\u0448',
'\u0449',
'\u044a',
'\u044b',
'\u044c',
'\u0463', # lowercase yat
'\u044e',
    '\ua657', # iotaed a
)
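# Hypothetical usage sketch (not part of the original module): decoding a short
# byte string with the table above. The byte values are arbitrary examples; in
# Python 3 iterating over bytes yields ints, which is what ucs_decode indexes with.
def _demo_ucs_decode():
    raw = bytes([0x41, 0xC0, 0xE1])
    text, consumed = ucs_decode(raw)
    return text, consumed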
def _build_decoding_table(fname):
    '''utility to build decoding_table from Perl's ucsequivs file. we base on cp1251 and overlay data from ucsequivs'''
from encodings import cp1251
decode_table = list(cp1251.decoding_table)
comments = [None] * 256
with codecs.open(fname, 'r', 'utf-8') as f:
for line in f:
line = line.strip()
if not line or line == 'use utf8;' or line.startswith('#'):
continue
key, chars, comment = parse_perl_dictionary_entry(line)
decode_table[key] = chars
comments[key] = comment
return decode_table, comments
def parse_perl_dictionary_entry(line):
key, value = line.split('=>')
key = key.strip().strip("'")
if key == '\\\\':
key = '\\'
key = key.encode('cp1251')
assert len(key) == 1, key
key = int(key[0])
value = val
|
pchretien/btiswatchingu
|
python/bt_device.py
|
Python
|
gpl-2.0
| 1,349 | 0.004448 |
## BTisWatchingU ##
#
# This program scans for bluetooth devices and adds their address and name to a
# centralized database. This database has some simple facilities to determine
# where and when the device has been spotted.
# Copyright (C) 2008,2009 Philippe Chretien
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License Version 2
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# You will find the latest version of this code at the following address:
# http://github.com/pchretien
#
# You can contact me at the following email address:
# philippe.chretien@gmail.com
class device:
__name = ""
__address = ""
def __init__(self, deviceName, deviceAddress):
self.__name = deviceName
self.__address = deviceAddress
def getName(self):
return self.__name
def getAddress(self):
return self.__address
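# Hypothetical usage sketch (not part of the original file): wrapping one
# discovered (name, address) pair, e.g. as reported by a bluetooth inquiry scan.
def _demo_device():
    d = device("Example Phone", "00:11:22:33:44:55")
    return d.getName(), d.getAddress()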
|
balloob/home-assistant
|
homeassistant/components/rest/sensor.py
|
Python
|
apache-2.0
| 9,072 | 0.000882 |
"""Support for RESTful API sensors."""
import json
import logging
from xml.parsers.expat import ExpatError
import httpx
from jsonpath import jsonpath
import voluptuous as vol
import xmltodict
from homeassistant.components.sensor import DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_TIMEOUT,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
from .data import DEFAULT_TIMEOUT, RestData
_LOGGER = logging.getLogger(__name__)
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
CONF_JSON_ATTRS = "json_attributes"
CONF_JSON_ATTRS_PATH = "json_attributes_path"
METHODS = ["POST", "GET"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_JSON_ATTRS, default=[]): cv.ensure_list_csv,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(METHODS),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PAYLOAD): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_JSON_ATTRS_PATH): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
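# Illustrative only (an assumption, not part of Home Assistant): a configuration
# mapping of the rough shape this PLATFORM_SCHEMA accepts, written out as a Python
# dict. The endpoint, name and template values below are made up for demonstration.
_EXAMPLE_PLATFORM_CONFIG = {
    CONF_RESOURCE: "http://192.168.1.10/api/status",
    CONF_NAME: "Upstairs thermostat",
    CONF_METHOD: "GET",
    CONF_VALUE_TEMPLATE: "{{ value_json.temperature }}",
    CONF_JSON_ATTRS: ["humidity", "battery"],
    CONF_VERIFY_SSL: False,
}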
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the RESTful sensor."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
resource_template = config.get(CONF_RESOURCE_TEMPLATE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
device_class = config.get(CONF_DEVICE_CLASS)
value_template = config.get(CONF_VALUE_TEMPLATE)
json_attrs = config.get(CONF_JSON_ATTRS)
json_attrs_path = config.get(CONF_JSON_ATTRS_PATH)
force_update = config.get(CONF_FORCE_UPDATE)
timeout = config.get(CONF_TIMEOUT)
if value_template is not None:
value_template.hass = hass
if resource_template is not None:
resource_template.hass = hass
resource = resource_template.render(parse_result=False)
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = httpx.DigestAuth(username, password)
else:
auth = (username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
await rest.async_update()
if rest.data is None:
raise PlatformNotReady
# Must update the sensor now (including fetching the rest resource) to
# ensure it's updating its state.
async_add_entities(
[
RestSensor(
hass,
rest,
name,
unit,
device_class,
value_template,
json_attrs,
force_update,
resource_template,
json_attrs_path,
)
],
True,
)
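# Illustrative only (not part of Home Assistant): the jsonpath behaviour that
# RestSensor.async_update() below relies on -- jsonpath() returns a list of
# matches, so the matched object lives at index 0. Document and path are made up.
def _demo_jsonpath_extraction():
    document = {"status": {"temperature": 21.5, "humidity": 40}}
    matched = jsonpath(document, "$.status")
    return matched[0] if isinstance(matched, list) else None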
class RestSensor(Entity):
"""Implementation of a REST sensor."""
def __init__(
self,
hass,
rest,
name,
unit_of_measurement,
device_class,
value_template,
json_attrs,
force_update,
resource_template,
json_attrs_path,
):
"""Initialize the REST sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._state = None
self._unit_of_measurement = unit_of_measurement
self._device_class = device_class
self._value_template = value_template
self._json_attrs = json_attrs
self._attributes = None
self._force_update = force_update
self._resource_template = resource_template
self._json_attrs_path = json_attrs_path
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def available(self):
"""Return if the sensor data are available."""
return self.rest.data is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def force_update(self):
"""Force update."""
return self._force_update
async def async_update(self):
"""Get the latest data from REST API and update the state."""
if self._resource_template is not None:
self.rest.set_url(self._resource_template.render(parse_result=False))
await self.rest.async_update()
value = self.rest.data
_LOGGER.debug("Data fetched from resource: %s", value)
if self.rest.headers is not None:
            # If the http request failed, headers will be None
content_type = self.rest.headers.get("content-type")
if content_type and (
content_type.startswith("text/xml")
or content_type.startswith("application/xml")
):
try:
                    value = json.dumps(xmltodict.parse(value))
_LOGGER.debug("JSON converted from XML: %s", value)
except ExpatError:
_LOGGER.warning(
"REST xml result could not be parsed and converted to JSON"
)
_LOGGER.debug("Erroneous XML: %s", value)
if self._json_attrs:
self._attributes = {}
if value:
try:
json_dict = json.loads(value)
if self._json_attrs_path is not None:
json_dict = jsonpath(json_dict, self._json_attrs_path)
# jsonpath will always store the result in json_dict[0]
# so the next line happens to work exactly as needed to
# find the result
if isinstance(json_dict, list):
json_dict = json_dict[0]
if isinstance(json_dict, dict):
attrs = {
k: json_dict[k] for k in self._json_attrs if k in json_dict
}
self._attributes = attrs
else:
_LOGGER.warning(
|
davidam/python-examples
|
sparql/dbpedia-asturias.py
|
Python
|
gpl-3.0
| 416 | 0.002404 |
from SPARQLWrapper import SPARQLWrapper, JSON
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE { <http://dbpedia.org/resource/Asturias> rdfs:label ?label }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(result["label"]["value"])
|
Ilias95/apts
|
setup.py
|
Python
|
gpl-3.0
| 1,102 | 0.024501 |
#!/usr/bin/env python3
import apts
from distutils.core import setup
setup(
name = apts.__name__,
packages = [apts.__name__],
    scripts = ['bin/apts'],
version = apts.__version__,
    description = apts.__description__,
author = apts.__author__,
author_email = apts.__author_email__,
license = apts.__license__,
platforms = apts.__platforms__,
url = apts.__url__,
download_url = apts.__download_url__,
keywords = ['tftp', 'server', 'file transfer'],
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: GNU General Public License (GPL)',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX :: Linux',
'Development Status :: 3 - Alpha',
'Environment :: No Input/Output (Daemon)',
'Natural Language :: English',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'Topic :: Communications :: File Sharing',
],
)
|
Digilent/u-boot-digilent
|
test/py/tests/test_zynqmp_secure.py
|
Python
|
gpl-2.0
| 3,375 | 0.001778 |
# Copyright (c) 2018, Xilinx Inc.
#
# Siva Durga Prasad Paladugu
#
# SPDX-License-Identifier: GPL-2.0
import pytest
import re
import random
import u_boot_utils
"""
Note: This test relies on boardenv_* containing configuration values to define
the network available and files to be used for testing. Without this, this test
will be automatically skipped.
For example:
# True if a DHCP server is attached to the network, and should be tested.
env__net_dhcp_server = True
# A list of environment variables that should be set in order to configure a
# static IP. In this test case we at least need serverip for performing tftpb
# to get required files.
env__net_static_env_vars = [
("ipaddr", "10.0.0.100"),
("netmask", "255.255.255.0"),
("serverip", "10.0.0.1"),
]
# Details regarding the files that may be read from a TFTP server.
env__zynqmp_secure_readable_file = {
"fn": "auth_bhdr_ppk1.bin",
"enckupfn": "auth_bhdr_enc_kup_load.bin",
"addr": 0x1000000,
"keyaddr": 0x100000,
"keyfn": "aes.txt",
}
"""
import test_net
@pytest.mark.buildconfigspec('cmd_zynqmp')
@pytest.mark.buildconfigspec('cmd_net')
@pytest.mark.buildconfigspec('cmd_dhcp')
@pytest.mark.buildconfigspec('net')
def test_zynqmp_secure_boot_image(u_boot_console):
test_net.test_net_dhcp(u_boot_console)
test_net.test_net_setup_static(u_boot_console)
f = u_boot_console.config.env.get('env__zynqmp_secure_readable_file', None)
if not f:
pytest.skip('No TFTP readable file to read')
addr = f.get('addr', None)
if not addr:
addr = u_boot_utils.find_ram_base(u_boot_console)
expected_tftp = 'Bytes transferred = '
fn = f['fn']
output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
assert expected_tftp in output
expected_zynqmpsecure = 'Verified image at'
output = u_boot_console.run_command('zynqmp secure %x $filesize' % (addr))
assert expected_zynqmpsecure in output
output = u_boot_console.run_command('pri zynqmp_verified_img_addr')
assert "Error" not in output
@pytest.mark.buildconfigspec('cmd_zynqmp')
@pytest.mark.buildconfigspec('cmd_net')
@pytest.mark.buildconfigspec('cmd_dhcp')
@pytest.mark.buildconfigspec('net')
def test_zynqmp_secure_boot_img_kup(u_boot_console):
test_net.test_net_dhcp(u_boot_console)
test_net.test_net_setup_static(u_boot_console)
f = u_boot_console.config.env.get('env__zynqmp_secure_readable_file', None)
if not f:
pytest.skip('No TFTP readable file to read')
keyaddr = f.get('keyaddr', None)
if not keyaddr:
addr = u_boot_utils.find_ram_base(u_boot_console)
expected_tftp = 'Bytes transferred = '
keyfn = f['keyfn']
output = u_boot_console.run_command('tftpboot %x %s' % (keyaddr, keyfn))
assert expected_tftp in output
addr = f.get('addr', None)
if not addr:
addr = u_boot_utils.find_ram_base(u_boot_console)
expected_tftp = 'Bytes transferred = '
fn = f['enckupfn']
    output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
assert expected_tftp in output
expected_zynqmpsecure = 'Verified image at'
    output = u_boot_console.run_command('zynqmp secure %x $filesize %x' % (addr, keyaddr))
assert expected_zynqmpsecure in output
output = u_boot_console.run_command('pri zynqmp_verified_img_addr')
assert "Error" not in output
|
joaquimrocha/Rancho
|
rancho/message/management.py
|
Python
|
agpl-3.0
| 1,442 | 0.002774 |
########################################################################
# Rancho - Open Source Group/Project Management Tool
# Copyright (C) 2008 The Rancho Team
#
# This program is free software: you can redistribute it and/or
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
from rancho.notification import models as notification
from rancho.message import models as message_app
def create_notice_types(app, created_models, verbosity, **kwargs):
notification.create_notice_type("message_new", _("New message"), _("A new message has been created"))
    notification.create_notice_type("message_replied", _("Message replied"), _("A message has been replied to"))
signals.post_syncdb.connect(create_notice_types, message_app)
|
shigmas/django-push-notifications
|
push_notifications/apns.py
|
Python
|
mit
| 5,297 | 0.020578 |
"""
Apple Push Notification Service
Documentation is available on the iOS Developer Library:
https://developer.apple.com/library/content/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/APNSOverview.html
"""
import time
from apns2 import client as apns2_client
from apns2 import credentials as apns2_credentials
from apns2 import errors as apns2_errors
from apns2 import payload as apns2_payload
from . import models
from . import NotificationError
from .apns_errors import reason_for_exception_class
from .conf import get_manager
class APNSError(NotificationError):
pass
class APNSUnsupportedPriority(APNSError):
pass
class APNSServerError(APNSError):
def __init__(self, status):
super(APNSServerError, self).__init__(status)
self.status = status
def _apns_create_socket(creds=None, application_id=None):
if creds is None:
if not get_manager().has_auth_token_creds(application_id):
cert = get_manager().get_apns_certificate(application_id)
creds = apns2_credentials.CertificateCredentials(cert)
else:
keyPath, keyId, teamId = get_manager().get_apns_auth_creds(application_id)
# No use getting a lifetime because this credential is
# ephemeral, but if you're looking at this to see how to
			# create a credential, you could also pass the lifetime and
# algorithm. Neither of those settings are exposed in the
# settings API at the moment.
creds = creds or apns2_credentials.TokenCredentials(keyPath, keyId, teamId)
client = apns2_client.APNsClient(
creds,
use_sandbox=get_manager().get_apns_use_sandbox(application_id),
use_alternative_port=get_manager().get_apns_use_alternative_port(application_id)
)
client.connect()
return client
def _apns_prepare(
token, alert, application_id=None, badge=None, sound=None, category=None,
content_available=False, action_loc_key=None, loc_key=None, loc_args=[],
extra={}, mutable_content=False, thread_id=None, url_args=None):
if action_loc_key or loc_key or loc_args:
apns2_alert = apns2_payload.PayloadAlert(
body=alert if alert else {}, body_localized_key=loc_key,
body_localized_args=loc_args, action_localized_key=action_loc_key)
else:
apns2_alert = alert
if callable(badge):
badge = badge(token)
return apns2_payload.Payload(
apns2_alert, badge, sound, content_available, mutable_content, category,
url_args, custom=extra, thread_id=thread_id)
def _apns_send(
registration_id, alert, batch=False, application_id=None, creds=None, **kwargs
):
client = _apns_create_socket(creds=creds, application_id=application_id)
notification_kwargs = {}
# if expiration isn"t specified use 1 month from now
notification_kwargs["expiration"] = kwargs.pop("expiration", None)
if not notification_kwargs["expiration"]:
notification_kwargs["expiration"] = int(time.time()) + 2592000
priority = kwargs.pop("priority", None)
if priority:
try:
notification_kwargs["priority"] = apns2_client.NotificationPriority(str(priority))
except ValueError:
raise APNSUnsupportedPriority("Unsupported priority %d" % (priority))
if batch:
data = [apns2_client.Notification(
token=rid, payload=_apns_prepare(rid, alert, **kwargs)) for rid in registration_id]
# returns a dictionary mapping each token to its result. That
# result is either "Success" or the reason for the failure.
return client.send_notification_batch(
data, get_manager().get_apns_topic(application_id=application_id),
**notification_kwargs
)
data = _apns_prepare(registration_id, alert, **kwargs)
client.send_notification(
registration_id, data,
get_manager().get_apns_topic(application_id=application_id),
**notification_kwargs
)
def apns_send_message(registration_id, alert, application_id=None, creds=None, **kwargs):
"""
Sends an APNS notification to a single registration_id.
This will send the notification as form data.
If sending multiple notifications, it is more efficient to use
apns_send_bulk_message()
Note that if set alert should always be a string. If it is not set,
it won"t be included in the notification. You will need to pass None
to this for silent notifications.
"""
try:
_apns_send(
registration_id, alert, application_id=application_id,
creds=creds, **kwargs
)
except apns2_errors.APNsException as apns2_exception:
if isinstance(apns2_exception, apns2_errors.Unregistered):
device = models.APNSDevice.objects.get(registration_id=registration_id)
device.active = False
device.save()
raise APNSServerError(status=reason_for_exception_class(apns2_exception.__class__))
def apns_send_bulk_message(
registration_ids, alert, application_id=None, creds=None, **kwargs
):
"""
Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set alert should always be a string. If it is not set,
it won"t be included in the notification. You will need to pass None
to this for silent notifications.
"""
results = _apns_send(
registration_ids, alert, batch=True, application_id=application_id,
creds=creds, **kwargs
)
inactive_tokens = [token for token, result in results.items() if result == "Unregistered"]
models.APNSDevice.objects.filter(registration_id__in=inactive_tokens).update(active=False)
return results
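# Hypothetical usage sketch (not part of this module): how a caller might use the
# two helpers above. Token values, alert text and keyword arguments are
# illustrative assumptions only.
def _demo_send(tokens):
    # Single device, visible alert with a badge count.
    apns_send_message(tokens[0], "Build finished", badge=1)
    # Many devices, silent (no alert) content-available push; returns a dict
    # mapping each token to "Success" or a failure reason.
    return apns_send_bulk_message(tokens, None, content_available=True)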
|
jbwhit/OSCON-2015
|
deliver/coal_data_cleanup.py
|
Python
|
mit
| 2,214 | 0.006323 |
# coding: utf-8
# # Clean the raw data
#
# ## Data from U.S. Energy Information Administration
#
# Data URL: [eia.gov](http://www.eia.gov/coal/data.cfm)
#
# Combining and cleaning the raw csv files into a cleaned data set and coherent database.
#
# Generally a good idea to have a separate data folder with the raw data.
#
# When you clean the raw data, leave the raw in place, and create a cleaned version with the steps included (ideal situation for a Notebook).
# In[1]:
# %install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
get_ipython().magic(u'load_ext version_information')
get_ipython().magic(u'reload_ext version_information')
get_ipython().magic(u'version_information numpy, scipy, matplotlib, pandas')
# In[2]:
import numpy as np
import pandas as pd
# In[3]:
get_ipython().system(u'pwd')
# In[4]:
# The cleaned data file is saved here:
output_file = "../data/coal_prod_cleaned.csv"
# In[5]:
df1 = pd.read_csv("../data/coal_prod_2002.csv", index_col="MSHA_ID")
df2 = pd.read_csv("../data/coal_prod_2003.csv", index_col="MSHA_ID")
df3 = pd.read_csv("../data/coal_prod_2004.csv", index_col="MSHA_ID")
df4 = pd.read_csv("../data/coal_prod_2005.csv", index_col="MSHA_ID")
df5 = pd.read_csv("../data/coal_prod_2006.csv", index_col="MSHA_ID")
df6 = pd.read_csv("../data/coal_prod_2007.csv", index_col="MSHA_ID")
df7 = pd.read_csv("../data/coal_prod_2008.csv", index_col="MSHA_ID")
df8 = pd.read_csv("../data/coal_prod_2009.csv", index_col="MSHA_ID")
df9 = pd.read_csv("../data/coal_prod_2010.csv", index_col="MSHA_ID")
df10 = pd.read_csv("../data/coal_prod_2011.csv", index_col="MSHA_ID")
df11 = pd.read_csv("../data/coal_prod_2012.csv", index_col="MSHA_ID")
# In[6]:
dframe = pd.concat((df1, df2, df3, df4, df5, df6, df7, df8, df9, df10, df11))
# In[7]:
# Noticed a probable typo in the data set:
dframe['Company_Type'].unique()
# In[8]:
# Correcting the Company_Type
dframe.loc[dframe['Company_Type'] == 'Indepedent Producer Operator', 'Company_Type'] = 'Independent Producer Operator'
dframe.head()
# In[9]:
dframe[dframe.Year == 2003].head()
# # Final Cleaned Data Product
# In[10]:
dframe.to_csv(output_file, )
# In[ ]:
|
magnastrazh/NEUCOGAR
|
nest/GDP/scripts/parameters.py
|
Python
|
gpl-2.0
| 3,431 | 0.001749 |
from property import *
# Neuron common parameters
iaf_neuronparams = {'E_L': -70.,
'V_th': -50.,
'V_reset': -67.,
'C_m': 2.,
't_ref': 2.,
'V_m': -60.,
'tau_syn_ex': 1.,
'tau_syn_in': 1.33}
# Synapse common parameters
STDP_synapseparams = {
'model': 'stdp_synapse',
'tau_m': {'distribution': 'uniform', 'low': 15., 'high': 25.},
'alpha': {'distribution': 'normal_clipped', 'low': 0.5, 'mu': 5.0, 'sigma': 1.0},
    'delay': {'distribution': 'uniform', 'low': 0.8, 'high': 2.5},
'lambda': 0.5
}
# Glutamate synapse
STDP_synparams_Glu = dict({'delay': {'distribution': 'uniform', 'low': 1, 'high': 1.3},
'weight': w_Glu,
'Wmax': 70.}, **STDP_synapseparams)
# GABA synapse
STDP_synparams_GABA = dict({'delay': {'distribution': 'uniform', 'low': 1., 'high': 1.3},
'weight': w_GABA,
'Wmax': -60.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': {'distribution': 'uniform', 'low': 1, 'high': 1.3},
'weight': w_ACh,
'Wmax': 70.}, **STDP_synapseparams)
# Dopamine synapse common parameter
DOPA_synparams = {'delay': 1.}
# Dopamine excitatory synapse
DOPA_synparams_ex = dict({'weight': w_DA_ex,
'Wmax': 100.,
'Wmin': 85.}, **DOPA_synparams)
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'weight': w_DA_in,
'Wmax': -100.,
'Wmin': -85.}, **DOPA_synparams)
# Noradrenaline synapse common parameter
NORA_synparams = {'delay': 1.}
# Noradrenaline excitatory synapse
NORA_synparams_ex = dict({'weight': w_NA_ex,
'Wmax': 100.,
'Wmin': 85.}, **NORA_synparams)
# Serotonin synapse common parameter
SERO_synparams = {'delay': 1.}
# Serotonin inhibitory synapse
SERO_synparams_in = dict({'weight': w_SE_in,
'Wmax': -100.,
'Wmin': -85.}, **SERO_synparams)
# Create volume transmitters
# Dictionary of synapses with keys and their parameters
types = {GABA: (STDP_synparams_GABA, w_GABA, 'GABA'),
ACh: (STDP_synparams_ACh, w_ACh, 'Ach'),
Glu: (STDP_synparams_Glu, w_Glu, 'Glu'),
DA_ex: (DOPA_synparams_ex, w_DA_ex, 'DA_ex', dopa_model_ex),
DA_in: (DOPA_synparams_in, w_DA_in, 'DA_in', dopa_model_in),
NA_ex: (NORA_synparams_ex, w_NA_ex, 'NA_ex', nora_model_ex),
SE_in: (SERO_synparams_in, w_SE_in, 'SE_in', sero_model_in) }
# Parameters for generator links
static_syn = {
'model': 'static_synapse',
'weight': w_Glu * 5,
'delay': pg_delay
}
# Connection parameters
conn_dict = {'rule': 'all_to_all',
'multapses': True}
# Device parameters
multimeter_param = {'to_memory': True,
'to_file': False,
'withtime': True,
'interval': 0.1,
'record_from': ['V_m'],
'withgid': True}
detector_param = {'label': 'spikes',
'withtime': True,
'withgid': True,
'to_file': False,
'to_memory': True,
'scientific': True}
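# Hypothetical usage sketch (not part of the original script): how these parameter
# dictionaries might be handed to NEST, assuming the NEST 2.x-style API and the
# 'iaf_psc_exp' neuron model (both assumptions, not taken from the NEUCOGAR code).
def _demo_build_population():
    import nest
    neurons = nest.Create('iaf_psc_exp', 10, params=iaf_neuronparams)
    detector = nest.Create('spike_detector', params=detector_param)
    nest.Connect(neurons, neurons, conn_spec=conn_dict, syn_spec=static_syn)
    nest.Connect(neurons, detector)
    return neurons, detector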
|
warkanum/warkanums-pi-device-snippets
|
build1/boot_special.py
|
Python
|
mit
| 8,959 | 0.018306 |
#!/opt/python3.3/bin/python3.3
from threading import Thread
import RPi.GPIO as GPIO
import time
import datetime
import os
import sys
import dht11_sensor
import psycopg2
import copy
###-----------------Hardware Settings-----------------------
PIN_LC=25 #Light Sensor (GPIO.IN, pull_up_down=GPIO.PUD_UP)
PIN_MC=17 #Motion Sensor (GPIO.IN, pull_up_down=GPIO.PUD_UP)
PIN_TC=4 #Temp Sensor (GPIO.IN, pull_up_down=GPIO.PUD_UP)
PIN_TC_WP=7 #Temp Sensor #Wirepi pin 7
PIN_LED1=23 #LED Blue 1
PIN_LED2=24 #LED Blue 2
###------------------SQL Settings-----------------------------
SQL_SRV='127.0.0.1'
SQL_USER='pistats'
SQL_PASSWD='pistats'
SQL_DB='pistats'
#setup pins. Some are setup by functions below.
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_LED1,GPIO.OUT)
GPIO.setup(PIN_LED2,GPIO.OUT)
GPIO.setup(PIN_MC, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#dim leds
GPIO.output(PIN_LED1,GPIO.LOW)
GPIO.output(PIN_LED2,GPIO.LOW)
def UnixLocalEpoch():
dt = datetime.datetime.now()
return int((dt - datetime.datetime(1970,1,1)).total_seconds())
def PhotoSensor(RCpin):
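    # Light level is inferred with the classic RC-timing trick: the pin is first
    # driven low to discharge the capacitor, then switched to an input while the
    # loop counts how long the node takes to charge back to logic high through the
    # photocell. Darker conditions mean higher resistance, so a larger count.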
reading = 0
GPIO.setup(RCpin, GPIO.OUT)
    GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
return reading
def TempsensorRead():
### Loop through the temp sensor library until we get a valid reading ###
for i in range(1,100):
data = dht11_sensor.read(PIN_TC_WP)
#print('Temp={0}*C Humidity={1}% Status={2} Error={3}'.format(data['temperature'], data['humidity'], data['valid'], data['err']))
if data['valid'] == 1:
return data
return None
def save_data(p_SensorValues):
try:
sql_con = psycopg2.connect(host=SQL_SRV, user=SQL_USER,password=SQL_PASSWD,database=SQL_DB)
sql_cur = sql_con.cursor()
print('2temp->' + str(p_SensorValues['temperature']['save']))
print('2light->' + str(p_SensorValues['light']['save']))
print('2motion->' + str(p_SensorValues['motion']['save']))
if p_SensorValues.get('motion', None):
sql_cur.execute("""select id, data_read from sensordata
where sensor_type = 'motion' order by id desc limit 1""")
data = sql_cur.fetchone()
if not data or p_SensorValues['motion']['save']: #(data and str(data[1]) != str(p_SensorValues['motion']['data'])):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES (%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('motion', p_SensorValues['motion']['data'], p_SensorValues['motion']['read'],p_SensorValues['motion']['read'] ))
if p_SensorValues.get('light', None):
sql_cur.execute("select id, data_read from sensordata where sensor_type = 'light' order by id desc limit 1")
data = sql_cur.fetchone()
#we have a +- 10 variance on light.
if not data or p_SensorValues['light']['save']: #(data and (int(p_SensorValues['light']['data']) > int(data[1])+10 or int(p_SensorValues['light']['data']) < int(data[1]) - 10) ):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES(%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('light', p_SensorValues['light']['data'], p_SensorValues['light']['read'],p_SensorValues['light']['read'] ))
if p_SensorValues.get('temperature', None):
sql_cur.execute("select id, data_read from sensordata where sensor_type = 'temperature' order by id desc limit 1")
data = sql_cur.fetchone()
if not data or p_SensorValues['temperature']['save']: #(data and str(data[1]) != str(p_SensorValues['temperature']['temperature'])):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES(%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('temperature', p_SensorValues['temperature']['temperature'], p_SensorValues['temperature']['read'], p_SensorValues['temperature']['read'] ))
if p_SensorValues.get('temperature', None):
sql_cur.execute("select id, data_read from sensordata where sensor_type = 'humidity' order by id desc limit 1")
data = sql_cur.fetchone()
if not data or p_SensorValues['temperature']['save']:#(data and str(data[1]) != str(p_SensorValues['temperature']['humidity'])):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES(%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('humidity', p_SensorValues['temperature']['humidity'], p_SensorValues['temperature']['read'], p_SensorValues['temperature']['read'] ))
sql_con.commit()
sql_cur.close()
sql_con.close()
except psycopg2.Error as e:
print("SQL error in save_data: " + str(e))
except Exception as e:
print("Unknown error in save_data: " + str(e))
def main():
SensorValue = {}
TICK_LT = 0 #light detect ticker
TICK_LTI = 0 #light insert ticker
TICK_TMP = 0 #temp ticker
BlueLed = False
while True:
changed = False
motionData = GPIO.input(PIN_MC)
if not SensorValue.get('motion', None):
SensorValue['motion'] = {'data': motionData , 'read': UnixLocalEpoch(), 'changed': UnixLocalEpoch(), 'save': False}
SensorValue['motion']['save'] = False
if int(SensorValue['motion'].get('data', 0)) != int(motionData) :
changed = True
SensorValue['motion']['changed'] = UnixLocalEpoch()
SensorValue['motion']['save'] = True
SensorValue['motion']['data'] = int(motionData)
SensorValue['motion']['read'] = UnixLocalEpoch()
if (SensorValue['motion']['data'] > 0):
#GPIO.output(PIN_LED1,GPIO.HIGH) #flash led
SensorValue['motion']['lastmotion'] = UnixLocalEpoch()
BlueLed = True
else:
#GPIO.output(PIN_LED1,GPIO.LOW) #flash led stop
BlueLed = False
#Measure Light
if not SensorValue.get('light', None):
SensorValue['light'] = {'data': PhotoSensor(PIN_LC) , 'read': UnixLocalEpoch(), 'save': False }
SensorValue['light']['save'] = False
lightChanges = 0
if (TICK_LT < time.perf_counter()):
TICK_LT = time.perf_counter()+1
lightData = PhotoSensor(PIN_LC)
lightChanges = abs(SensorValue['light'].get('data', 0) - lightData)
#print("LC->" + str(lightData ) + "DF:" + str(lightChanges))
if (TICK_LTI < time.perf_counter() or (lightData > 600 and lightChanges > 200) or (lightData < 600 and lightChanges > 30)):
TICK_LTI = time.perf_counter()+30
if SensorValue['light'].get('data', 0) != lightData :
changed = True
SensorValue['light']['changed'] = UnixLocalEpoch()
SensorValue['light']['save'] = True
SensorValue['light']['data'] = lightData
SensorValue['light']['read'] = UnixLocalEpoch()
        #Measure Temperature, this might hold the thread for a few seconds at most.
if not SensorValue.get('temperature', None):
SensorValue['temperature'] = {'temperature': 0, 'humidity': 0, 'changed': 0, 'save': False}
SensorValue['temperature']['save'] = False
if (TICK_TMP < time.perf_counter()):
TICK_TMP = time.perf_counter()+10
tempData = TempsensorRead()
if tempData:
print('temperature reading...')
if (SensorValue['temperature'].get('temperature', 0) != tempData['temperature']
or SensorValue['temperature'].get('humidity', 0) != tempData['humidity']):
SensorValue['temperature']['changed'] = UnixLocalEpoch()
                SensorValue['temperature']['temperature'] = tempData['temperature']
|
michalliu/OpenWrt-Firefly-Libraries
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_importlib/import_/test_caching.py
|
Python
|
gpl-2.0
| 3,643 | 0.000823 |
"""Test that sys.modules is used properly by import."""
from .. import util
from . import util as import_util
import sys
from types import MethodType
import unittest
class UseCache:
"""When it comes to sys.modules, import prefers it over anything else.
Once a name has been resolved, sys.modules is checked to see if it contains
the module desired. If so, then it is returned [use cache]. If it is not
found, then the proper steps are taken to perform the import, but
sys.modules is still used to return the imported module (e.g., not what a
loader returns) [from cache on return]. This also applies to imports of
things contained within a package and thus get assigned as an attribute
[from cache to attribute] or pulled in thanks to a fromlist import
[from cache for fromlist]. But if sys.modules contains None then
ImportError is raised [None in cache].
"""
def test_using_cache(self):
# [use cache]
module_to_use = "some module found!"
with util.uncache('some_module'):
sys.modules['some_module'] = module_to_use
module = self.__import__('some_module')
self.assertEqual(id(module_to_use), id(module))
def test_None_in_cache(self):
#[None in cache]
        name = 'using_None'
with util.uncache(name):
sys.modules[name] = None
with self.assertRaises(ImportError) as cm:
self.__import__(name)
self.assertEqual(cm.exception.name, name)
Frozen_UseCache, Source_UseCache = util.test_both(
UseCache, __import__=import_util.__import__)
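# Hypothetical helper (not part of the original test suite): a minimal standalone
# illustration of the caching rule described in the UseCache docstring above --
# once a module object sits in sys.modules, import simply hands it back.
def _demo_sys_modules_cache():
    import types
    fake = types.ModuleType('fake_mod')
    sys.modules['fake_mod'] = fake
    try:
        import fake_mod
        assert fake_mod is fake
    finally:
        del sys.modules['fake_mod']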
class ImportlibUseCache(UseCache, unittest.TestCase):
# Pertinent only to PEP 302; exec_module() doesn't return a module.
__import__ = import_util.__import__[1]
def create_mock(self, *names, return_=None):
mock = util.mock_modules(*names)
original_load = mock.load_module
def load_module(self, fullname):
original_load(fullname)
return return_
mock.load_module = MethodType(load_module, mock)
return mock
# __import__ inconsistent between loaders and built-in import when it comes
# to when to use the module in sys.modules and when not to.
def test_using_cache_after_loader(self):
# [from cache on return]
with self.create_mock('module') as mock:
with util.import_state(meta_path=[mock]):
module = self.__import__('module')
self.assertEqual(id(module), id(sys.modules['module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_assigning_to_attribute(self):
# [from cache to attribute]
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg.module')
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_fromlist(self):
# [from cache for fromlist]
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg', fromlist=['module'])
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
if __name__ == '__main__':
unittest.main()
|
mrakitin/sirepo
|
sirepo/job_api.py
|
Python
|
apache-2.0
| 8,078 | 0.002723 |
u"""Entry points for job execution
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkinspect, pkjson
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp, pkdpretty
from sirepo import api_perm
from sirepo import simulation_db
from sirepo.template import template_common
import inspect
import pykern.pkconfig
import pykern.pkio
import re
import requests
import sirepo.auth
import sirepo.http_reply
import sirepo.http_request
import sirepo.job
import sirepo.mpi
import sirepo.sim_data
import sirepo.util
cfg = None
#: how many call frames to search backwards to find the api_.* caller
_MAX_FRAME_SEARCH_DEPTH = 6
@api_perm.require_user
def api_downloadDataFile(simulation_type, simulation_id, model, frame, suffix=None):
#TODO(robnagler) validate suffix and frame
req = sirepo.http_request.parse_params(
id=simulation_id,
model=model,
type=simulation_type,
check_sim_exists=True,
)
s = suffix and sirepo.srschema.parse_name(suffix)
t = None
with simulation_db.tmp_dir() as d:
# TODO(e-carlin): computeJobHash
t = sirepo.job.DATA_FILE_ROOT.join(sirepo.job.unique_key())
t.mksymlinkto(d, absolute=True)
try:
_request(
computeJobHash='unused',
dataFileKey=t.basename,
frame=int(frame),
isParallel=False,
req_data=req.req_data,
suffix=s,
)
f = d.listdir()
if len(f) > 0:
assert len(f) == 1, \
'too many files={}'.format(f)
return sirepo.http_reply.gen_file_as_attachment(f[0])
except requests.exceptions.HTTPError:
#TODO(robnagler) HTTPError is too coarse a check
pass
finally:
if t:
pykern.pkio.unchecked_remove(t)
raise sirepo.util.raise_not_found(
'frame={} not found {id} {type}'.format(frame, **req)
)
@api_perm.allow_visitor
def api_jobSupervisorPing():
import requests.exceptions
e = None
try:
k = sirepo.job.unique_key()
r = _request(
_request_content=PKDict(ping=k),
_request_uri=cfg.supervisor_uri + sirepo.job.SERVER_PING_URI,
)
if r.get('state') != 'ok':
return r
try:
x = r.pknested_get('ping')
if x == k:
return r
e = 'expected={} but got ping={}'.format(k, x)
except KeyError:
e = 'incorrectly formatted reply'
pkdlog(r)
except requests.exceptions.ConnectionError:
e = 'unable to connect to supervisor'
except Exception as e:
pkdlog(e)
e = 'unexpected exception'
return PKDict(state='error', error=e)
@api_perm.require_user
def api_runCancel():
try:
return _request()
except Exception as e:
        pkdlog('ignoring exception={} stack={}', e, pkdexc())
# Always true from the client's perspective
return sirepo.http_reply.gen_json({'state': 'canceled'})
@api_perm.require_user
def api_runSimulation():
r = _request_content(PKDict(fixup_old_data=True))
# TODO(e-carlin): This should really be done in job_supervisor._lib_dir_symlink()
# but that is outside of the Flask context so it won't work
r.simulation_lib_dir = sirepo.simulation_db.simulation_lib_dir(r.simulationType)
return _request(_request_content=r)
@api_perm.require_user
def api_runStatus():
return _request()
@api_perm.require_user
def api_simulationFrame(frame_id):
return template_common.sim_frame(
frame_id,
lambda a: _request(
analysisModel=a.frameReport,
# simulation frames are always sequential requests even though
# the report name has 'animation' in it.
isParallel=False,
req_data=PKDict(**a),
)
)
@api_perm.require_user
def api_sbatchLogin():
r = _request_content(
PKDict(computeJobHash='unused', jobRunMode=sirepo.job.SBATCH),
)
r.sbatchCredentials = r.pkdel('data')
return _request(_request_content=r)
def init_apis(*args, **kwargs):
global cfg
#TODO(robnagler) if we recover connections with agents and running jobs remove this
pykern.pkio.unchecked_remove(sirepo.job.LIB_FILE_ROOT, sirepo.job.DATA_FILE_ROOT)
pykern.pkio.mkdir_parent(sirepo.job.LIB_FILE_ROOT)
pykern.pkio.mkdir_parent(sirepo.job.DATA_FILE_ROOT)
cfg = pykern.pkconfig.init(
supervisor_uri=sirepo.job.DEFAULT_SUPERVISOR_URI_DECL,
)
def _request(**kwargs):
def get_api_name():
f = inspect.currentframe()
for _ in range(_MAX_FRAME_SEARCH_DEPTH):
m = re.search(r'^api_.*$', f.f_code.co_name)
if m:
return m.group()
f = f.f_back
else:
raise AssertionError(
'{}: max frame search depth reached'.format(f.f_code)
)
k = PKDict(kwargs)
u = k.pkdel('_request_uri') or cfg.supervisor_uri + sirepo.job.SERVER_URI
c = k.pkdel('_request_content') or _request_content(k)
c.pkupdate(
api=get_api_name(),
serverSecret=sirepo.job.cfg.server_secret,
)
pkdlog(
'api={} runDir={}',
c.api,
c.get('runDir')
)
r = requests.post(
u,
data=pkjson.dump_bytes(c),
headers=PKDict({'Content-type': 'application/json'}),
verify=sirepo.job.cfg.verify_tls,
)
r.raise_for_status()
return pkjson.load_any(r.content)
def _request_content(kwargs):
d = kwargs.pkdel('req_data')
if not d:
        #TODO(robnagler) need to use parsed values, ok for now, because none of
# of the used values are modified by parse_post. If we have files (e.g. file_type, filename),
# we need to use those values from parse_post
d = sirepo.http_request.parse_post(
fixup_old_data=kwargs.pkdel('fixup_old_data', False),
id=True,
model=True,
check_sim_exists=True,
).req_data
s = sirepo.sim_data.get_class(d)
##TODO(robnagler) this should be req_data
b = PKDict(data=d, **kwargs)
# TODO(e-carlin): some of these fields are only used for some type of reqs
b.pksetdefault(
analysisModel=lambda: s.parse_model(d),
computeJobHash=lambda: d.get('computeJobHash') or s.compute_job_hash(d),
computeJobSerial=lambda: d.get('computeJobSerial', 0),
computeModel=lambda: s.compute_model(d),
isParallel=lambda: s.is_parallel(d),
#TODO(robnagler) relative to srdb root
simulationId=lambda: s.parse_sid(d),
simulationType=lambda: d.simulationType,
).pkupdate(
reqId=sirepo.job.unique_key(),
runDir=str(simulation_db.simulation_run_dir(d)),
uid=sirepo.auth.logged_in_user(),
).pkupdate(
computeJid=s.parse_jid(d, uid=b.uid),
userDir=str(sirepo.simulation_db.user_dir_name(b.uid)),
)
return _run_mode(b)
def _run_mode(request_content):
if 'models' not in request_content.data:
return request_content
#TODO(robnagler) make sure this is set for animation sim frames
m = request_content.data.models.get(request_content.computeModel)
j = m and m.get('jobRunMode')
if not j:
request_content.jobRunMode = sirepo.job.PARALLEL if request_content.isParallel \
else sirepo.job.SEQUENTIAL
return request_content
if j not in simulation_db.JOB_RUN_MODE_MAP:
raise sirepo.util.Error(
'invalid jobRunMode={} computeModel={} computeJid={}'.format(
j,
request_content.computeModel,
request_content.computeJid,
)
)
return request_content.pkupdate(
jobRunMode=j,
sbatchCores=m.sbatchCores,
sbatchHours=m.sbatchHours,
)
|
oculusstorystudio/kraken
|
Python/kraken/ui/preference_editor.py
|
Python
|
bsd-3-clause
| 7,028 | 0.001565 |
#
# Copyright 2010-2014 Fabric Technologies Inc. All rights reserved.
#
import os
import json
import collections
from kraken.ui.Qt import QtGui, QtWidgets, QtCore
class PreferenceEditor(QtWidgets.QDialog):
"""A widget providing the ability to nest """
def __init__(self, parent=None):
# constructors of base classes
super(PreferenceEditor, self).__init__(parent)
self.setObjectName('PreferenceEditor')
self.setWindowTitle('Preference Editor')
self.setWindowFlags(QtCore.Qt.Dialog)
self.resize(600, 300)
self.prefValueWidgets = []
self.createLayout()
self.createConnections()
def createLayout(self):
# Parent Layout
self._topLayout = QtWidgets.QVBoxLayout()
self._topLayout.setContentsMargins(0, 0, 0, 0)
self._topLayout.setSpacing(0)
self._mainWidget = QtWidgets.QWidget()
self._mainWidget.setObjectName('mainPrefWidget')
# Main Layout
self._mainLayout = QtWidgets.QVBoxLayout(self._mainWidget)
self._mainLayout.setContentsMargins(0, 0, 0, 0)
self._mainLayout.setSpacing(0)
self._preferenceLayout = QtWidgets.QGridLayout()
self._preferenceLayout.setContentsMargins(10, 10, 10, 10)
self._preferenceLayout.setSpacing(3)
self._preferenceLayout.setColumnMinimumWidth(0, 200)
self._preferenceLayout.setColumnStretch(0, 1)
self._preferenceLayout.setColumnStretch(1, 2)
# Add widgets based on type here
preferences = self.parentWidget().window().preferences.getPreferences()
i = 0
sortedPrefs = collections.OrderedDict(sorted(preferences.items(), key=lambda p: p[0]))
for k, v in sortedPrefs.iteritems():
labelFrameWidget = QtWidgets.QFrame()
labelFrameWidget.setObjectName('prefLabelWidgetFrame')
labelFrameWidget.setFrameStyle(QtWidgets.QFrame.NoFrame | QtWidgets.QFrame.Plain)
labelFrameWidget.setToolTip(v['description'])
labelFrameLayout = QtWidgets.QHBoxLayout()
prefLabel = QtWidgets.QLabel(v['nice_name'], self)
prefLabel.setProperty('labelClass', 'preferenceLabel')
prefLabel.setObjectName(k + "_label")
prefLabel.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
prefLabel.setMinimumWidth(300)
labelFrameLayout.addWidget(prefLabel)
labelFrameWidget.setLayout(labelFrameLayout)
self._preferenceLayout.addWidget(labelFrameWidget, i, 0)
if v['type'] == 'bool':
valueFrameWidget = QtWidgets.QFrame()
valueFrameWidget.setObjectName('prefValueWidgetFrame')
valueFrameWidget.setFrameStyle(QtWidgets.QFrame.NoFrame | QtWidgets.QFrame.Plain)
valueFrameLayout = QtWidgets.QHBoxLayout()
valueWidget = QtWidgets.QCheckBox(self)
valueWidget.setObjectName(k + "_valueWidget")
valueWidget.setChecked(v['value'])
valueFrameLayout.addWidget(valueWidget)
valueFrameWidget.setLayout(valueFrameLayout)
self._preferenceLayout.addWidget(valueFrameWidget, i, 1, 1, 1)
self.prefValueWidgets.append(valueWidget)
i += 1
# OK and Cancel buttons
buttonLayout = QtWidgets.QHBoxLayout()
buttonLayout.setContentsMargins(10, 10, 10, 10)
buttons = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
buttonLayout.addWidget(buttons)
# Menu Bar
self.menu_bar = QtWidgets.QMenuBar()
self.file_menu = self.menu_bar.addMenu('&File')
self.importPrefAction = self.file_menu.addAction('&Import...')
self.exportPrefAction = self.file_menu.addAction('&Export...')
self._mainLayout.addWidget(self.menu_bar)
self._mainLayout.addLayout(self._preferenceLayout)
self._mainLayout.addStretch(1)
self._mainLayout.addLayout(buttonLayout)
self._topLayout.addWidget(self._mainWidget)
self.setLayout(self._topLayout)
def createConnections(self):
self.importPrefAction.triggered.connect(self.importPrefs)
self.exportPrefAction.triggered.connect(self.exportPrefs)
def importPrefs(self):
fileDialog = QtWidgets.QFileDialog(self)
fileDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, on=True)
fileDialog.setWindowTitle('Import Preferences')
fileDialog.setDirectory(os.path.expanduser('~'))
fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptOpen)
fileDialog.setNameFilter('JSON files (*.json)')
if fileDialog.exec_() == QtWidgets.QFileDialog.Accepted:
filePath = fileDialog.selectedFiles()[0]
with open(filePath, "r") as openPrefFile:
loadedPrefs = json.load(openPrefFile)
self.parentWidget().window().preferences.loadPreferences(loadedPrefs)
self.updatePrefValues()
def exportPrefs(self):
fileDialog = QtWidgets.QFileDialog(self)
fileDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, on=True)
fileDialog.setWindowTitle('Export Preferences')
fileDialog.setDirectory(os.path.expanduser('~'))
fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
        fileDialog.setNameFilter('JSON files (*.json)')
fileDialog.setDefaultSuffix('json')
if fileDialog.exec_() == QtWidgets.QFileDialog.Accepted:
filePath = fileDialog.selectedFiles()[0]
            preferences = self.parentWidget().window().preferences.getPreferences()
with open(filePath, "w+") as savePrefFile:
json.dump(preferences, savePrefFile)
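    # Illustrative only (names are assumptions): judging from the keys read in
    # createLayout() above, an exported preferences JSON file is a mapping of
    # roughly this shape:
    #
    #   {
    #       "snap_to_grid": {
    #           "nice_name": "Snap To Grid",
    #           "description": "Snap nodes to the graph grid.",
    #           "type": "bool",
    #           "value": true
    #       }
    #   }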
def updatePrefValues(self):
"""Updates the preference widgets with the values from the preferences.
This is used when loading preferences from a file so that the widgets in
the UI match what was loaded.
"""
preferences = self.parentWidget().window().preferences
for widget in self.prefValueWidgets:
prefName = widget.objectName().rsplit('_', 1)[0]
pref = preferences.getPreference(prefName)
if pref['type'] == 'bool':
widget.setChecked(pref['value'])
# =======
# Events
# =======
def accept(self):
preferences = self.parentWidget().window().preferences
for widget in self.prefValueWidgets:
if type(widget) == QtWidgets.QCheckBox:
prefName = widget.objectName().rsplit('_', 1)[0]
preferences.setPreference(prefName, widget.isChecked())
super(PreferenceEditor, self).accept()
def closeEvent(self, event):
pass
|
fsschneider/DeepOBS
|
docs/conf.py
|
Python
|
mit
| 5,559 | 0 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = u'DeepOBS'
copyright = u'2019, Frank Schneider'
author = u'Frank Schneider, Lukas Balles & Philipp Hennig'
# The short X.Y version
version = u'1.1'
# The full version, including alpha/beta/rc tags
release = u'1.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinxarg.ext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Theme options
html_theme_options = {
'collapse_navigation': False, # Collapse navigation
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepOBSdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DeepOBS.tex', u'DeepOBS Documentation',
u'Frank Schneider', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deepobs', u'DeepOBS Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DeepOBS', u'DeepOBS Documentation',
author, 'DeepOBS', 'Documentation for the DeepOBS package.',
'Frank Schneider'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
smartquotes = False
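# Build note (an assumption based on the usual Sphinx layout, not stated in this
# file): the HTML docs would typically be built from the repository root with
#
#   sphinx-build -b html docs docs/_build/html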
|
emirot/codefights
|
python/primesSum.py
|
Python
|
apache-2.0
| 136 | 0.014706 |
def primesSum(a, b):
    return sum([a for a in range(a, b+1) if not (a < 2 or any(a % x == 0 for x in range(2, int(a ** 0.5) + 1)))])
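# Illustrative check (not part of the original solution): the primes in [1, 10]
# are 2, 3, 5 and 7, so primesSum(1, 10) == 17.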
|
Azure/azure-sdk-for-python
|
sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/aio/__init__.py
|
Python
|
mit
| 524 | 0.003817 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._test_base import TestBase
__all__ = ['TestBase']
|
alsrgv/tensorflow
|
tensorflow/python/keras/utils/generic_utils.py
|
Python
|
apache-2.0
| 19,553 | 0.007978 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."
|
""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import codecs
import marshal
import os
import re
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
_GLOBAL_CUSTOM_OBJECTS = {}
@keras_export('keras.utils.CustomObjectScope')
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
@keras_export('keras.utils.custom_object_scope')
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
@keras_export('keras.utils.get_custom_objects')
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_class_and_config(cls_name, cls_config):
"""Returns the serialization of the class with the given config."""
return {'class_name': cls_name, 'config': cls_config}
@keras_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
return serialize_keras_class_and_config(instance.__class__.__name__,
instance.get_config())
if hasattr(instance, '__name__'):
return instance.__name__
raise ValueError('Cannot serialize', instance)
def class_and_config_for_serialized_keras_object(
config,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Returns the class name and config for a serialized keras object."""
if (not isinstance(config, dict) or 'class_name' not in config or
'config' not in config):
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
return (cls, config['config'])
@keras_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
if identifier is None:
return None
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
(cls, cls_config) = class_and_config_for_serialized_keras_object(
config, module_objects, custom_objects, printable_module_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
return cls.from_config(
cls_config,
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(cls_config)
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**cls_config)
elif isinstance(identifier, six.string_types):
object_name = identifier
if custom_objects and object_name in custom_objects:
obj = custom_objects.get(object_name)
elif object_name in _GLOBAL_CUSTOM_OBJECTS:
obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
else:
obj = module_objects.get(object_name)
if obj is None:
raise ValueError('Unknown ' + printable_module_name + ':' + object_name)
# Classes passed by name are instantiated with no args, functions are
# returned as-is.
if tf_inspect.isclass(obj):
return obj()
return obj
else:
raise ValueError('Could not interpret serialized ' + printable_module_name +
': ' + identifier)
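# Illustrative usage sketch (added here for clarity; `MyObject` is hypothetical):
#
#   config = {'class_name': 'MyObject', 'config': {}}
#   obj = deserialize_keras_object(config, custom_objects={'MyObject': MyObject})
#
# A plain string identifier is also accepted and is resolved through
# `custom_objects`, `_GLOBAL_CUSTOM_OBJECTS` or `module_objects`, in that order.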
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == 'nt':
raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/')
code = codecs.encode(raw_code, 'base64').decode('ascii')
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the
|
ElliottMiller/python-azuread-sample
|
manage.py
|
Python
|
mit
| 336 | 0 |
#!/usr/bin/env python
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE
|
",
"AzureAuthSample.settings"
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
hyperion-rt/hyperion
|
hyperion/grid/tests/test_io.py
|
Python
|
bsd-2-clause
| 6,744 | 0.000148 |
from copy import deepcopy
import h5py
import numpy as np
import pytest
import six
from ...util.functions import virtual_file
from .. import (CartesianGrid,
CylindricalPolarGrid,
SphericalPolarGrid,
AMRGrid,
OctreeGrid)
ALL_GRID_TYPES = ['car', 'sph', 'cyl', 'amr', 'oct']
def exc_msg(exc):
if isinstance(exc.value, six.string_types):
return exc.value
elif type(exc.value) is tuple:
return exc.value[0]
else:
return exc.value.args[0]
class TestView(object):
def setup_method(self, method):
# Set up grids
self.grid = {}
self.grid['car'] = CartesianGrid([-1., 1.],
[-2., 2.],
[-3., 3.])
self.grid['cyl'] = CylindricalPolarGrid([0., 1.],
[-1., 1.],
[0., 2. * np.pi])
self.grid['sph'] = SphericalPolarGrid([0., 1.],
[0., np.pi],
[0., 2. * np.pi])
self.grid['amr'] = AMRGrid()
level = self.grid['amr'].add_level()
grid = level.add_grid()
grid.xmin, grid.xmax = -1., 1.
grid.ymin, grid.ymax = -1., 1.
grid.zmin, grid.zmax = -1., 1.
grid.nx, grid.ny, grid.nz = 8, 8, 8
refined = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
self.grid['oct'] = OctreeGrid(0., 0., 0., 10., 10., 10.,
np.array(refined).astype(bool))
# Set up empty grid class
self.grid_empty = {}
self.grid_empty['car'] = CartesianGrid
self.grid_empty['cyl'] = CylindricalPolarGrid
self.grid_empty['sph'] = SphericalPolarGrid
self.grid_empty['amr'] = AMRGrid
self.grid_empty['oct'] = OctreeGrid
# Set up initial densities
self.density = {}
self.density['car'] = np.array([[[1.]]])
self.density['cyl'] = np.array([[[1.]]])
self.density['sph'] = np.array([[[1.]]])
amr_q = deepcopy(self.grid['amr'])
amr_q.levels[0].grids[0].quantities['density'] = np.ones((8, 8, 8))
self.density['amr'] = amr_q['density']
self.density['oct'] = np.ones(len(refined))
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_empty(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust is None
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_single(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust == 1
if grid_type == 'amr':
assert type(h.levels[0].grids[0].quantities['density']) is list
else:
assert type(h.quantities['density']) is list
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_double(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g['density'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust == 2
if grid_type == 'amr':
assert type(h.levels[0].grids[0].quantities['density']) is list
else:
assert type(h.quantities['density']) is list
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_double_multiple(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g['density'].append(self.density[grid_type])
g['energy'] = []
g['energy'].append(self.density[grid_type])
g['energy'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust == 2
if grid_type == 'amr':
assert type(h.levels[0].grids[0].quantities['density']) is list
assert type(h.levels[0].grids[0].quantities['energy']) is list
else:
assert type(h.quantities['density']) is list
assert type(h.quantities['energy']) is list
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_type_mismatch(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
f['Geometry'].attrs['grid_type'] = 'invalid'.encode('utf-8')
h = self.grid_empty[grid_type]()
        with pytest.raises(Exception) as exc:
h.read(f)
if grid_type == 'car':
assert exc.value.args[0] == "Grid is not cartesian"
elif grid_type == 'cyl':
assert exc.value.args[0] == "Grid is not cylindrical polar"
elif grid_type == 'sph':
assert exc.value.args[0] == "Grid is not spherical polar"
elif grid_type == 'amr':
assert exc.value.args[0] == "Grid is not an AMR grid"
elif grid_type == 'oct':
assert exc.value.args[0] == "Grid is not an octree"
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_hash_mismatch(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
f['Geometry'].attrs['geometry'] = 'a4e2805a72dfcf01b2fd94da0be32511'.encode('utf-8')
h = self.grid_empty[grid_type]()
with pytest.raises(Exception) as exc:
h.read(f)
assert exc.value.args[0] == "Calculated geometry hash does not " \
"match hash in file"
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_groups_exist(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
f.create_group('Geometry')
f.create_group('Quantities')
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
assert h.n_dust == 1
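# Run note (the path is an assumption taken from the repository layout): these are
# pytest-style tests, typically executed with
#
#   pytest hyperion/grid/tests/test_io.py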
|
OpenGenus/cosmos
|
code/mathematical_algorithms/src/perfect_number/perfect_number.py
|
Python
|
gpl-3.0
| 235 | 0 |
def is_perfect_number(n):
sum = 0
for x in range(1, n):
if n % x == 0:
sum += x
return sum == n
num = int(input("Please enter a number to check if it is perfect or not"))
print(is_perfect_number(num))
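# Worked examples (illustrative): 6 and 28 are perfect (1 + 2 + 3 == 6 and
# 1 + 2 + 4 + 7 + 14 == 28), so is_perfect_number(28) returns True, while
# is_perfect_number(12) returns False.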
|
lorien/user_agent
|
setup.py
|
Python
|
mit
| 1,599 | 0.000625 |
import os
from setuptools import setup
ROOT = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(ROOT, 'README.rst'), encoding='utf-8') as inp:
LONG_DESCRIPTION = inp.read()
setup(
# Meta data
name='user_agent',
version='0.1.10',
author="Gregory Petukhov",
author_email='lorien@lorien.name',
maintainer="Gregory Petukhov",
maintainer_email='lorien@lorien.name',
url='https://github.com/lorien/user_agent',
description='User-Agent generator',
long_description=LONG_DESCRIPTION,
download_url='http://pypi.python.org/pypi/user_agent',
keywords="user agent browser navigator",
license="MIT License",
# Package files
packages=['user_agent'],
include_package_data=True,
install_requires=['six'],
entry_points={
'console_scripts': [
'ua = user_agent.cli:script_ua',
],
},
# Topics
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'License :: OSI Approved :: MIT License',
#'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
],
)
|
jiayisuse/cs73
|
wp-admin/data_delete.py
|
Python
|
gpl-2.0
| 2,483 | 0.024567 |
#!/usr/bin/env python
import nltk
import os
import sys
import include
title = sys.argv[1].lower()
html = sys.argv[2].lower()
cate_id = sys.argv[3]
def do_read_train(uni_dict, bi_dict, file):
lines = file.readlines()
for line in lines:
words = line.split()
bi_dict[words[0]] = int(words[2])
uni_dict[words[0].split("|")[1]] = int(words[4])
return int(lines[0].split()[-1])
def frequency_update(uni_dict, bi_dict, new_uni_dict, new_bi_dict):
# update uni dict
for token in new_uni_dict.keys():
if uni_dict.has_key(token):
            uni_dict[token] -= new_uni_dict[token]
            if uni_dict[token] == 0:
del uni_dict[token]
# update bi dict
for key in new_bi_dict:
if bi_dict.has_key(key):
bi_dict[key] -= new_bi_dict[key]
if bi_dict[key] == 0:
del bi_dict[key]
def sort_dict_to(uni_dict, bi_dict, n, sorted_list):
for key in bi_dict:
first = key.split("|")[0]
second = key.split("|")[1]
sorted_list.append([key, float(bi_dict[key]) / uni_dict[second], bi_dict[key], float(uni_dict[second]) / n, uni_dict[second], n])
    sorted_list[:] = sorted(sorted_list, key=lambda x: x[4], reverse=True)  # sort in place so the caller's list is actually ordered
text = nltk.clean_html(html)
cate_dir = os.path.join(include.dataset_dir, cate_id)
if not os.access(cate_dir, os.F_OK):
os.makedirs(cate_dir)
file = open(os.path.join(cate_dir, title + ".txt"), "w")
file.write(text)
file.close()
train_file = os.path.join(cate_dir, cate_id + include.bi_train_suffix)
uni_dict = {}
bi_dict = {}
n = 0
try:
with open(train_file, "r") as file:
n = do_read_train(uni_dict, bi_dict, file)
file.close()
except IOError:
pass
tokens = include.my_tokenizer(text)
if "" in tokens:
tokens.remove("")
# read unigram frequency from new post
num_tokens = len(tokens)
new_uni_dict = {}
for token in tokens:
if new_uni_dict.has_key(token):
new_uni_dict[token] += 1
else:
new_uni_dict[token] = 1
# read bigram frequency from new post
new_bi_dict = {}
for i in range(1, len(tokens)):
key = tokens[i] + "|" + tokens[i - 1]
if new_bi_dict.has_key(key):
new_bi_dict[key] += 1
else:
new_bi_dict[key] = 1
frequency_update(uni_dict, bi_dict, new_uni_dict, new_bi_dict)
sorted_list = []
sort_dict_to(uni_dict, bi_dict, n - num_tokens, sorted_list)
file = open(train_file, "w")
file.truncate()
for item in sorted_list:
token = item[0]
bi_p = item[1]
bi_freq = item[2]
uni_p = item[3]
uni_freq = item[4]
nn = item[5]
file.write("%-30s %.8f %6d %16.8f %6s %9d\n" %(token, bi_p, bi_freq, uni_p, uni_freq, nn))
file.close()
|
alirazabhayani/django_workshop_poll_app
|
polls/urls.py
|
Python
|
bsd-3-clause
| 424 | 0.007075 |
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
url(r'^(?P<poll_id>\d+)/vote/$', views.vote, name='vote'),
url(r'^test/$' , views.test_view, name='test_view'),
)
|
zsiki/ulyxes
|
pyapps/measurematrix.py
|
Python
|
gpl-2.0
| 4,051 | 0.005431 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
.. module:: measurematrix.py
.. moduleauthor:: Jozsef Attila Janko, Bence Takacs, Zoltan Siki (code optimization)
Sample application of Ulyxes PyAPI to measure within a rectangular area
:param argv[1] (int): number of horizontal intervals (between measurements), default 1 (perimeter only)
:param argv[2] (int): number of vertical intervals (between measurements), default 1 (perimeter only)
:param argv[3] (sensor): 1100/1800/1200/5500, default 1100
:param argv[4] (port): serial port, default COM5
:param argv[5]: output file, default stdout
usage: python measurematrix.py 9 3 1100 COM5
"""
import re
import sys
sys.path.append('../pyapi/')
from angle import Angle
from serialiface import SerialIface
from totalstation import TotalStation
from echowriter import EchoWriter
from filewriter import FileWriter
from leicatps1200 import LeicaTPS1200
from leicatcra1100 import LeicaTCRA1100
from trimble5500 import Trimble5500
if __name__ == "__main__":
if sys.version_info[0] > 2: # Python 3 compatibility
raw_input = input
if len(sys.argv) == 1:
print("Usage: {0:s} horizontal_step vertical_step instrument port output_file".format(sys.argv[0]))
exit(1)
# set horizontal stepping interval dh_nr
dh_nr = 1
if len(sys.argv) > 1:
try:
dh_nr = int(sys.argv[1])
except ValueError:
print("invalid numeric value " + sys.argv[1])
sys.exit(1)
# set vertical stepping interval dv_nr
dv_nr = 1
if len(sys.argv) > 2:
try:
dv_nr = int(sys.argv[2])
except ValueError:
print("invalid numeric value " + sys.argv[2])
#sys.exit(1)
# set instrument
stationtype = '1100'
if len(sys.argv) > 3:
stationtype = sys.argv[3]
if re.search('120[0-9]$', stationtype):
mu = LeicaTPS1200()
elif re.search('110[0-9]$', stationtype):
mu = LeicaTCRA1100()
elif re.search('550[0-9]$', stationtype):
mu = Trimble5500()
else:
print("unsupported instrument type")
sys.exit(1)
# set port
port = '/dev/ttyUSB0'
if len(sys.argv) > 4:
port = sys.argv[4]
iface = SerialIface("test", port)
# set output file name
fn = None
if len(sys.argv) > 5:
fn = sys.argv[5]
# write out measurements
if fn:
wrt = FileWriter(angle='DEG', dist='.3f', fname=fn)
else:
wrt = EchoWriter(angle='DEG', dist='.3f')
if wrt.GetState() != wrt.WR_OK:
sys.exit(-1) # open error
ts = TotalStation(stationtype, mu, iface, wrt)
if isinstance(mu, Trimble5500):
print("Please change to reflectorless EDM mode (MNU 722 from keyboard)")
print("and turn on red laser (MNU 741 from keyboard) and press enter!")
raw_input()
else:
ts.SetATR(0) # turn ATR off
ts.SetEDMMode('RLSTANDARD') # reflectorless distance measurement
ts.SetRedLaser(1) # turn red laser on
w = raw_input("Target on lower left corner and press Enter")
w1 = ts.GetAngles()
w = raw_input("Target on upper right corner and press Enter")
w2 = ts.GetAngles()
dh = (w2['hz'].GetAngle() - w1['hz'].GetAngle()) / dh_nr
dv = (w2['v'].GetAngle() - w1['v'].GetAngle()) / dv_nr
# measurement loops
for i in range(dh_nr+1): # horizontal loop
measdir = i % 2 # check modulo
hz = Angle(w1['hz'].GetAngle() + i * dh, 'RAD')
for j in range(dv_nr+1): # vertical loop
if measdir == 0:
# move downward at odd steps to right
ts.Move(hz, Angle(w1['v'].GetAngle() + j * dv, 'RAD'))
else:
                # move upward at even steps to right
ts.Move(hz, Angle(w2['v'].GetAngle() - j * dv, 'RAD'))
ts.Measure()
meas = ts.GetMeasure()
if ts.measureIface.state != ts.measureIface.IF_OK or 'errorCode' in meas:
print('FATAL Cannot measure point')
|
immenz/pyload
|
module/plugins/accounts/SimplyPremiumCom.py
|
Python
|
gpl-3.0
| 1,650 | 0.008485 |
# -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.Account import Account
class SimplyPremiumCom(Account):
__name__ = "SimplyPremiumCom"
__type__ = "account"
__version__ = "0.05"
__description__ = """Simply-Premium.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("EvolutionClip", "evolutionclip@live.de")]
def loadAccountInfo(self, user, req):
premium = False
validuntil = -1
trafficleft = None
json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
self.logDebug("JSON data: %s" % json_data)
json_data = json_loads(json_data)
if 'vip' in json_data['result'] and json_data['result']['vip']:
premium = True
if 'timeend' in json_data['result'] and json_data['result']['timeend']:
            validuntil = float(json_data['result']['timeend'])
if 'remain_traffic' in json_data['result'] and json_data['result']['remain_traffic']:
trafficleft = float(json_data['result']['remain_traffic']) / 1024 #@TODO: Remove `/ 1024` in 0.4.10
return {"premium": premium, "validuntil": validuntil, "trafficleft": trafficleft}
def login(self, user, data, req):
req.cj.setCookie("simply-premium.com", "lang", "EN")
html = req.load("http://www.simply-premium.com/login.php",
post={'key': user} if not data['password'] else {'login_name': user, 'login_pass': data['password']},
decode=True)
if 'logout' not in html:
self.wrongPassword()
|
willysbrewing/willys_website
|
willys_website/core/migrations/0002_auto_20170129_1714.py
|
Python
|
apache-2.0
| 7,007 | 0.001284 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-29 16:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0016_deprecate_rendition_filter_relation'),
('wagtailcore', '0032_add_bulk_delete_page_permission'),
('willys_website', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LandingPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html'))))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='LandingPageHero',
fields=[
('heroitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='willys_website.HeroItem')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='hero', to='willys_website.LandingPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=('willys_website.heroitem', models.Model),
),
migrations.RenameField(
model_name='productpage',
old_name='cost',
new_name='price',
),
migrations.RemoveField(
model_name='heroitem',
name='position',
),
migrations.AddField(
            model_name='productpage',
name='bg_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='productpage',
name='ibu',
field=models.CharField(default='notset', max_length=255),
),
migrations.AddField(
model_name='productpage',
name='proof',
field=models.CharField(default='notset', max_length=255),
),
migrations.AddField(
model_name='productpage',
name='style',
field=models.CharField(default='notset', max_length=255),
),
migrations.AddField(
model_name='productpage',
name='subtitle',
field=models.CharField(default='notset', max_length=255),
),
migrations.AlterField(
model_name='blogpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html')))),
),
migrations.AlterField(
model_name='eventpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html')))),
),
migrations.AlterField(
model_name='productpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html')))),
),
migrations.AlterField(
model_name='productpage',
name='name',
field=models.CharField(max_length=255),
),
]
|
tensorflow/neural-structured-learning
|
research/gam/gam/trainer/trainer_classification_gcn.py
|
Python
|
apache-2.0
| 38,149 | 0.004351 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer for classification models for Graph Agreement Models without a graph.
This class contains functionality that allows for training a classification
model to be used as part of Graph Agreement Models.
This implementation does not use a provided graph, but samples random pairs
of samples.
"""
import logging
import os
from .adversarial_sparse import entropy_y_x
from .adversarial_sparse import get_loss_vat
import numpy as np
import tensorflow as tf
from .trainer_base import batch_iterator
from .trainer_base import Trainer
class TrainerClassificationGCN(Trainer):
"""Trainer for the classifier component of a Graph Agreement Model.
Attributes:
model: A Model object that is used to provide the architecture of the
classification model.
is_train: A placeholder for a boolean value specyfing if the model is used
for train or evaluation.
data: A CotrainDataset object.
trainer_agr: A TrainerAgreement or TrainerPerfectAgreement object.
optimizer: Optimizer used for training the classification model.
batch_size: Batch size for used when training and evaluating the
classification model.
gradient_clip: A float number representing the maximum gradient norm allowed
if we do gradient clipping. If None, no gradient clipping is performed.
min_num_iter: An integer representing the minimum number of iterations to
train the classification model.
max_num_iter: An integer representing the maximum number of iterations to
train the classification model.
num_iter_after_best_val: An integer representing the number of extra
iterations to perform after improving the validation set accuracy.
max_num_iter_cotrain: An integer representing the maximum number of cotrain
iterations to train for.
reg_weight_ll: A float representing the weight of the agreement loss term
component of the classification model loss function, between
labeled-labeled pairs of samples.
reg_weight_lu: A float representing the weight of the agreement loss term
component of the classification model loss function, between
labeled-unlabeled pairs of samples.
reg_weight_uu: A float representing the weight of the agreement loss term
component of the classification model loss function, between
unlabeled-unlabeled pairs of samples.
num_pairs_reg: An integer representing the number of sample pairs of each
type (LL, LU, UU) to include in each computation of the classification
model loss.
iter_cotrain: A Tensorflow variable containing the current cotrain
iteration.
reg_weight_vat: A float representing the weight of the virtual adversarial
training (VAT) regularization loss in the classification model loss
function.
use_ent_min: A boolean specifying whether to use entropy regularization with
VAT.
enable_summaries: Boolean specifying whether to enable variable summaries.
summary_step: Integer representing the summary step size.
    summary_dir: String representing the path to a directory where to save the
variable summaries.
    logging_step: Integer representing the number of iterations after which we
log the loss of the model.
eval_step: Integer representing the number of iterations after which we
evaluate the model.
warm_start: Whether the model parameters are initialized at their best value
in the previous cotrain iteration. If False, they are reinitialized.
gradient_clip=None,
abs_loss_chg_tol: A float representing the absolute tolerance for checking
if the training loss has converged. If the difference between the current
loss and previous loss is less than `abs_loss_chg_tol`, we count this
iteration towards convergence (see `loss_chg_iter_below_tol`).
rel_loss_chg_tol: A float representing the relative tolerance for checking
if the training loss has converged. If the ratio between the current loss
and previous loss is less than `rel_loss_chg_tol`, we count this iteration
towards convergence (see `loss_chg_iter_below_tol`).
loss_chg_iter_below_tol: An integer representing the number of consecutive
iterations that pass the convergence criteria before stopping training.
checkpoints_dir: Path to the folder where to store TensorFlow model
checkpoints.
weight_decay: Weight for the weight decay term in the classification model
loss.
weight_decay_schedule: Schedule how to adjust the classification weight
decay weight after every cotrain iteration.
penalize_neg_agr: Whether to not only encourage agreement between samples
that the agreement model believes should have the same label, but also
penalize agreement when two samples agree when the agreement model
predicts they should disagree.
first_iter_original: A boolean specifying whether the first cotrain
iteration trains the original classification model (with no agreement
term).
use_l2_classif: Whether to use L2 loss for classification, as opposed to the
whichever loss is specified in the provided model_cls.
seed: Seed used by all the random number generators in this class.
use_graph: Boolean specifying whether the agreement loss is applied to graph
edges, as opposed to random pairs of samples.
"""
def __init__(self,
model,
data,
trainer_agr,
optimizer,
lr_initial,
batch_size,
min_num_iter,
max_num_iter,
num_iter_after_best_val,
max_num_iter_cotrain,
reg_weight_ll,
reg_weight_lu,
reg_weight_uu,
num_pairs_reg,
iter_cotrain,
reg_weight_vat=0.0,
use_ent_min=False,
enable_summaries=False,
summary_step=1,
summary_dir=None,
warm_start=False,
gradient_clip=None,
logging_step=1,
eval_step=1,
abs_loss_chg_tol=1e-10,
rel_loss_chg_tol=1e-7,
loss_chg_iter_below_tol=30,
checkpoints_dir=None,
weight_decay=None,
weight_decay_schedule=None,
penalize_neg_agr=False,
first_iter_original=True,
use_l2_classif=True,
seed=None,
lr_decay_steps=None,
lr_decay_rate=None,
use_graph=False):
super(TrainerClassificationGCN, self).__init__(
model=model,
abs_loss_chg_tol=abs_loss_chg_tol,
rel_loss_chg_tol=rel_loss_chg_tol,
loss_chg_iter_below_tol=loss_chg_iter_below_tol)
self.data = data
self.trainer_agr = trainer_agr
self.batch_size = batch_size
self.min_num_iter = min_num_iter
self.max_num_iter = max_num_iter
self.num_iter_after_best_val = num_iter_after_best_val
self.max_num_iter_cotrain = max_num_iter_cotrain
self.enable_summaries = enable_summaries
self.summary_step = summary_step
self.summary_dir = summary_dir
self.warm_start = warm_start
self.gradient_clip = gradient_clip
self.logging_step = logging_step
self.eval_step = eval_step
self.checkpoint_path = (
os.path.join(checkpoints_dir, 'classif_best.ckpt')
if checkpoints_dir is not None else None)
self.weight_decay_initial = weight_decay
self.weight_decay_schedule = weight_decay_schedule
self.num_pai
|
bwhmather/python-payment-terminal
|
payment_terminal/tests/__init__.py
|
Python
|
bsd-3-clause
| 320 | 0 |
import unittest
from payment_terminal.tests import test_loader
import payment_terminal.drivers.bbs.tests as test_bbs
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite((
loader.loadTestsFromModule(test_bbs),
loader.loadTestsFromModule(test_loader),
))
return suite
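# Minimal usage sketch (assumed, not part of the original file): the suite can be
# executed with the standard unittest runner.
#
#   if __name__ == '__main__':
#       unittest.TextTestRunner().run(suite())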
|
ryfeus/lambda-packs
|
Tensorflow_Pandas_Numpy/source3.6/tensorboard/plugins/beholder/beholder.py
|
Python
|
mit
| 7,267 | 0.007706 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from tensorboard.plugins.beholder import im_util
from tensorboard.plugins.beholder.file_system_tools import read_pickle,\
write_pickle, write_file
from tensorboard.plugins.beholder.shared_config import PLUGIN_NAME, TAG_NAME,\
SUMMARY_FILENAME, DEFAULT_CONFIG, CONFIG_FILENAME
from tensorboard.plugins.beholder import video_writing
from tensorboard.plugins.beholder.visualizer import Visualizer
class Beholder(object):
def __init__(self, logdir):
self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME
self.is_recording = False
self.video_writer = video_writing.VideoWriter(
self.PLUGIN_LOGDIR,
outputs=[
video_writing.FFmpegVideoOutput,
video_writing.PNGVideoOutput])
self.frame_placeholder = tf.placeholder(tf.uint8, [None, None, None])
self.summary_op = tf.summary.tensor_summary(TAG_NAME,
self.frame_placeholder)
self.last_image_shape = []
self.last_update_time = time.time()
self.config_last_modified_time = -1
self.previous_config = dict(DEFAULT_CONFIG)
    if not tf.gfile.Exists(self.PLUGIN_LOGDIR + '/config.pkl'):
tf.gfile.MakeDirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG, '{}/{}'.format(self.PLUGIN_LOGDIR,
CONFIG_FILENAME))
self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
def _get_config(self):
'''Reads the config file from disk or creates a new one.'''
filename = '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)
modified_time = os.path.getmtime(filename)
if modified_time != self.config_last_modified_time:
config = read_pickle(filename, default=self.previous_config)
self.previous_config = config
else:
config = self.previous_config
self.config_last_modified_time = modified_time
return config
def _write_summary(self, session, frame):
'''Writes the frame to disk as a tensor summary.'''
summary = session.run(self.summary_op, feed_dict={
self.frame_placeholder: frame
})
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
write_file(summary, path)
def _get_final_image(self, session, config, arrays=None, frame=None):
if config['values'] == 'frames':
if frame is None:
final_image = im_util.get_image_relative_to_script('frame-missing.png')
else:
frame = frame() if callable(frame) else frame
final_image = im_util.scale_image_for_display(frame)
elif config['values'] == 'arrays':
if arrays is None:
final_image = im_util.get_image_relative_to_script('arrays-missing.png')
# TODO: hack to clear the info. Should be cleaner.
self.visualizer._save_section_info([], [])
else:
final_image = self.visualizer.build_frame(arrays)
elif config['values'] == 'trainable_variables':
arrays = [session.run(x) for x in tf.trainable_variables()]
final_image = self.visualizer.build_frame(arrays)
if len(final_image.shape) == 2:
# Map grayscale images to 3D tensors.
final_image = np.expand_dims(final_image, -1)
return final_image
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time
def _update_frame(self, session, arrays, frame, config):
final_image = self._get_final_image(session, config, arrays, frame)
self._write_summary(session, final_image)
self.last_image_shape = final_image.shape
return final_image
def _update_recording(self, frame, config):
'''Adds a frame to the current video output.'''
# pylint: disable=redefined-variable-type
should_record = config['is_recording']
if should_record:
if not self.is_recording:
self.is_recording = True
tf.logging.info(
'Starting recording using %s',
self.video_writer.current_output().name())
self.video_writer.write_frame(frame)
elif self.is_recording:
self.is_recording = False
self.video_writer.finish()
tf.logging.info('Finished recording')
  # TODO: blanket try and except for production? I don't want someone's script to die
# after weeks of running because of a visualization.
def update(self, session, arrays=None, frame=None):
'''Creates a frame and writes it to disk.
Args:
arrays: a list of np arrays. Use the "custom" option in the client.
frame: a 2D np array. This way the plugin can be used for video of any
kind, not just the visualization that comes with the plugin.
frame can also be a function, which only is evaluated when the
"frame" option is selected by the client.
'''
new_config = self._get_config()
if self._enough_time_has_passed(self.previous_config['FPS']):
self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(session, arrays, frame, new_config)
self._update_recording(final_image, new_config)
##############################################################################
@staticmethod
def gradient_helper(optimizer, loss, var_list=None):
'''A helper to get the gradients out at each step.
Args:
optimizer: the optimizer op.
loss: the op that computes your loss value.
Returns: the gradient tensors and the train_step op.
'''
if var_list is None:
var_list = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
grads = [pair[0] for pair in grads_and_vars]
return grads, optimizer.apply_gradients(grads_and_vars)
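# Illustrative wiring sketch (assumed, not from the original source): feeding the
# gradients returned by `gradient_helper` back into `Beholder.update` inside a
# TF1-style training loop; `loss`, `sess` and `LOG_DIRECTORY` are placeholders.
#
#   optimizer = tf.train.AdamOptimizer(1e-3)
#   grads, train_step = Beholder.gradient_helper(optimizer, loss)
#   beholder = Beholder(LOG_DIRECTORY)
#   _, grad_values = sess.run([train_step, grads])
#   beholder.update(sess, arrays=grad_values)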
class BeholderHook(tf.train.SessionRunHook):
"""SessionRunHook implementation that runs Beholder every step.
Convenient when using tf.train.MonitoredSession:
```python
beholder_hook = BeholderHook(LOG_DIRECTORY)
with MonitoredSession(..., hooks=[beholder_hook]) as sess:
sess.run(train_op)
```
"""
def __init__(self, logdir):
"""Creates new Hook instance
Args:
logdir: Directory where Beholder should write data.
"""
self._logdir = logdir
self.beholder = None
def begin(self):
self.beholder = Beholder(self._logdir)
def after_run(self, run_context, unused_run_values):
self.beholder.update(run_context.session)
|
expfactory/expfactory
|
expfactory/database/relational.py
|
Python
|
bsd-3-clause
| 8,107 | 0.000617 |
"""
Copyright (c) 2017-2022, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from expfactory.logger import bot
from expfactory.utils import write_json
from expfactory.defaults import EXPFACTORY_SUBID, EXPFACTORY_DATA
from glob import glob
import os
import uuid
import pickle
import json
import sys
# RELATIONAL ###################################################################
#
# This is an Expfactory Flask Server database plugin. It implements common
# functions (generate_subid, save_data, init_db) that should prepare a
# database and perform actions to save data to it. The functions are added
# to the main application upon initialization of the server. This relational
# module has support for sqlite3, mysql, and postgres
#
################################################################################
def generate_subid(self, token=None, return_user=False):
"""generate a new user in the database, still session based so we
create a new identifier.
"""
from expfactory.database.models import Participant
if not token:
p = Participant()
else:
p = Participant(token=token)
self.session.add(p)
self.session.commit()
if return_user is True:
return p
return p.id
def print_user(self, user):
"""print a relational database user"""
status = "active"
token = user.token
if token in ["finished", "revoked"]:
status = token
if token is None:
token = ""
subid = "%s\t%s[%s]" % (user.id, token, status)
print(subid)
return subid
def list_users(self, user=None):
"""list users, each having a model in the database. A headless experiment
will use protected tokens, and interactive will be based on auto-
incremented ids.
"""
from expfactory.database.models import Participant
participants = Participant.query.all()
users = []
for user in participants:
users.append(self.print_user(user))
return users
# Actions ######################################################################
def generate_user(self):
"""generate a new user in the database, still session based so we
create a new identifier. This function is called from the users new
entrypoint, and it assumes we want a user generated with a token.
"""
token = str(uuid.uuid4())
return self.generate_subid(token=token, return_user=True)
def finish_user(self, subid):
"""finish user will remove a user's token, making the user entry not
accesible if running in headless model"""
p = self.revoke_token(subid)
p.token = "finished"
self.session.commit()
return p
def restart_user(self, subid):
"""restart a user, which means revoking and issuing a new token."""
p = self.revoke_token(subid)
p = self.refresh_token(subid)
return p
# Tokens #######################################################################
def validate_token(self, token):
"""retrieve a subject based on a token. Valid means we return a participant
invalid means we return None
"""
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.token == token).first()
if p is not None:
if p.token.endswith(("finished", "revoked")):
p = None
else:
p = p.id
return p
def revoke_token(self, subid):
"""revoke a token by removing it. Is done at finish, and also available
as a command line option"""
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.id == subid).first()
if p is not None:
p.token = "revoked"
self.session.commit()
return p
def refresh_token(self, subid):
"""refresh or generate a new token for a user"""
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.id == subid).first()
if p is not None:
p.token = str(uuid.uuid4())
self.session.commit()
return p
def save_data(self, session, exp_id, content):
"""save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files"""
from expfactory.database.models import Participant, Result
subid = session.get("subid")
token = session.get("token")
self.logger.info("Saving data for subid %s" % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(
Participant.id == subid
).first() # better query here
# Does
if self.headless and p.token != token:
self.logger.warning(
"%s attempting to use mismatched token [%s] skipping save"
% (p.id, token)
)
elif self.headless and p.token.endswith(("finished", "revoked")):
self.logger.warning(
"%s attempting to use expired token [%s] skipping save" % (p.id, token)
)
else:
# Preference is to save data under 'data', otherwise do all of it
if "data" in content:
content = content["data"]
result = Result(
data=content, exp_id=exp_id, participant_id=p.id
) # check if changes from str/int
# Create and save the result
self.session.add(result)
p.results.append(result)
self.session.commit()
self.logger.info("Save [participant] %s [result] %s" % (p, result))
Base = declarative_base()
def init_db(self):
"""initialize the database, with the default database path or custom with
a format corresponding to the database type:
Examples:
sqlite:////scif/data/expfactory.db
"""
# The user can provide a custom string
if self.database is None:
self.logger.error("You must provide a database url, exiting.")
sys.exit(1)
self.engine = create_engine(self.database, convert_unicode=True)
self.session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
)
# Database Setup
Base.query = self.session.query_property()
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
import expfactory.database.models
self.Base = Base
self.Base.metadata.create_all(bind=self.engine)
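# Usage sketch (an assumption; these module-level functions are attached to the
# server's database object, so `self` below stands for that object):
#
#   self.database = "sqlite:////scif/data/expfactory.db"
#   self.init_db()
#   user = self.generate_user()   # issues a token-protected participant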
|
jfinkels/flask-restless
|
flask_restless/search/drivers.py
|
Python
|
agpl-3.0
| 6,012 | 0 |
# drivers.py - high-level functions for filtering SQLAlchemy queries
#
# Copyright 2011 Lincoln de Sousa <lincoln@comum.org>.
# Copyright 2012, 2013, 2014, 2015, 2016 Jeffrey Finkelstein
# <jeffrey.finkelstein@gmail.com> and contributors.
#
# This file is part of Flask-Restless.
#
# Flask-Restless is distributed under both the GNU Affero General Public
# License version 3 and under the 3-clause BSD license. For more
# information, see LICENSE.AGPL and LICENSE.BSD.
"""High-level functions for creating filtered SQLAlchemy queries.
The :func:`search` and :func:`search_relationship` functions return
filtered queries on a SQLAlchemy model. The latter specifically
restricts the query to only those instances of a model that are related
to a particular object via a given to-many relationship.
"""
from sqlalchemy.orm import aliased
from sqlalchemy.sql import false as FALSE
from ..helpers import get_model
from ..helpers import get_related_model
from ..helpers import primary_key_names
from ..helpers import primary_key_value
from ..helpers import session_query
from .filters import create_filters
def search_relationship(session, instance, relation, filters=None, sort=None,
group_by=None, ignorecase=False):
"""Returns a filtered, sorted, and grouped SQLAlchemy query
restricted to those objects related to a given instance.
`session` is the SQLAlchemy session in which to create the query.
`instance` is an instance of a SQLAlchemy model whose relationship
will be queried.
    `relation` is a string naming a to-many relationship of `instance`.
`filters`, `sort`, `group_by`, and `ignorecase` are identical to the
corresponding arguments of :func:`.search`.
"""
model = get_model(instance)
related_model = get_related_model(model, relation)
query = session_query(session, related_model)
# Filter by only those related values that are related to `instance`.
relationship = getattr(instance, relation)
# TODO In Python 2.7+, this should be a set comprehension.
primary_keys = set(primary_key_value(inst) for inst in relationship)
# If the relationship is empty, we can avoid a potentially expensive
# filtering operation by simply returning an intentionally empty
# query.
if not primary_keys:
return query.filter(FALSE())
query = query.filter(primary_key_value(related_model).in_(primary_keys))
return search(session, related_model, filters=filters, sort=sort,
group_by=group_by, ignorecase=ignorecase,
_initial_query=query)
def search(session, model, filters=None, sort=None, group_by=None,
ignorecase=False, _initial_query=None):
"""Returns a filtered, sorted, and grouped SQLAlchemy query.
`session` is the SQLAlchemy session in which to create the query.
`model` is the SQLAlchemy model on which to create a query.
`filters` is a list of filter objects. Each filter object is a
dictionary representation of the filters to apply to the
query. (This dictionary is provided directly to the
:func:`.filters.create_filters` function.) For more information on
the format of this dictionary, see :doc:`filtering`.
`sort` is a list of pairs of the form ``(direction, fieldname)``,
where ``direction`` is either '+' or '-' and ``fieldname`` is a
string representing an attribute of the model or a dot-separated
relationship path (for example, 'owner.name'). If `ignorecase` is
True, the sorting will be case-insensitive (so 'a' will precede 'B'
instead of the default behavior in which 'B' precedes 'a').
`group_by` is a list of dot-separated relationship paths on which to
group the query results.
If `_initial_query` is provided, the filters, sorting, and grouping
will be appended to this query. Otherwise, an empty query will be
created for the specified model.
When building the query, filters are applied first, then sorting,
then grouping.
"""
query = _initial_query
if query is None:
query = session_query(session, model)
# Filter the query.
#
# This function call may raise an exception.
filters = create_filters(model, filters)
query = query.filter(*filters)
# Order the query. If no order field is specified, order by primary
# key.
# if not _ignore_sort:
if sort:
for (symbol, field_name) in sort:
direction_name = 'asc' if symbol == '+' else 'desc'
if '.' in field_name:
field_name, field_name_in_relation = field_name.split('.')
relation_model = aliased(get_related_model(model, field_name))
field = getattr(relation_model, field_name_in_relation)
if ignorecase:
field = field.collate('NOCASE')
direction = getattr(field, direction_name)
query = query.join(relation_model)
query = query.order_by(direction())
else:
field = getattr(model, field_name)
if ignorecase:
field = field.collate('NOCASE')
direction = getattr(field, direction_name)
query = query.order_by(direction())
else:
pks = primary_key_names(model)
pk_order = (getattr(model, field).asc() for field in pks)
query = query.order_by(*pk_order)
# Group the query.
if group_by:
for field_name in group_by:
if '.' in field_name:
field_name, field_name_in_relation = field_name.split('.')
relation_model = aliased(get_related_model(model, field_name))
field = getattr(relation_model, field_name_in_relation)
query = query.join(relation_model)
query = query.group_by(field)
else:
field = getattr(model, field_name)
query = query.group_by(field)
return query
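# Illustrative usage sketch (not part of the original module): assuming a
# SQLAlchemy `session` and a mapped `Person` model with `name` and `age`
# columns, a filtered and sorted query could look like:
#
#     filters = [{'name': 'age', 'op': 'ge', 'val': 18}]
#     sort = [('-', 'age'), ('+', 'name')]
#     adults = search(session, Person, filters=filters, sort=sort).all()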
|
sjsucohort6/openstack
|
python/venv/lib/python2.7/site-packages/cliff/tests/test_interactive.py
|
Python
|
mit
| 1,743 | 0 |
# -*- encoding: utf-8 -*-
from cliff.interactive import InteractiveApp
class FakeApp(object):
NAME = 'Fake'
def make_interactive_app(*command_names):
fake_command_manager = [(x, None) for x in command_names]
return InteractiveApp(FakeApp, fake_command_manager,
stdin=None, stdout=None)
def _test_completenames(expecteds, prefix):
app = make_interactive_app('hips', 'hippo', 'nonmatching')
assert set(app.completenames(prefix)) == set(expecteds)
def test_cmd2_completenames():
# cmd2.Cmd define do_help method
_test_completenames(['help'], 'he')
def test_cliff_completenames():
_test_completenames(['hips', 'hippo'], 'hip')
def test_no_completenames():
_test_completenames([], 'taz')
def test_both_completenames():
    # cmd2.Cmd define do_hi and do_history methods
_test_completenames(['hi', 'history', 'hips', 'hippo'], 'hi')
def _test_completedefault(expecteds, line, begidx):
command_names = set(['show file', 'show folder', 'show long', 'list all'])
app = make_interactive_app(*command_names)
observeds = app.completedefault(None, line, begidx, None)
assert set(observeds) == set(expecteds)
assert set([line[:begidx] + x for x in observeds]) <= command_names
def test_empty_text_completedefault():
# line = 'show ' + begidx = 5 implies text = ''
_test_completedefault(['file', 'folder', ' long'], 'show ', 5)
def test_nonempty_text_completedefault2():
# line = 'show f' + begidx = 6 implies text = 'f'
_test_completedefault(['file', 'folder'], 'show f', 5)
def test_long_completedefault():
_test_completedefault(['long'], 'show ', 6)
def test_no_completedefault():
_test_completedefault([], 'taz ', 4)
|
thetoine/eruditorg
|
erudit/core/subscription/rules.py
|
Python
|
gpl-3.0
| 748 | 0.001337 |
import rules
from rules.predicates import is_staff, is_superuser
from .models import Policy
@rules.predicate
def is_policy_manager(user, policy=None):
if policy is None:
return bool(Policy.objects.filter(managers=user).count())
else:
return bool(policy.managers.filter(id=user.id).count())
@rules.predicate
def is_account_manager(user, account=None):
if account is None:
return bool(Policy.objects.filter(managers=user).count())
else:
return is_policy_manager(user, account.policy)
rules.add_perm('subscription.manage_policy',
is_superuser | is_staff | is_policy_manager)
rules.add_perm('subscription.manage_account',
is_superuser | is_staff | is_account_manager)
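# Illustrative usage sketch (not part of the original module): with django-rules,
# the permissions registered above are checked like any other object permission,
# e.g. rules.has_perm('subscription.manage_policy', some_user, some_policy),
# where `some_user` and `some_policy` are hypothetical objects from the caller.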
|
wpoa/wiki-imports
|
lib/python2.7/site-packages/pywikibot-2.0b1-py2.7.egg/pywikibot/families/incubator_family.py
|
Python
|
gpl-3.0
| 373 | 0 |
# -*- coding: utf-8 -*-
__version__ = '$Id: 11c92177ab93084552b8d68021da6545c4b7674f $'
from pywikibot import family
# The Wikimedia Incubator family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'incubator'
        self.langs = {
'incubator': 'incubator.wikimedia.org',
}
|
mgaitan/waliki_flask
|
waliki/markup.py
|
Python
|
bsd-3-clause
| 7,181 | 0.003343 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Martín Gaitán
# Copyright (c) 2012-2013, Alexander Jung-Loddenkemper
# This file is part of Waliki (http://waliki.nqnwebs.com/)
# License: BSD (https://github.com/mgaitan/waliki/blob/master/LICENSE)
#===============================================================================
# DOCS
#===============================================================================
"""All supported markups
"""
#===============================================================================
# IMPORTS
#===============================================================================
import re
import docutils.core
import docutils.io
import markdown
import textwrap
from rst2html5 import HTML5Writer
import wiki
#===============================================================================
# MARKUP BASE
#===============================================================================
class Markup(object):
""" Base markup class."""
NAME = 'Text'
META_LINE = '%s: %s\n'
EXTENSION = '.txt'
HOWTO = """ """
def __init__(self, raw_content):
self.raw_content = raw_content
@classmethod
def render_meta(cls, key, value):
return cls.META_LINE % (key, value)
def process(self):
"""
return (html, body, meta) where HTML is the rendered output
body is the the editable content (text), and meta is
a dictionary with at least ['title', 'tags'] keys
"""
raise NotImplementedError("override in a subclass")
@classmethod
def howto(cls):
return cls(textwrap.dedent(cls.HOWTO)).process()[0]
#===============================================================================
# MARKDOWN
#===============================================================================
class Markdown(Markup):
NAME = 'markdown'
META_LINE = '%s: %s\n'
EXTENSION = '.md'
HOWTO = """
This editor is [markdown][] featured.
* I am
* a
* list
Turns into:
* I am
* a
* list
`**bold** and *italics*` turn into **bold** and *italics*. Very easy!
Create links with `[Wiki](http://github.com/alexex/wiki)`.
    They turn into [Wiki](http://github.com/alexex/wiki).
Headers are as follows:
# Level 1
## Level 2
### Level 3
[markdown]: http://daringfireball.net/projects/markdown/
"""
def process(self):
# Processes Markdown text to HTML, returns original markdown text,
# and adds meta
md = markdown.Markdown(['codehilite', 'fenced_code', 'meta'])
html = md.convert(self.raw_content)
meta_lines, body = self.raw_content.split('\n\n', 1)
meta = md.Meta
return html, body, meta
#===============================================================================
# RESTRUCTURED TEXT
#===============================================================================
class RestructuredText(Markup):
NAME = 'restructuredtext'
META_LINE = '.. %s: %s\n'
IMAGE_LINE = '.. image:: %(url)s'
LINK_LINE = '`%(filename)s <%(url)s>`_'
EXTENSION = '.rst'
HOWTO = """
This editor is `reStructuredText`_ featured::
* I am
* a
* list
Turns into:
* I am
* a
* list
``**bold** and *italics*`` turn into **bold** and *italics*. Very easy!
    Create links with ```Wiki <http://github.com/alexex/wiki>`_``.
    They turn into `Wiki <https://github.com/alexex/wiki>`_.
Headers are just any underline (and, optionally, overline).
For example::
Level 1
*******
Level 2
-------
Level 3
+++++++
.. _reStructuredText: http://docutils.sourceforge.net/rst.html
"""
def process(self):
settings = {'initial_header_level': 2,
'record_dependencies': True,
'stylesheet_path': None,
'link_stylesheet': True,
'syntax_highlight': 'short',
}
html = self._rst2html(self.raw_content,
settings_overrides=settings)
        # Convert unknown links to internal wiki links.
# Examples:
# Something_ will link to '/something'
# `something great`_ to '/something_great'
# `another thing <thing>`_ '/thing'
refs = re.findall(r'Unknown target name: "(.*)"', html)
if refs:
content = self.raw_content + self.get_autolinks(refs)
html = self._rst2html(content, settings_overrides=settings)
meta_lines, body = self.raw_content.split('\n\n', 1)
meta = self._parse_meta(meta_lines.split('\n'))
return html, body, meta
def get_autolinks(self, refs):
autolinks = '\n'.join(['.. _%s: /%s' % (ref, wiki.urlify(ref, False))
for ref in refs])
return '\n\n' + autolinks
def _rst2html(self, source, source_path=None,
source_class=docutils.io.StringInput,
destination_path=None, reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext', writer=None,
writer_name=None, settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
if not writer:
writer = HTML5Writer()
# Taken from Nikola
# http://bit.ly/14CmQyh
output, pub = docutils.core.publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=docutils.io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts['body']
def _parse_meta(self, lines):
""" Parse Meta-Data. Taken from Python-Markdown"""
META_RE = re.compile(r'^\.\.\s(?P<key>.*?): (?P<value>.*)')
meta = {}
key = None
for line in lines:
if line.strip() == '':
continue
m1 = META_RE.match(line)
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
return meta
#===============================================================================
# MAIN
#===============================================================================
if __name__ == "__main__":
print(__doc__)
|
mlperf/inference_results_v0.7
|
closed/Intel/code/resnet/resnet-ov/py-bindings/convert.py
|
Python
|
apache-2.0
| 2,497 | 0.007609 |
import sys
import errno
import json
import os
from argparse import ArgumentParser
sys.path.insert(1, 'py-bindings')
from squad import SQUADConverter
def get_samples(test_file, vocab_file, output_dir):
print("Test file:", test_file)
print("Vocab file:", vocab_file)
print("Output dir:", output_dir)
max_seq_length = 384
max_query_length = 64
doc_stride = 128
lower_case = False
sqd = SQUADConverter(test_file, vocab_file, max_seq_length, max_query_length, doc_stride, lower_case)
samples = sqd.convert()
# Dump samples to json
print("--Dumping examples to json--")
os.makedirs(output_dir, exist_ok=True)
output_file = output_dir + "/squad_examples.json"
c = 0
with open(output_file, 'w', encoding='utf-8') as fid:
json.dump({'samples':samples}, fid, ensure_ascii=False, indent=4)
return c
def get_arguments():
parser = ArgumentParser()
parser.add_argument("--test_file", type=str, help="Path to squad test json file", required=True)
parser.add_argument("--vocab_file", type=str, help="Path to vocab.txt file", required=True)
parser.add_argument("--max_seq_length", type=int, help="Max sequence length", default=384)
parser.add_argument("--max_query_length", type=int, help="Max query length", default=64)
parser.add_argument("--doc_stride", type=int, help="Document stride", default=128)
parser.add_argument("--lower_case", type=bool, help="Lower case", default=1)
parser.add_argument("--output_dir", type=str, help="Output directory for saved json", default="samples_cache")
    return parser.parse_args()
def main():
args = get_arguments()
if not os.path.isfile(args.test_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.test_file)
    if not os.path.isfile(args.vocab_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.vocab_file)
sqd = SQUADConverter(args.test_file, args.vocab_file, args.max_seq_length, args.max_query_length, args.doc_stride, args.lower_case)
# Convert examples
print("--Reading samples--")
samples = sqd.convert()
    # Dump samples to json
print("--Dumping examples to json--")
os.makedirs(args.output_dir, exist_ok=True)
output_file = args.output_dir + "/squad_examples.json"
with open(output_file, 'w', encoding='utf-8') as fid:
json.dump({'samples':samples}, fid, ensure_ascii=False, indent=4)
if __name__=="__main__":
main()
|
saltstack/salt
|
salt/modules/aptpkg.py
|
Python
|
apache-2.0
| 105,007 | 0.001 |
"""
Support for APT (Advanced Packaging Tool)
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
For repository management, the ``python-apt`` package must be installed.
"""
import copy
import datetime
import fnmatch
import logging
import os
import pathlib
import re
import shutil
import tempfile
import time
from urllib.error import HTTPError
from urllib.request import Request as _Request
from urllib.request import urlopen as _urlopen
import salt.config
import salt.syspaths
import salt.utils.args
import salt.utils.data
import salt.utils.environment
import salt.utils.files
import salt.utils.functools
import salt.utils.itertools
import salt.utils.json
import salt.utils.path
import salt.utils.pkg
import salt.utils.pkg.deb
import salt.utils.stringutils
import salt.utils.systemd
import salt.utils.versions
import salt.utils.yaml
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
MinionError,
SaltInvocationError,
)
from salt.modules.cmdmod import _parse_env
log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
import apt.cache
import apt.debfile
from aptsources.sourceslist import (
SourceEntry,
SourcesList,
)
HAS_APT = True
except ImportError:
HAS_APT = False
try:
import apt_pkg
HAS_APTPKG = True
except ImportError:
HAS_APTPKG = False
try:
import softwareproperties.ppa
HAS_SOFTWAREPROPERTIES = True
except ImportError:
HAS_SOFTWAREPROPERTIES = False
# pylint: enable=import-error
APT_LISTS_PATH = "/var/lib/apt/lists"
PKG_ARCH_SEPARATOR = ":"
# Source format for urllib fallback on PPA handling
LP_SRC_FORMAT = "deb http://ppa.launchpad.net/{0}/{1}/ubuntu {2} main"
LP_PVT_SRC_FORMAT = "deb https://{0}private-ppa.launchpad.net/{1}/{2}/ubuntu {3} main"
_MODIFY_OK = frozenset(["uri", "comps", "architectures", "disabled", "file", "dist"])
DPKG_ENV_VARS = {
"APT_LISTBUGS_FRONTEND": "none",
"APT_LISTCHANGES_FRONTEND": "none",
"DEBIAN_FRONTEND": "noninteractive",
"UCF_FORCE_CONFFOLD": "1",
}
# Define the module's virtual name
__virtualname__ = "pkg"
def __virtual__():
"""
Confirm this module is on a Debian-based system
"""
# If your minion is running an OS which is Debian-based but does not have
# an "os_family" grain of Debian, then the proper fix is NOT to check for
# the minion's "os_family" grain here in the __virtual__. The correct fix
# is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
# dict in salt/grains/core.py, so that we assign the correct "os_family"
# grain to the minion.
if __grains__.get("os_family") == "Debian":
return __virtualname__
return False, "The pkg module could not be loaded: unsupported OS family"
def __init__(opts):
"""
For Debian and derivative systems, set up
a few env variables to keep apt happy and
non-interactive.
"""
if __virtual__() == __virtualname__:
# Export these puppies so they persist
os.environ.update(DPKG_ENV_VARS)
if not HAS_APT:
class SourceEntry: # pylint: disable=function-redefined
def __init__(self, line, file=None):
self.invalid = False
self.comps = []
self.disabled = False
self.comment = ""
self.dist = ""
self.type = ""
self.uri = ""
self.line = line
self.architectures = []
self.file = file
if not self.file:
self.file = str(pathlib.Path(os.sep, "etc", "apt", "sources.list"))
self._parse_sources(line)
def repo_line(self):
"""
Return the repo line for the sources file
"""
repo_line = []
if self.invalid:
return self.line
if self.disabled:
repo_line.append("#")
repo_line.append(self.type)
if self.architectures:
repo_line.append("[arch={}]".format(" ".join(self.architectures))
|
)
repo_line = repo_line + [self.uri, self.dist, " ".join(self.comps)]
if self.comment:
repo_line.append("#{}".format(self.comment))
return " ".join(repo_line) + "\n"
def _parse_sources(self, line):
"""
Parse lines from sources files
"""
self.disabled = False
repo_line = self.line.strip().split()
if not repo_line:
self.invalid = True
return False
if repo_line[0].startswith("#"):
repo_line.pop(0)
self.disabled = True
if repo_line[0] not in ["deb", "deb-src", "rpm", "rpm-src"]:
self.invalid = True
return False
if repo_line[1].startswith("["):
opts = re.search(r"\[.*\]", self.line).group(0).strip("[]")
repo_line = [x for x in (line.strip("[]") for line in repo_line) if x]
for opt in opts.split():
if opt.startswith("arch"):
self.architectures.extend(opt.split("=", 1)[1].split(","))
try:
repo_line.pop(repo_line.index(opt))
except ValueError:
repo_line.pop(repo_line.index("[" + opt + "]"))
self.type = repo_line[0]
self.uri = repo_line[1]
self.dist = repo_line[2]
self.comps = repo_line[3:]
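        # Illustrative note (not part of the original module): a line such as
        #   deb [arch=amd64] http://archive.ubuntu.com/ubuntu focal main universe
        # parses to type='deb', architectures=['amd64'],
        # uri='http://archive.ubuntu.com/ubuntu', dist='focal',
        # comps=['main', 'universe']; a leading '#' marks the entry as disabled.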
class SourcesList: # pylint: disable=function-redefined
def __init__(self):
self.list = []
self.files = [
pathlib.Path(os.sep, "etc", "apt", "sources.list"),
pathlib.Path(os.sep, "etc", "apt", "sources.list.d"),
]
for file in self.files:
if file.is_dir():
for fp in file.glob("**/*.list"):
self.add_file(file=fp)
else:
self.add_file(file)
def __iter__(self):
yield from self.list
def add_file(self, file):
"""
Add the lines of a file to self.list
"""
if file.is_file():
with salt.utils.files.fopen(file) as source:
for line in source:
self.list.append(SourceEntry(line, file=str(file)))
else:
log.debug("The apt sources file %s does not exist", file)
def add(self, type, uri, dist, orig_comps, architectures):
repo_line = [
type,
" [arch={}] ".format(" ".join(architectures)) if architectures else "",
uri,
dist,
" ".join(orig_comps),
]
return SourceEntry(" ".join(repo_line))
def remove(self, source):
"""
remove a source from the list of sources
"""
self.list.remove(source)
def save(self):
"""
write all of the sources from the list of sources
to the file.
"""
filemap = {}
with tempfile.TemporaryDirectory() as tmpdir:
for source in self.list:
fname = pathlib.Path(tmpdir, pathlib.Path(source.file).name)
with salt.utils.files.fopen(fname, "a") as fp:
fp.write(source.repo_line())
if source.file not in filemap:
filemap[source.file] = {"tmp": fname}
for fp in filemap:
shutil.move(filemap[fp]["tmp"], fp)
def _get_ppa_info_from_launchpad(owner_name, ppa_name):
"""
Idea from softwareproperties.ppa.
Uses urllib2 which sacrifices server cert verification.
This is used as fall-back code or for secure PPAs
:par
|
frac/celery
|
celery/utils/serialization.py
|
Python
|
bsd-3-clause
| 4,836 | 0.000414 |
import inspect
import sys
import types
from copy import deepcopy
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError:
cpickle = None
if sys.version_info < (2, 6): # pragma: no cover
# cPickle is broken in Python <= 2.5.
# It unsafely and incorrectly uses relative instead of absolute imports,
# so e.g.:
# exceptions.KeyError
# becomes:
# celery.exceptions.KeyError
#
# Your best choice is to upgrade to Python 2.6,
# as while the pure pickle version has worse performance,
# it is the only safe option for older Python versions.
pickle = pypickle
else:
pickle = cpickle or pypickle
# BaseException was introduced in Python 2.5.
try:
_error_bases = (BaseException, )
except NameError: # pragma: no cover
_error_bases = (SystemExit, KeyboardInterrupt)
#: List of base classes we probably don't want to reduce to.
unwanted_base_classes = (StandardError, Exception) + _error_bases + (object, )
if sys.version_info < (2, 5): # pragma: no cover
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parent, unused):
return types.ClassType(name, (parent,), {})
else:
def subclass_exception(name, parent, module):
return type(name, (parent,), {'__module__': module})
def find_nearest_pickleable_exception(exc):
"""With an exception instance, iterate over its super classes (by mro)
and find the first super exception that is pickleable. It does
not go below :exc:`Exception` (i.e. it skips :exc:`Exception`,
:class:`BaseException` and :class:`object`). If that happens
you should use :exc:`UnpickleableException` instead.
:param exc: An exception instance.
:returns: the nearest exception if it's not :exc:`Exception` or below,
if it is it returns :const:`None`.
:rtype :exc:`Exception`:
"""
cls = exc.__class__
getmro_ = getattr(cls, "mro", None)
    # old-style classes don't have mro()
    if not getmro_:
        # all Py2.4 exceptions have a baseclass.
if not getattr(cls, "__bases__", ()):
return
# Use inspect.getmro() to traverse bases instead.
getmro_ = lambda: inspect.getmro(cls)
for supercls in getmro_():
if supercls in unwanted_base_classes:
# only BaseException and object, from here on down,
# we don't care about these.
return
try:
exc_args = getattr(exc, "args", [])
superexc = supercls(*exc_args)
pickle.dumps(superexc)
except:
pass
else:
return superexc
def create_exception_cls(name, module, parent=None):
"""Dynamically create an exception class."""
if not parent:
parent = Exception
return subclass_exception(name, parent, module)
class UnpickleableExceptionWrapper(Exception):
"""Wraps unpickleable exceptions.
:param exc_module: see :attr:`exc_module`.
:param exc_cls_name: see :attr:`exc_cls_name`.
:param exc_args: see :attr:`exc_args`
**Example**
.. code-block:: python
>>> try:
... something_raising_unpickleable_exc()
>>> except Exception, e:
... exc = UnpickleableException(e.__class__.__module__,
... e.__class__.__name__,
... e.args)
        ...         pickle.dumps(exc) # Works fine.
"""
#: The module of the original exception.
exc_module = None
    #: The name of the original exception class.
exc_cls_name = None
#: The arguments for the original exception.
exc_args = None
def __init__(self, exc_module, exc_cls_name, exc_args):
self.exc_module = exc_module
self.exc_cls_name = exc_cls_name
self.exc_args = exc_args
Exception.__init__(self, exc_module, exc_cls_name, exc_args)
@classmethod
def from_exception(cls, exc):
return cls(exc.__class__.__module__,
exc.__class__.__name__,
getattr(exc, "args", []))
def restore(self):
return create_exception_cls(self.exc_cls_name,
self.exc_module)(*self.exc_args)
def get_pickleable_exception(exc):
"""Make sure exception is pickleable."""
nearest = find_nearest_pickleable_exception(exc)
if nearest:
return nearest
try:
pickle.dumps(deepcopy(exc))
except Exception:
return UnpickleableExceptionWrapper.from_exception(exc)
return exc
def get_pickled_exception(exc):
"""Get original exception from exception pickled using
:meth:`get_pickleable_exception`."""
if isinstance(exc, UnpickleableExceptionWrapper):
return exc.restore()
return exc
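# Illustrative usage sketch (not part of the original module): the two helpers
# are typically paired around a pickle round trip, e.g.
#
#     safe = get_pickleable_exception(SomeUnpicklableError("boom"))
#     restored = get_pickled_exception(pickle.loads(pickle.dumps(safe)))
#
# where `SomeUnpicklableError` stands in for any exception pickle rejects.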
|
shoopio/shoop
|
shuup/core/cache/__init__.py
|
Python
|
agpl-3.0
| 1,316 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for versioned caching and automatic timeout determination.
Versioning works by way of namespaces. Namespaces are the first
colon-separated part of cache keys.
For instance, the cache keys ``price:10``, ``price:20``, and ``price``
all belong to the ``price`` namespace and can be invalidated with
one ``bump_version("price")`` call.
The versions themselves are stored within the cache, within the
``_version`` namespace. (As an implementation detail, this allows one
to invalidate _all_ versioned keys by bumping the version of
``_version``. Very meta!)
"""
from .impl import VersionedCache
__all__ = [
"bump_version",
"clear",
"get",
"set",
"VersionedCache",
]
_default_cache = None
get = None
set = None
bump_version = None
clear = None
def init_cache():
global _default_cache, get, set, bump_version, clear
_default_cache = VersionedCache(using="default")
get = _default_cache.get
set = _default_cache.set
bump_version = _default_cache.bump_version
clear = _default_cache.clear
init_cache()
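# Illustrative usage sketch (not part of the original module): keys share the
# namespace before the first colon, so bumping that namespace invalidates them.
#
#     set("price:10", 100)      # cached under the "price" namespace
#     bump_version("price")     # "price:10", "price:20", ... are now stale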
|
Groutcho/exii
|
tests/instructions/test_pop.py
|
Python
|
gpl-2.0
| 762 | 0 |
from unittest import TestCase
from instructions.pop import Pop
from context import Context
from registers import AX, SP
__author__ = "Sébastien Guimmara"
class TestPop(TestCase):
def test_execute(self):
p = Pop.parse(['ax'])
ctx = Context(None)
ctx.registers.set(SP, 0xFFFE)
self.assertEqual(ctx.registers.get(SP).value, 0xFFFE)
ctx.stack.set(0xFFFE, 0x0022)
p.execute(ctx)
self.assertEqual(ctx.registers.get(AX).value, 0x0022)
self.assertEqual(ctx.registers.get(SP).value, 0xFFFF)
def test_parse_bad_number_of_arguments(self):
self.assertRaises(SyntaxError, lambda: Pop.parse(['ax,', '2']))
def test_parse_ok(self):
        self.assertIsInstance(Pop.parse(['ax']), Pop)
|
skuda/client-python
|
kubernetes/test/test_v1beta1_network_policy_ingress_rule.py
|
Python
|
apache-2.0
| 991 | 0.004036 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_network_policy_ingress_rule import V1beta1NetworkPolicyIngressRule
class TestV1beta1NetworkPolicyIngressRule(unittest.TestCase):
""" V1beta1NetworkPolicyIngressRule unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1NetworkPolicyIngressRule(self):
"""
Test V1beta1NetworkPolicyIngressRule
"""
model = kubernetes.client.models.v1beta1_network_policy_ingress_rule.V1beta1NetworkPolicyIngressRule()
if __name__ == '__main__':
unittest.main()
|
olipratt/swagger-conformance
|
swaggerconformance/response.py
|
Python
|
mit
| 1,388 | 0 |
"""
A response received to a Swagger API operation.
"""
import logging
__all__ = ["Response"]
log = logging.getLogger(__name__)
class CaseInsensitiveDict(dict):
"""Dictionary with case insensitive lookup of string keys."""
def __getitem__(self, key):
return {k.lower(): v for k, v in self.items()}[key.lower()]
class Response:
"""A response received to a Swagger API operation.
:param raw_response: The raw response.
:type raw_response: pyswagger.io.Response
"""
    def __init__(self, raw_response):
self._raw_response = raw_response
    @property
def status(self):
"""HTTP status code of the response.
:rtype: int
"""
return self._raw_response.status
@property
def body(self):
"""Parsed response body converted to objects via the codec in use."""
return self._raw_response.data
@property
def raw(self):
"""Raw response body.
:rtype: bytes
"""
return self._raw_response.raw
@property
def headers(self):
"""HTTP headers received on the response.
Example format is ``{'Content-Type': [xxx, xxx]}``
Header field names are case insensitive (See
http://www.ietf.org/rfc/rfc2616.txt)
:rtype: dict(str, list(str))
"""
return CaseInsensitiveDict(self._raw_response.header)
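    # Illustrative note (not part of the original module): header lookup is case
    # insensitive, so response.headers['content-type'] and
    # response.headers['Content-Type'] return the same list of values.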
|
rpm-software-management/dnf
|
dnf/yum/misc.py
|
Python
|
gpl-2.0
| 11,251 | 0.001867 |
# misc.py
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""
Assorted utility functions for yum.
"""
from __future__ import print_function, absolute_import
from __future__ import unicode_literals
from dnf.pycomp import base64_decodebytes, basestring, unicode
from stat import *
import libdnf.utils
import dnf.const
import dnf.crypto
import dnf.exceptions
import dnf.i18n
import errno
import glob
import io
import os
import os.path
import pwd
import re
import shutil
import tempfile
_default_checksums = ['sha256']
_re_compiled_glob_match = None
def re_glob(s):
""" Tests if a string is a shell wildcard. """
global _re_compiled_glob_match
if _re_compiled_glob_match is None:
_re_compiled_glob_match = re.compile(r'[*?]|\[.+\]').search
return _re_compiled_glob_match(s)
_re_compiled_full_match = None
def re_full_search_needed(s):
""" Tests if a string needs a full nevra match, instead of just name. """
global _re_compiled_full_match
if _re_compiled_full_match is None:
# A glob, or a "." or "-" separator, followed by something (the ".")
one = re.compile(r'.*([-.*?]|\[.+\]).').match
# Any epoch, for envra
two = re.compile('[0-9]+:').match
_re_compiled_full_match = (one, two)
for rec in _re_compiled_full_match:
if rec(s):
return True
return False
def get_default_chksum_type():
return _default_checksums[0]
class GenericHolder(object):
"""Generic Holder class used to hold other objects of known types
It exists purely to be able to do object.somestuff, object.someotherstuff
or object[key] and pass object to another function that will
understand it"""
def __init__(self, iter=None):
self.__iter = iter
def __iter__(self):
if self.__iter is not None:
return iter(self[self.__iter])
def __getitem__(self, item):
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError(item)
def all_lists(self):
"""Return a dictionary of all lists."""
return {key: list_ for key, list_ in vars(self).items()
if type(list_) is list}
def merge_lists(self, other):
""" Concatenate the list attributes from 'other' to ours. """
for (key, val) in other.all_lists().items():
vars(self).setdefault(key, []).extend(val)
return self
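# Illustrative note (not part of the original module): GenericHolder is a small
# attribute bag with dict-style access, e.g.
#
#     holder = GenericHolder()
#     holder.installed = ['foo']
#     holder['installed']   # -> ['foo']; unknown keys raise KeyError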
def procgpgkey(rawkey):
'''Convert ASCII-armored GPG key to binary
'''
# Normalise newlines
rawkey = re.sub(b'\r\n?', b'\n', rawkey)
# Extract block
block = io.BytesIO()
inblock = 0
pastheaders = 0
for line in rawkey.split(b'\n'):
if line.startswith(b'-----BEGIN PGP PUBLIC KEY BLOCK-----'):
inblock = 1
elif inblock and line.strip() == b'':
pastheaders = 1
elif inblock and line.startswith(b'-----END PGP PUBLIC KEY BLOCK-----'):
# Hit the end of the block, get out
break
elif pastheaders and line.startswith(b'='):
# Hit the CRC line, don't include this and stop
break
elif pastheaders:
block.write(line + b'\n')
# Decode and return
return base64_decodebytes(block.getvalue())
def keyInstalled(ts, keyid, timestamp):
'''
Return if the GPG key described by the given keyid and timestamp are
installed in the rpmdb.
The keyid and timestamp should both be passed as integers.
The ts is an rpm transaction set object
Return values:
- -1 key is not installed
- 0 key with matching ID and timestamp is installed
- 1 key with matching ID is installed but has an older timestamp
- 2 key with matching ID is installed but has a newer timestamp
No effort is made to handle duplicates. The first matching keyid is used to
calculate the return result.
'''
# Search
for hdr in ts.dbMatch('name', 'gpg-pubkey'):
if hdr['version'] == keyid:
installedts = int(hdr['release'], 16)
if installedts == timestamp:
return 0
elif installedts < timestamp:
return 1
else:
return 2
return -1
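# Illustrative note (not part of the original module): callers usually import a
# key only when keyInstalled() returns -1; 0 means an exact match is installed,
# while 1/2 flag an installed key with an older/newer timestamp.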
def import_key_to_pubring(rawkey, keyid, gpgdir=None, make_ro_copy=True):
if not os.path.exists(gpgdir):
os.makedirs(gpgdir)
with dnf.crypto.pubring_dir(gpgdir), dnf.crypto.Context() as ctx:
# import the key
with open(os.path.join(gpgdir, 'gpg.conf'), 'wb') as fp:
fp.write(b'')
ctx.op_import(rawkey)
if make_ro_copy:
rodir = gpgdir + '-ro'
if not os.path.exists(rodir):
os.makedirs(rodir, mode=0o755)
for f in glob.glob(gpgdir + '/*'):
basename = os.path.basename(f)
ro_f = rodir + '/' + basename
shutil.copy(f, ro_f)
os.chmod(ro_f, 0o755)
# yes it is this stupid, why do you ask?
opts = """lock-never
no-auto-check-trustdb
trust-model direct
no-expensive-trust-checks
no-permission-warning
preserve-permissions
"""
with open(os.path.join(rodir, 'gpg.conf'), 'w', 0o755) as fp:
fp.write(opts)
return True
def getCacheDir():
"""return a path to a valid and safe cachedir - only used when not running
as root or when --tempcache is set"""
uid = os.geteuid()
try:
usertup = pwd.getpwuid(uid)
username = dnf.i18n.ucd(usertup[0])
prefix = '%s-%s-' % (dnf.const.PREFIX, username)
except KeyError:
prefix = '%s-%s-' % (dnf.const.PREFIX, uid)
# check for /var/tmp/prefix-* -
dirpath = '%s/%s*' % (dnf.const.TMPDIR, prefix)
cachedirs = sorted(glob.glob(dirpath))
for thisdir in cachedirs:
stats = os.lstat(thisdir)
if S_ISDIR(stats[0]) and S_IMODE(stats[0]) == 448 and stats[4] == uid:
return thisdir
# make the dir (tempfile.mkdtemp())
cachedir = tempfile.mkdtemp(prefix=prefix, dir=dnf.const.TMPDIR)
return cachedir
def seq_max_split(seq, max_entries):
""" Given a seq, split into a list of lists of length max_entries each. """
ret = []
num = len(seq)
seq = list(seq) # Trying to use a set/etc. here is bad
beg = 0
while num > max_entries:
end = beg + max_entries
ret.append(seq[beg:end])
beg += max_entries
num -= max_entries
ret.append(seq[beg:])
return ret
def unlink_f(filename):
""" Call os.unlink, but don't die if the file isn't there. This is the main
difference between "rm -f" and plain "rm". """
try:
os.unlink(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def stat_f(filename, ignore_EACCES=False):
""" Call os.stat(), but don't die if the file isn't there. Returns None. """
try:
return os.stat(filename)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return None
if ignore_EACCES and e.errno == errno.EACCES:
return None
raise
def _get
|
Digilent/u-boot-digilent
|
test/py/tests/test_md.py
|
Python
|
gpl-2.0
| 1,426 | 0.001403 |
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
import pytest
import u_boot_utils
@pytest.mark.buildconfigspec('cmd_memory')
def test_md(u_boot_console):
"""Test that md reads memory as expected, and that memory can be modified
using the mw command."""
ram_base = u_boot_utils.find_ram_base(u_boot_console)
addr = '%08x' % ram_base
val = 'a5f09876'
expected_response = addr + ': ' + val
u_boot_console.run_command('mw ' + addr + ' 0 10')
response = u_boot_console.run_command('md ' + addr + ' 10')
assert(not (expected_response in response))
u_boot_console.run_command('mw ' + addr + ' ' + val)
response = u_boot_console.run_command('md ' + addr + ' 10')
assert(expected_response in response)
@pytest.mark.buildconfigspec('cmd_memory')
def test_md_repeat(u_boot_console):
"""Test command repeat (via executing an empty command) operates correctly
for "md"; the command must repeat and dump an incrementing address."""
ram_base = u_boot_utils.find_ram_base(u_boot_console)
addr_base = '%08x' % ram_base
words = 0x10
addr_repeat = '%08x' % (ram_base + (words * 4))
u_boot_console.run_command('md %s %x' % (addr_base, words))
response = u_boot_console.run_command('')
expected_response = addr_repeat + ': '
assert(expected_response in response)
|
afimb/gtfslib-python
|
gtfslib/orm.py
|
Python
|
gpl-3.0
| 22,956 | 0.008059 |
# -*- coding: utf-8 -*-
# This file is part of Gtfslib-python.
#
# Gtfslib-python is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gtfslib-python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gtfslib-python. If not, see <http://www.gnu.org/licenses/>.
"""
@author: Laurent GRÉGOIRE <laurent.gregoire@mecatran.com>
"""
from sqlalchemy.orm import mapper, relationship, backref, clear_mappers
from sqlalchemy.orm.relationships import foreign
from sqlalchemy.sql.schema import Column, MetaData, Table, ForeignKey, \
ForeignKeyConstraint, Index
from sqlalchemy.sql.sqltypes import String, Integer, Float, Date, Boolean
from gtfslib.model import FeedInfo, Agency, Stop, Route, Calendar, CalendarDate, \
Trip, StopTime, Transfer, Shape, ShapePoint, Zone, FareAttribute, FareRule
# ORM Mappings
class _Orm(object):
def __init__(self, engine, schema=None):
self._metadata = MetaData(schema=schema)
self.mappers = []
_feedinfo_id_column = Column('feed_id', String, primary_key=True)
_agency_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_route_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_feedinfo_mapper = Table('feed_info', self._metadata,
_feedinfo_id_column,
Column('feed_publisher_name', String),
Column('feed_publisher_url', String),
Column('feed_contact_email', String), # Non-standard (yet) field
Column('feed_contact_url', String), # Non-standard (yet) field
Column('feed_lang', String),
Column('feed_start_date', Date),
Column('feed_end_date', Date),
Column('feed_version', String))
self.mappers.append(mapper(FeedInfo, _feedinfo_mapper, properties={
}))
_agency_id_column = Column('agency_id', String, primary_key=True)
_route_agency_id_column = Column('agency_id', String, nullable=False)
_agency_mapper = Table('agency', self._metadata,
_agency_feed_id_column,
_agency_id_column,
Column('agency_name', String, nullable=False),
Column('agency_url', String, nullable=False),
Column('agency_timezone', String, nullable=False),
Column('agency_lang', String),
Column('agency_phone', String),
Column('agency_fare_url', String),
Column('agency_email', String))
self.mappers.append(mapper(Agency, _agency_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('agencies', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_agency_feed_id_column))
}))
_zone_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_zone_id_column = Column('zone_id', String, primary_key=True)
_zone_mapper = Table('zones', self._metadata,
_zone_feed_id_column,
_zone_id_column)
self.mappers.append(mapper(Zone, _zone_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('zones', cascade="all,delete-orphan"),
                                  primaryjoin=_feedinfo_id_column == foreign(_zone_feed_id_column))
}))
_stop_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_stop_id_column = Column('stop_id', String, primary_key=True)
_stop_parent_id_column = Column('parent_station_id', String, nullable=True)
        _stop_zone_id_column = Column('zone_id', String, nullable=True)
_stop_mapper = Table('stops', self._metadata,
_stop_feed_id_column,
_stop_id_column,
_stop_parent_id_column,
Column('location_type', Integer, nullable=False),
Column('stop_name', String, nullable=False),
Column('stop_lat', Float, nullable=False),
Column('stop_lon', Float, nullable=False),
Column('wheelchair_boarding', Integer, nullable=False),
Column('stop_code', String),
Column('stop_desc', String),
_stop_zone_id_column,
Column('stop_url', String),
Column('stop_timezone', String),
ForeignKeyConstraint(['feed_id', 'parent_station_id'], ['stops.feed_id', 'stops.stop_id']),
ForeignKeyConstraint(['feed_id', 'zone_id'], ['zones.feed_id', 'zones.zone_id']),
# TODO Make those index parametrable
Index('idx_stops_lat', 'stop_lat'),
Index('idx_stops_lon', 'stop_lon'),
Index('idx_stops_code', 'feed_id', 'stop_code'),
Index('idx_stops_zone', 'feed_id', 'zone_id'),
Index('idx_stops_parent', 'feed_id', 'parent_station_id'))
self.mappers.append(mapper(Stop, _stop_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('stops', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_stop_feed_id_column)),
'sub_stops' : relationship(Stop, remote_side=[_stop_feed_id_column, _stop_parent_id_column], uselist=True,
primaryjoin=(_stop_parent_id_column == foreign(_stop_id_column)) & (_stop_feed_id_column == _stop_feed_id_column)),
'parent_station' : relationship(Stop, remote_side=[_stop_feed_id_column, _stop_id_column],
primaryjoin=(_stop_id_column == foreign(_stop_parent_id_column)) & (_stop_feed_id_column == _stop_feed_id_column)),
'zone' : relationship(Zone, backref=backref('stops', cascade="all,delete-orphan"),
primaryjoin=(_zone_id_column == foreign(_stop_zone_id_column)) & (_zone_feed_id_column == _stop_feed_id_column))
}))
_transfer_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_transfer_from_stop_id_column = Column('from_stop_id', String, primary_key=True)
_transfer_to_stop_id_column = Column('to_stop_id', String, primary_key=True)
_transfer_mapper = Table('transfers', self._metadata,
_transfer_feed_id_column,
_transfer_from_stop_id_column,
_transfer_to_stop_id_column,
Column('transfer_type', Integer, nullable=False),
Column('min_transfer_time', Integer),
ForeignKeyConstraint(['feed_id', 'from_stop_id'], ['stops.feed_id', 'stops.stop_id']),
ForeignKeyConstraint(['feed_id', 'to_stop_id'], ['stops.feed_id', 'stops.stop_id']),
Index('idx_transfer_from', 'feed_id', 'from_stop_id'),
Index('idx_transfer_to', 'feed_id', 'to_stop_id'))
self.mappers.append(mapper(Transfer, _transfer_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('transfers', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_transfer_feed_id_column)),
'from_stop' : relationship(Stop, backref=backref('from_transfers', cascade='all', uselist=True), uselist=False,
primaryjoin=(_transfer
|
yuncliu/Learn
|
python/decorator.py
|
Python
|
bsd-3-clause
| 299 | 0.013378 |
#!/usr/bin/python3
#-*- coding:utf-8 -*-
from functools import wraps
def xxx(func):
@wraps(func)
def my(n):
func(n*100)
return
return my
@xxx
def abc(n):
print(n)
if __name__ == '__main__':
abc(10)
abc.__wrapped__(10)
xx = abc.__wrapped__
xx(1234)
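    # Illustrative note (not part of the original script): abc(10) goes through
    # the wrapper and prints 1000, while abc.__wrapped__(10) and xx(1234) call
    # the undecorated function and print 10 and 1234; functools.wraps is what
    # exposes the original function via __wrapped__.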
|
blrm/openshift-tools
|
docker/oso-psad/src/scripts/check_psad.py
|
Python
|
apache-2.0
| 5,322 | 0.004134 |
#!/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This script is used to check the psad logs for positive port scanning traffic
and report its findings to Zabbix.
"""
from __future__ import print_function
from datetime import datetime
import os
import re
import boto3
import botocore
import yaml
# Reason: disable pylint import-error because our modules aren't loaded on jenkins.
# pylint: disable=import-error
from openshift_tools.monitoring.zagg_sender import ZaggSender
class CheckStatus(object):
""" Class to check for issues found in psad logs. """
@staticmethod
def check_psad(log_message, logfile):
""" Check number of occurrences of issues in the specified logfile.
Returns:
An int representing the number of issues found.
"""
total_issues = 0
if os.path.isfile(logfile):
with open(logfile) as open_file:
stripped_line = list([line.rstrip() for line in open_file.readlines()])
for line in stripped_line:
line_found = re.search(log_message, line, re.IGNORECASE)
if line_found:
total_issues += 1
return total_issues
else:
raise ValueError(logfile + ' does not exist.')
@staticmethod
def search_logfile(logfile):
""" Look for positive scan results. """
results = []
with open(logfile) as open_file:
between = False
for line in open_file:
tline = line.strip()
if tline == 'iptables auto-blocked IPs:':
between = True
elif tline == 'Total protocol packet counters:':
between = False
elif between and tline != '':
results.append(tline)
issues = len(results)
return issues
@staticmethod
def get_config(config_path):
""" Open and read config data from the variables file. """
config_settings = {}
if os.path.isfile(config_path):
with open(config_path, 'r') as scan_config:
yaml_config = yaml.load(scan_config)
if yaml_config['opsad_creds_file']:
config_settings['opsad_creds_file'] = yaml_config['opsad_creds_file']
if yaml_config['opsad_s3_bucket']:
config_settings['opsad_s3_bucket'] = yaml_config['opsad_s3_bucket']
if yaml_config['opsad_log_file']:
config_settings['opsad_log_file'] = yaml_config['opsad_log_file']
if yaml_config['opsad_host_name']:
config_settings['opsad_host_name'] = yaml_config['opsad_host_name']
if yaml_config['opsad_cluster_name']:
config_settings['opsad_cluster_name'] = yaml_config['opsad_cluster_name']
return config_settings
@staticmethod
def upload_data(config_dict):
""" Use the current AWS_PROFILE to upload files to the specified bucket.
Raises:
A ValueError if the specified bucket can not be found.
"""
logfile = config_dict['opsad_log_file']
hostname = config_dict['opsad_host_name']
credsfile = config_dict['opsad_creds_file']
bucket = config_dict['opsad_s3_bucket']
        cluster = config_dict['opsad_cluster_name']
os.environ["AWS_SHARED_CREDENTI
|
ALS_FILE"] = credsfile
s3_session = boto3.resource('s3')
exists = True
try:
s3_session.meta.client.head_bucket(Bucket=bucket)
except botocore.exceptions.ClientError as client_exception:
error_code = int(client_exception.response['Error']['Code'])
if error_code == 404:
exists = False
if exists:
s3_client = boto3.resource('s3')
s3_bucket = s3_client.Bucket(bucket)
if os.path.isfile(logfile):
print('\nUploading logfile to %s bucket.' % bucket)
with open(logfile) as open_file:
log_data = open_file.read()
bucket_path = cluster + '/' + \
hostname + '/' + \
datetime.utcnow().strftime('%Y') + '/' + \
datetime.utcnow().strftime('%m') + '/' + \
datetime.utcnow().strftime('%d') + '_status.txt'
s3_bucket.put_object(Key=bucket_path, Body=log_data)
else:
raise ValueError(logfile + ' does not exist.')
else:
raise ValueError(bucket + ' does not exist.')
#pylint: disable=no-member
def main(self):
""" Main function. """
zag = ZaggSender()
config_dict = self.get_config('/etc/openshift_tools/scanreport_config.yml')
logfile = config_dict['opsad_log_file']
result_status = self.search_logfile(logfile)
check = 'psad.found.scanner'
zag.add_zabbix_keys({check: result_status})
zag.send_metrics()
if result_status > 0:
self.upload_data(config_dict)
if __name__ == '__main__':
PSAD_STATUS = CheckStatus()
PSAD_STATUS.main()
|
martiert/bitbake
|
lib/bb/fetch2/osc.py
|
Python
|
gpl-2.0
| 4,506 | 0.00466 |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
Bitbake "Fetch" implementation for osc (Opensuse build service client).
Based on the svn "Fetch" implementation.
"""
import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
class Osc(FetchMethod):
"""Class to fetch a module or modules from Opensuse build server
repositories."""
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with osc.
"""
return ud.type in ['osc']
def urldata_init(self, ud, d):
if not "module" in ud.parm:
raise MissingParameterError('module', ud.url)
ud.module = ud.parm["module"]
# Create paths to osc checkouts
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host)
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
if 'rev' in ud.parm:
ud.revision = ud.parm['rev']
else:
pv = data.getVar("PV", d, 0)
rev = bb.fetch2.srcrev_internal_helper(ud, d)
if rev and rev != True:
ud.revision = rev
else:
ud.revision = ""
ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)
def _buildosccommand(self, ud, d, command):
"""
        Build up an osc commandline based on ud
        command is "fetch", "update", "info"
"""
basecmd = data.expand('${FETCHCMD_osc}', d)
proto = ud.parm.get('protocol', 'ocs')
options = []
config = "-c %s" % self.generate_config(ud, d)
if ud.revision:
options.append("-r %s" % ud.revision)
coroot = self._strip_leading_slashes(ud.path)
if command == "fetch":
osccmd = "%s %s
|
co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
elif command == "update":
osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
else:
raise FetchError("Invalid osc command %s" % command, ud.url)
return osccmd
def download(self, loc, ud, d):
"""
Fetch url
"""
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK):
oscupdatecmd = self._buildosccommand(ud, d, "update")
logger.info("Update "+ loc)
# update sources there
os.chdir(ud.moddir)
logger.debug(1, "Running %s", oscupdatecmd)
bb.fetch2.check_network_access(d, oscupdatecmd, ud.url)
runfetchcmd(oscupdatecmd, d)
else:
oscfetchcmd = self._buildosccommand(ud, d, "fetch")
logger.info("Fetch " + loc)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", oscfetchcmd)
bb.fetch2.check_network_access(d, oscfetchcmd, ud.url)
runfetchcmd(oscfetchcmd, d)
os.chdir(os.path.join(ud.pkgdir + ud.path))
# tar them up to a defined filename
runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d, cleanup = [ud.localpath])
def supports_srcrev(self):
return False
def generate_config(self, ud, d):
"""
Generate a .oscrc to be used for this run.
"""
config_path = os.path.join(data.expand('${OSCDIR}', d), "oscrc")
if (os.path.exists(config_path)):
os.remove(config_path)
f = open(config_path, 'w')
f.write("[general]\n")
f.write("apisrv = %s\n" % ud.host)
f.write("scheme = http\n")
f.write("su-wrapper = su -c\n")
f.write("build-root = %s\n" % data.expand('${WORKDIR}', d))
f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n")
f.write("extra-pkgs = gzip\n")
f.write("\n")
f.write("[%s]\n" % ud.host)
f.write("user = %s\n" % ud.parm["user"])
f.write("pass = %s\n" % ud.parm["pswd"])
f.close()
return config_path
|
PyCQA/astroid
|
tests/testdata/python3/data/conditional.py
|
Python
|
lgpl-2.1
| 87 | 0.011494 |
from data.conditional_import import (
dump,
# dumps,
# load,
# loads,
)
|
kreatorkodi/repository.torrentbr
|
plugin.video.yatp/libs/client/actions.py
|
Python
|
gpl-2.0
| 8,310 | 0.003851 |
# coding: utf-8
# Module: actions
# Created on: 27.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. (romanvm@yandex.ua)
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
import os
import xbmcgui
import xbmcplugin
from simpleplugin import Plugin
import json_requests as jsonrq
from buffering import buffer_torrent, stream_torrent, add_torrent, get_videofiles
plugin = Plugin()
_ = plugin.initialize_gettext()
icons = os.path.join(plugin.path, 'resources', 'icons')
commands = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'commands.py')
def _play(path):
"""
Play a videofile
:param path:
:return:
"""
plugin.log_notice('Path to play: {0}'.format(path))
return plugin.resolve_url(path, succeeded=bool(path))
@plugin.action()
def root():
"""
Plugin root
"""
return [{'label': _('Play .torrent file...'),
'thumb': os.path.join(icons, 'play.png'),
'url': plugin.get_url(action='select_torrent', target='play')},
{'label': _('Download torrent from .torrent file...'),
'thumb': os.path.join(icons, 'down.png'),
'url': plugin.get_url(action='select_torrent', target='download'),
'is_folder': False},
{'label': _('Torrents'),
'thumb': plugin.icon,
'url': plugin.get_url(action='torrents')}]
@plugin.action()
def select_torrent(params):
"""
Select .torrent file to play
:param params:
:return:
"""
torrent = xbmcgui.Dialog().browse(1, _('Select .torrent file'), 'video', mask='.torrent')
if torrent:
plugin.log_notice('Torrent selected: {0}'.format(torrent))
if params['target'] == 'play':
return list_files({'torrent': torrent})
else:
download_torrent({'torrent': torrent})
@plugin.action('play')
def play_torrent(params):
"""
Play torrent
:param params:
:return:
"""
file_index = params.get('file_index')
if file_index is not None and file_index != 'dialog':
file_index = int(file_index)
return _play(buffer_torrent(params['torrent'], file_index))
@plugin.action()
def play_file(params):
"""
Stream a file from torrent by its index
The torrent must be already added to the session!
:param params:
:return:
"""
return _play(stream_torrent(int(params['file_index']), params['info_hash']))
@plugin.action('download')
def download_torrent(params):
"""
Add torrent for downloading
:param params:
:return:
"""
jsonrq.add_torrent(params['torrent'], False)
xbmcgui.Dialog().notification('YATP', _('Torrent added for downloading'), plugin.icon, 3000)
@plugin.action()
def torrents():
"""
Display the list of torrents in the session
"""
torrent_list = sorted(jsonrq.get_all_torrent_info(), key=lambda i: i['added_time'], reverse=True)
for torrent in torrent_list:
if torrent['state'] == 'downloading':
label = '[COLOR=red]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
elif torrent['state'] == 'seeding':
label = '[COLOR=green]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
elif torrent['state'] == 'paused':
label = '[COLOR=gray]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
else:
label = '[COLOR=blue]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
item = {'label': label,
'url': plugin.get_url(action='show_files', info_hash=torrent['info_hash']),
'is_folder': True}
if torrent['state'] == 'downloading':
item['thumb'] = os.path.join(icons, 'down.png')
elif torrent['state'] == 'seeding':
item['thumb'] = os.path.join(icons, 'up.png')
elif torrent['state'] == 'paused':
item['thumb'] = os.path.join(icons, 'pause.png')
else:
item['thumb'] = os.path.join(icons, 'question.png')
context_menu = [(_('Pause all torrents'),
'RunScript({commands},pause_all)'.format(commands=commands)),
(_('Resume all torrents'),
'RunScript({commands},resume_all)'.format(commands=commands)),
(_('Delete torrent'),
'RunScript({commands},delete,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
(_('Delete torrent and files'),
'RunScript({commands},delete_with_files,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
(_('Torrent info'),
'RunScript({commands},show_info,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
]
if torrent['state'] == 'paused':
context_menu.insert(0, (_('Resume torrent'),
'RunScript({commands},resume,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])))
else:
context_menu.insert(0, (_('Pause torrent'),
'RunScript({commands},pause,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])))
if torrent['state'] == 'incomplete':
context_menu.append((_('Complete download'),
'RunScript({commands},restore_finished,{info_hash})'.format(
                                     commands=commands,
                                     info_hash=torrent['info_hash'])))
item['context_menu'] = context_menu
yield item
def _build_file_list(files, info_hash):
"""
Create the list of videofiles in a torrent
:param files:
    :param info_hash:
    :return:
"""
videofiles = get_videofiles(files)
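    # Each file_ entry is treated as a (file_index, file_name, size_in_bytes) tuple;
    # the size is shown in MB (1048576 bytes) in the label below.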
for file_ in videofiles:
ext = os.path.splitext(file_[1].lower())[1]
if ext == '.avi':
thumb = os.path.join(icons, 'avi.png')
elif ext == '.mp4':
thumb = os.path.join(icons, 'mp4.png')
elif ext == '.mkv':
thumb = os.path.join(icons, 'mkv.png')
elif ext == '.mov':
thumb = os.path.join(icons, 'mov.png')
else:
thumb = os.path.join(icons, 'play.png')
yield {'label': '{name} [{size}{unit}]'.format(name=file_[1].encode('utf-8'),
size=file_[2] / 1048576,
unit=_('MB')),
'thumb': thumb,
'url': plugin.get_url(action='play_file',
info_hash=info_hash,
file_index=file_[0]),
'is_playable': True,
'info': {'video': {'size': file_[2]}},
}
@plugin.action()
def list_files(params):
"""
Add a torrent to the session and display the list of files in a torrent
:param params:
:return:
"""
torrent_data = add_torrent(params['torrent'])
if torrent_data is not None:
return plugin.create_listing(_build_file_list(torrent_data['files'], torrent_data['info_hash']),
cache_to_disk=True,
sort_methods=(xbmcplugin.SORT_METHOD_LABEL, xbmcplugin.SORT_METHOD_SIZE))
xbmcgui.Dialog().notification(plugin.id, _('Playback cancelled.'), plugin.icon, 3000)
return []
@plugin.action()
def show_files(params):
"""
Display the list of videofiles
:param params:
:return:
"""
    return plugin.create_listing(_build_file_list(jsonrq.get_files(params['info_hash']), params['info_hash']))
sem-geologist/hyperspy
|
hyperspy/tests/misc/test_test_utils.py
|
Python
|
gpl-3.0
| 5,176 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import warnings
from hyperspy.misc.test_utils import ignore_warning, assert_warns, all_warnings
def warnsA():
warnings.warn("Warning A!", UserWarning)
def warnsB():
warnings.warn("Warning B!", DeprecationWarning)
def warnsC():
warnings.warn("Warning C!")
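# warnings.warn() without an explicit category defaults to UserWarning,
# which the category-based tests below rely on.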
def test_ignore_full_message():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning A!"):
warnsA()
with ignore_warning(message="Warning B!"):
warnsB()
with ignore_warning(message="Warning C!"):
warnsC()
def test_ignore_partial_message():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning"):
warnsA()
warnsB()
warnsC()
def test_ignore_regex_message():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning .?!"):
warnsA()
warnsB()
warnsC()
def test_ignore_message_fails():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning [AB]!"):
warnsA()
warnsB()
try:
warnsC()
except UserWarning as e:
assert str(e) == "Warning C!"
else:
raise ValueError("Expected warning to give error!")
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning A! Too much"):
try:
warnsA()
except UserWarning as e:
assert str(e) == "Warning A!"
else:
raise ValueError("Expected warning to give error!")
def test_ignore_type():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(category=UserWarning):
warnsA()
warnsC()
with ignore_warning(category=DeprecationWarning):
warnsB()
def test_ignore_type_fails():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(category=UserWarning):
try:
warnsB()
except DeprecationWarning as e:
assert str(e) == "Warning B!"
else:
raise ValueError("Expected warning to give error!")
def test_assert_warns_full_message():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(message="Warning A!"):
warnsA()
with assert_warns(message="Warning B!"):
warnsB()
with assert_warns(message="Warning C!"):
warnsC()
with assert_warns(message=["Warning A!", "Warning B!", "Warning C!"]):
warnsA()
warnsB()
warnsC()
def test_assert_warns_partial_message():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(message="Warning"):
warnsA()
warnsB()
warnsC()
def test_assert_warns_regex_message():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(message="Warning .?!"):
warnsA()
warnsB()
warnsC()
def test_assert_warns_message_fails():
with all_warnings():
warnings.simplefilter("error")
        try:
            with assert_warns(message="Warning [AB]!"):
warnsC()
except ValueError:
pass
else:
raise AssertionError("ValueError expected!")
with all_warnings():
        warnings.simplefilter("error")
try:
with assert_warns(message="Warning A! Too much"):
warnsA()
except ValueError:
pass
else:
raise ValueError("ValueError expected!")
def test_assert_warns_type():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(category=UserWarning):
warnsA()
warnsC()
with assert_warns(category=DeprecationWarning):
warnsB()
def test_assert_warns_type_fails():
with all_warnings():
warnings.simplefilter("error")
try:
with assert_warns(category=UserWarning):
warnsB()
except ValueError:
pass
else:
raise ValueError("Expected warning to give error!")
|
Parlin-Galanodel/scrapy
|
tests/test_pipeline_files.py
|
Python
|
bsd-3-clause
| 17,823 | 0.002749 |
import os
import random
import time
import hashlib
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from six.moves.urllib.parse import urlparse
from six import BytesIO
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.pipelines.files import FilesPipeline, FSFilesStore, S3FilesStore, GCSFilesStore
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.utils.python import to_bytes
from scrapy.utils.test import assert_aws_environ, get_s3_content_and_delete
from scrapy.utils.test import assert_gcs_environ, get_gcs_content_and_delete
from scrapy.utils.boto import is_botocore
from tests import mock
def _mocked_download_func(request, info):
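    # Stand-in for the pipeline's download function: it returns whatever response was
    # stashed in request.meta (calling it first if it is callable), so these tests
    # never touch the network.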
response = request.meta.get('response')
return response() if callable(response) else response
class FilesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider(None)
def tearDown(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt")),
'full/4ce274dd83db0368bafd7e406f382ae088e39219.txt')
self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")),
'full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc')
self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object()),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
def test_fs_store(self):
assert isinstance(self.pipeline.store, FSFilesStore)
self.assertEqual(self.pipeline.store.basedir, self.tempdir)
path = 'some/image/key.jpg'
fullpath = os.path.join(self.tempdir, 'some', 'image', 'key.jpg')
self.assertEqual(self.pipeline.store._get_filesystem_path(path), fullpath)
@defer.inlineCallbacks
def test_file_not_expired(self):
item_url = "http://example.com/file.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True),
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc', 'last_modified': time.time()}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)])
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)
self.assertEqual(result['files'][0]['checksum'], 'abc')
for p in patchers:
p.stop()
@defer.inlineCallbacks
def test_file_expired(self):
item_url = "http://example.com/file2.pdf"
item = _create_item_with_files(item_url)
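        # stat_file reports a last_modified twice as old as the expiry window, so the
        # pipeline should treat the cached file as stale and produce a fresh checksum.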
patchers = [
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc',
'last_modified': time.time() - (self.pipeline.expires * 60 * 60 * 24 * 2)}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)]),
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True)
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)
self.assertNotEqual(result['files'][0]['checksum'], 'abc')
for p in patchers:
p.stop()
class DeprecatedFilesPipeline(FilesPipeline):
def file_key(self, url):
media_guid = hashlib.sha1(to_bytes(url)).hexdigest()
media_ext = os.path.splitext(url)[1]
return 'empty/%s%s' % (media_guid, media_ext)
class DeprecatedFilesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
def init_pipeline(self, pipeline_class):
self.pipeline = pipeline_class.from_settings(Settings({'FILES_STORE': self.tempdir}))
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider(None)
def test_default_file_key_method(self):
self.init_pipeline(FilesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_key("https://dev.mydeco.com/mydeco.pdf"),
'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(len(w), 1)
self.assertTrue('file_key(url) method is deprecated' in str(w[-1].message))
def test_overridden_file_key_method(self):
self.init_pipeline(DeprecatedFilesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
'empty/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(len(w), 1)
self.assertTrue('file_key(url) method is deprecated' in str(w[-1].message))
def tearDown(self):
rmtree(self.tempdir)
class FilesPipelineTestCaseFields(unittest.TestCase):
def test_item_fields_default(self):
class TestItem(Item):
name = Field()
file_urls = Field()
files = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'file_urls': [url]})
pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': 's3://example/files/'}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['files'], [results[0][1]])
def test_item_fields_override_settings(self):
class TestItem(Item):
name = Field()
files = Field()
stored_file = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'files': [url]})
pipeline = FilesPipeline.from_settings(Settings({
'FILES_STORE': 's3://example/files/',
'FILES_URLS_FIELD': 'files',
'FILES_RESULT_FIELD': 'stored_file'
}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['stored_file'], [results[0][1]])
class FilesPipelineTestCaseCustomSettings(unittest.TestCase):
    default_cls_settings = {
        "EXPIRES": 90,
        "FILES_URLS_FIELD": "file_urls",
        "FILES_RESULT_FIELD": "files"
    }
file_cls_attr_settings_map = {
("E
|
radiosilence/pip
|
tests/test_vcs_backends.py
|
Python
|
mit
| 5,930 | 0.003373 |
from tests.test_pip import (reset_env, run_pip,
_create_test_package, _change_test_package_version)
from tests.local_repos import local_checkout
def test_install_editable_from_git_with_https():
"""
Test cloning from Git with https.
"""
reset_env()
result = run_pip('install', '-e',
'%s#egg=pip-test-package' %
local_checkout('git+https://github.com/pypa/pip-test-package.git'),
expect_error=True)
result.assert_installed('pip-test-package', with_files=['.git'])
def test_git_with_sha1_revisions():
"""
Git backend should be able to install from SHA1 revisions
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
_change_test_package_version(env, version_pkg_path)
sha1 = env.run('git', 'rev-parse', 'HEAD~1', cwd=version_pkg_path).stdout.strip()
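    # HEAD~1 is the commit before _change_test_package_version bumped the version,
    # so installing from that SHA1 should still report version 0.1.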
run_pip('install', '-e', '%s@%s#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/'), sha1))
version = env.run('version_pkg')
assert '0.1' in version.stdout, version.stdout
def test_git_with_branch_name_as_revision():
"""
Git backend should be able to install from branch names
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'checkout', '-b', 'test_branch', expect_stderr=True, cwd=version_pkg_path)
_change_test_package_version(env, version_pkg_path)
    run_pip('install', '-e', '%s@test_branch#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/')))
version = env.run('version_pkg')
    assert 'some different version' in version.stdout
def test_git_with_tag_name_as_revision():
"""
Git backend should be able to install from tag names
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'tag', 'test_tag', expect_stderr=True, cwd=version_pkg_path)
_change_test_package_version(env, version_pkg_path)
run_pip('install', '-e', '%s@test_tag#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/')))
version = env.run('version_pkg')
assert '0.1' in version.stdout
def test_git_with_tag_name_and_update():
"""
Test cloning a git repository and updating to a different version.
"""
reset_env()
result = run_pip('install', '-e', '%s#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
result.assert_installed('pip-test-package', with_files=['.git'])
result = run_pip('install', '--global-option=--version', '-e',
'%s@0.1.2#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
assert '0.1.2' in result.stdout
def test_git_branch_should_not_be_changed():
"""
Editable installations should not change branch
related to issue #32 and #161
"""
env = reset_env()
run_pip('install', '-e', '%s#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
source_dir = env.venv_path/'src'/'pip-test-package'
result = env.run('git', 'branch', cwd=source_dir)
assert '* master' in result.stdout, result.stdout
def test_git_with_non_editable_unpacking():
"""
Test cloning a git repository from a non-editable URL with a given tag.
"""
reset_env()
result = run_pip('install', '--global-option=--version', local_checkout(
'git+http://github.com/pypa/pip-test-package.git@0.1.2#egg=pip-test-package'
), expect_error=True)
assert '0.1.2' in result.stdout
def test_git_with_editable_where_egg_contains_dev_string():
"""
Test cloning a git repository from an editable url which contains "dev" string
"""
reset_env()
result = run_pip('install', '-e', '%s#egg=django-devserver' %
local_checkout('git+git://github.com/dcramer/django-devserver.git'))
result.assert_installed('django-devserver', with_files=['.git'])
def test_git_with_non_editable_where_egg_contains_dev_string():
"""
Test cloning a git repository from a non-editable url which contains "dev" string
"""
env = reset_env()
result = run_pip('install', '%s#egg=django-devserver' %
local_checkout('git+git://github.com/dcramer/django-devserver.git'))
devserver_folder = env.site_packages/'devserver'
assert devserver_folder in result.files_created, str(result)
def test_git_with_ambiguous_revs():
"""
Test git with two "names" (tag/branch) pointing to the same commit
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
package_url = 'git+file://%s@0.1#egg=version_pkg' % (version_pkg_path.abspath.replace('\\', '/'))
env.run('git', 'tag', '0.1', cwd=version_pkg_path)
result = run_pip('install', '-e', package_url)
assert 'Could not find a tag or branch' not in result.stdout
# it is 'version-pkg' instead of 'version_pkg' because
# egg-link name is version-pkg.egg-link because it is a single .py module
result.assert_installed('version-pkg', with_files=['.git'])
def test_git_works_with_editable_non_origin_repo():
# set up, create a git repo and install it as editable from a local directory path
env = reset_env()
version_pkg_path = _create_test_package(env)
run_pip('install', '-e', version_pkg_path.abspath)
# 'freeze'ing this should not fall over, but should result in stderr output warning
result = run_pip('freeze', expect_stderr=True)
assert "Error when trying to get requirement" in result.stderr
assert "Could not determine repository location" in result.stdout
assert "version-pkg==0.1" in result.stdout
|
synth3tk/the-blue-alliance
|
helpers/event_insights_helper.py
|
Python
|
mit
| 6,320 | 0.002848 |
import logging
from collections import defaultdict
class EventInsightsHelper(object):
@classmethod
def calculate_event_insights(cls, matches, year):
INSIGHTS_MAP = {
2016: cls.calculate_event_insights_2016
}
if year in INSIGHTS_MAP:
return INSIGHTS_MAP[year](matches)
else:
return None
@classmethod
def calculate_event_insights_2016(cls, matches):
qual_matches = []
playoff_matches = []
for match in matches:
if match.comp_level == 'qm':
qual_matches.append(match)
else:
playoff_matches.append(match)
qual_insights = cls._calculate_event_insights_2016_helper(qual_matches)
playoff_insights = cls._calculate_event_insights_2016_helper(playoff_matches)
return {
'qual': qual_insights,
'playoff': playoff_insights,
}
@classmethod
def _calculate_event_insights_2016_helper(cls, matches):
# defenses
defense_opportunities = defaultdict(int)
defense_damaged = defaultdict(int)
breaches = 0
# towers
high_goals = 0
low_goals = 0
challenges = 0
scales = 0
captures = 0
# scores
winning_scores = 0
win_margins = 0
total_scores = 0
auto_scores = 0
crossing_scores = 0
boulder_scores = 0
tower_scores = 0
foul_scores = 0
high_score = [0, "", ""] # score, match key, match name
finished_matches = 0
for match in matches:
if not match.has_been_played:
continue
red_score = match.alliances['red']['score']
blue_score = match.alliances['blue']['score']
win_score = max(red_score, blue_score)
winning_scores += win_score
win_margins += (win_score - min(red_score, blue_score))
total_scores += red_score + blue_score
if win_score > high_score[0]:
high_score = [win_score, match.key_name, match.short_name]
for alliance_color in ['red', 'blue']:
try:
alliance_breakdown = match.score_breakdown[alliance_color]
auto_scores += alliance_breakdown['autoPoints']
crossing_scores += alliance_breakdown['teleopCrossingPoints']
boulder_scores += alliance_breakdown['teleopBoulderPoints']
tower_scores += alliance_breakdown['teleopChallengePoints'] + alliance_breakdown['teleopScalePoints']
foul_scores += alliance_breakdown['foulPoints']
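                    # position1 appears to always be the low bar in the 2016 score
                    # breakdown; positions 2-5 name the other defenses on the field.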
pos1 = 'LowBar'
pos2 = alliance_breakdown['position2']
pos3 = alliance_breakdown['position3']
pos4 = alliance_breakdown['position4']
pos5 = alliance_breakdown['position5']
positions = [pos1, pos2, pos3, pos4, pos5]
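                    # A defense counts as damaged once it has been crossed twice,
                    # hence the == 2 check on the per-position crossing count below.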
for pos_idx, pos in enumerate(positions):
defense_opportunities[pos] += 1
if alliance_breakdown['position{}crossings'.format(pos_idx + 1)] == 2:
defense_damaged[pos] += 1
breaches += 1 if alliance_breakdown['teleopDefensesBreached'] else 0
high_goals += alliance_breakdown['autoBouldersHigh'] + alliance_breakdown['teleopBouldersHigh']
low_goals += alliance_breakdown['autoBouldersLow'] + alliance_breakdown['teleopBouldersLow']
captures += 1 if alliance_breakdown['teleopTowerCaptured'] else 0
for tower_face in ['towerFaceA', 'towerFaceB', 'towerFaceC']:
if alliance_breakdown[tower_face] == 'Challenged':
challenges += 1
elif alliance_breakdown[tower_face] == 'Scaled':
scales += 1
                except Exception as e:
                    logging.error("Event insights failed for {}: {}".format(match.key.id(), e))
finished_matches += 1
if finished_matches == 0:
return {}
opportunities_1x = 2 * finished_matches # once per alliance
opportunities_3x = 6 * finished_matches # 3x per alliance
event_insights = {
'LowBar': [0, 0, 0],
'A_ChevalDeFrise': [0, 0, 0],
'A_Portcullis': [0, 0, 0],
'B_Ramparts': [0, 0, 0],
        'B_Moat': [0, 0, 0],
        'C_SallyPort': [0, 0, 0],
'C_Drawbridge': [0, 0, 0],
'D_RoughTerrain': [0, 0, 0],
'D_RockWall': [0, 0, 0],
'average_high_goals': float(high_goals) / (2 * finished_matches),
        'average_low_goals': float(low_goals) / (2 * finished_matches),
'breaches': [breaches, opportunities_1x, 100.0 * float(breaches) / opportunities_1x], # [# success, # opportunities, %]
'scales': [scales, opportunities_3x, 100.0 * float(scales) / opportunities_3x],
'challenges': [challenges, opportunities_3x, 100.0 * float(challenges) / opportunities_3x],
'captures': [captures, opportunities_1x, 100.0 * float(captures) / opportunities_1x],
'average_win_score': float(winning_scores) / finished_matches,
'average_win_margin': float(win_margins) / finished_matches,
'average_score': float(total_scores) / (2 * finished_matches),
'average_auto_score': float(auto_scores) / (2 * finished_matches),
'average_crossing_score': float(crossing_scores) / (2 * finished_matches),
'average_boulder_score': float(boulder_scores) / (2 * finished_matches),
'average_tower_score': float(tower_scores) / (2 * finished_matches),
'average_foul_score': float(foul_scores) / (2 * finished_matches),
'high_score': high_score, # [score, match key, match name]
}
for defense, opportunities in defense_opportunities.items():
event_insights[defense] = [defense_damaged[defense], opportunities, 100.0 * float(defense_damaged[defense]) / opportunities] # [# damaged, # opportunities, %]
return event_insights
|
brunorijsman/coding-katas
|
bank-ocr-python/bank-ocr.py
|
Python
|
mit
| 3,837 | 0.019547 |
import copy
pattern_raw_digits = [
[' _ ',
'| |',
'|_|'],
[' ',
' |',
' |'],
[' _ ',
' _|',
'|_ '],
[' _ ',
' _|',
' _|'],
[' ',
'|_|',
' |'],
[' _ ',
'|_ ',
' _|'],
[' _ ',
'|_ ',
'|_|'],
[' _ ',
' |',
' |'],
[' _ ',
'|_|',
'|_|'],
[' _ ',
'|_|',
' _|']]
def read_raw_number(file):
raw_number = []
for row in range(3):
line = file.readline()
if line == "":
return None
line = line.rstrip('\n')
raw_number.append(line)
file.readline()
return raw_number
def print_raw_number(raw_number):
for i in range(3):
print(raw_number[i])
def read_expected_result(file):
return file.readline().rstrip('\n')
def parse_raw_digit(raw_digit):
for digit in range(10):
if pattern_raw_digits[digit] == raw_digit:
return str(digit)
return '?'
def parse_raw_number(raw_number):
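    # An account number is nine 3x3 glyphs laid out side by side, so digit i
    # occupies columns 3*i .. 3*i+2 of each of the three rows.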
number = ''
for digit_index in range(9):
raw_digit = []
for row in range(3):
start = digit_index * 3
end = start + 3
raw_digit_line = raw_number[row][start:end]
raw_digit.append(raw_digit_line)
digit = parse_raw_digit(raw_digit)
number += digit
return number
def is_valid(number):
if len(number) != 9:
return False
for i in range(9):
digit = number[i]
if not digit in "0123456789":
return False
return True
# assumes number is valid
def is_checksum_ok(number):
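    # Kata checksum: (1*d1 + 2*d2 + ... + 9*d9) mod 11 == 0, where d1 is the rightmost
    # digit; (9 - i) supplies that weight for the i-th character from the left.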
total = 0
for i in range(9):
digit = number[i]
total += int(digit) * (9 - i)
return (total % 11) == 0
def classify_number(number):
if is_valid(number):
if is_checksum_ok(number):
return ""
else:
return " ERR"
    else:
        return " ILL"
def change_one_char(raw_number, row, col, new_char):
new_raw_number = copy.copy(raw_number)
new_raw_number[row] = raw_number[row][:col] + new_char + raw_number[row][col+1:]
    return new_raw_number
def find_all_guesses(raw_number):
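    # Try flipping every single cell between blank and a stroke ('|' or '_') and keep
    # each mutation that parses to a valid number with a correct checksum.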
guesses = []
for row in range(3):
for col in range(27):
char = raw_number[row][col]
if (char == '_') or (char == '|'):
guess_raw_number = change_one_char(raw_number, row, col, ' ')
guess_number = parse_raw_number(guess_raw_number)
if classify_number(guess_number) == "":
guesses.append(guess_number)
elif (char == ' '):
guess_raw_number = change_one_char(raw_number, row, col, '|')
guess_number = parse_raw_number(guess_raw_number)
if classify_number(guess_number) == "":
guesses.append(guess_number)
guess_raw_number = change_one_char(raw_number, row, col, '_')
guess_number = parse_raw_number(guess_raw_number)
if classify_number(guess_number) == "":
guesses.append(guess_number)
print(guesses)
return guesses
def parse_and_classify_raw_number(raw_number):
number = parse_raw_number(raw_number)
classify = classify_number(number)
if classify != "":
guesses = find_all_guesses(raw_number)
if len(guesses) == 1:
number = guesses[0]
classify = classify_number(number)
elif len(guesses) > 1:
classify = " AMB " + str(sorted(guesses))
return number + classify
def run_all_test_cases():
file = open('test-data.txt')
fail_count = 0
while True:
raw_number = read_raw_number(file)
        if raw_number is None:
break
result = parse_and_classify_raw_number(raw_number)
expected_result = read_expected_result(file)
print_raw_number(raw_number)
print('expected result:', expected_result)
print('result :', result)
if result == expected_result:
print('pass')
else:
print('fail')
fail_count += 1
print()
if fail_count == 0:
print("ALL PASS")
else:
print(fail_count, "FAILURE(S)")
file.close()
run_all_test_cases()
|
Tanmay28/coala
|
coalib/tests/bears/LocalBearTest.py
|
Python
|
agpl-3.0
| 585 | 0.005128 |
import sys
sys.path.insert(0, ".")
import unittest
from coalib.settings.Section import Section
from coalib.bears.LocalBear import LocalBear, BEAR_KIND
class LocalBearTest(unittest.TestCase):
def test_api(self):
test_object = LocalBear(Section("name"), None)
self.assertRaises(NotImplementedError,
test_object.run,
"filename",
["file\n"])
def test_kind(self):
        self.assertEqual(LocalBear.kind(),
                         BEAR_KIND.LOCAL)
if __name__ == '__main__':
unittest.main(verbosity=2)
|