repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k)
---|---|---|---|---|---|---|---|---
ericholscher/cookiecutter | tests/__init__.py | Python | bsd-3-clause | 3,893 | 0.003596
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__init__
---------
Contains testing helpers.
"""
import os
import shutil
import stat
import sys
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest
else:
import unittest
def force_delete(func, path, exc_info):
"""
Error handler for `shutil.rmtree()` equivalent to `rm -rf`
Usage: `shutil.rmtree(path, onerror=force_delete)`
From stackoverflow.com/questions/2656322
"""
if not os.access(path, os.W_OK):
# Is the error an access error?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
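# Illustrative usage sketch (not part of the original module): remove a tree
# that contains read-only files; 'stale_dir' here is a hypothetical path.
#
#     shutil.rmtree('stale_dir', onerror=force_delete)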
class CookiecutterCleanSystemTestCase(unittest.TestCase):
"""
Test case that simulates a clean system with no config/cloned cookiecutters.
During setUp:
* Back up the `~/.cookiecutterrc` config file to `~/.cookiecutterrc.backup`
* Back up the `~/.cookiecutters/` dir to `~/.cookiecutters.backup/`
* Starts off a test case with no pre-existing `~/.cookiecutterrc` or
`~/.cookiecutters/`
During tearDown:
* Delete `~/.cookiecutters/` only if a backup is present at
`~/.cookiecutters.backup/`
* Restore the `~/.cookiecutterrc` config file from `~/.cookiecutterrc.backup`
* Restore the `~/.cookiecutters/` dir from `~/.cookiecutters.backup/`
"""
def setUp(self):
# If ~/.cookiecutterrc is pre-existing, move it to a temp location
self.user_config_path = os.path.expanduser('~/.cookiecutterrc')
self.user_config_path_backup = os.path.expanduser(
'~/.cookiecutterrc.backup'
)
if os.path.exists(self.user_config_path):
self.user_config_found = True
shutil.copy(self.user_config_path, self.user_config_path_backup)
os.remove(self.user_config_path)
else:
self.user_config_found = False
# If the default cookiecutters_dir is pre-existing, move it to a
# temp location
self.cookiecutters_dir = os.path.expanduser('~/.cookiecutters')
self.cookiecutters_dir_backup = os.path.expanduser('~/.cookiecutters.backup')
if os.path.isdir(self.cookiecutters_dir):
self.cookiecutters_dir_found = True
# Remove existing backups before backing up. If they exist, they're stale.
if os.path.isdir(self.cookiecutters_dir_backup):
shutil.rmtree(self.cookiecutters_dir_backup)
shutil.copytree(self.cookiecutters_dir, self.cookiecutters_dir_backup)
else:
self.cookiecutters_dir_found = False
def tearDown(self):
# If it existed, restore ~/.cookiecutterrc
        # We never write to ~/.cookiecutterrc, so this logic is simpler.
if self.user_config_found and os.path.exists(self.user_config_path_backup):
shutil.copy(self.user_config_path_backup, self.user_config_path)
os.remove(self.user_config_path_backup)
# Carefully delete the created ~/.cookiecutters dir only in certain
# conditions.
if self.cookiecutters_dir_found:
# Delete the created ~/.cookiecutters dir as long as a backup exists
if os.path.isdir(self.cookiecutters_dir) and os.path.isdir(self.cookiecutters_dir_backup):
shutil.rmtree(self.cookiecutters_dir)
else:
# Delete the created ~/.cookiecutters dir.
# There's no backup because it never existed
if os.path.isdir(self.cookiecutters_dir):
shutil.rmtree(self.cookiecutters_dir)
# Restore the user's default cookiecutters_dir contents
if os.path.isdir(self.cookiecutters_dir_backup):
shutil.copytree(self.cookiecutters_dir_backup, self.cookiecutters_dir)
if os.path.isdir(self.cookiecutters_dir):
shutil.rmtree(self.cookiecutters_dir_backup)
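    # Minimal usage sketch (illustrative, not from the original file): a test
    # that inherits the clean-system setUp/tearDown behaviour.
    #
    #     class TestCleanConfig(CookiecutterCleanSystemTestCase):
    #         def test_no_user_config(self):
    #             self.assertFalse(os.path.exists(self.user_config_path))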
sjh/python | default_dict.py | Python | apache-2.0 | 477 | 0.002096
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
u""" One way of implementing default dictionary. """
class DefaultDict(dict):
def __missing__(self, key):
u""" Return default value as key if no value specified dictionary key. """
return key
if __name__ == "__main__":
d = DefaultDict()
print(d, type(d), d.keys())
d['flop'] = 127
print(d, type(d), d.keys())
d['flip'] = 130
print(d, type(d), d.keys())
print(d['no_value'])
thomaslima/PySpice | PySpice/Spice/Simulation.py | Python | gpl-3.0 | 14,801 | 0.004256
###################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import logging
####################################################################################################
from ..Tools.StringTools import join_list, join_dict
from .NgSpice.Shared import NgSpiceShared
from .Server import SpiceServer
####################################################################################################
_module_logger = logging.getLogger(__name__)
####################################################################################################
class CircuitSimulation:
"""Define and generate the spice instruction to perform a circuit simulation.
.. warning:: In some cases NgSpice can perform several analyses one after the other. This case
is partially supported.
"""
_logger = _module_logger.getChild('CircuitSimulation')
##############################################
def __init__(self, circuit,
temperature=27,
nominal_temperature=27,
pipe=True,
):
self._circuit = circuit
self._options = {} # .options
self._initial_condition = {} # .ic
self._saved_nodes = ()
self._analysis_parameters = {}
self.temperature = temperature
self.nominal_temperature = nominal_temperature
if pipe:
self.options('NOINIT')
self.options(filetype='binary')
##############################################
@property
def circuit(self):
return self._circuit
##############################################
def options(self, *args, **kwargs):
for item in args:
self._options[str(item)] = None
for key, value in kwargs.items():
self._options[str(key)] = str(value)
##############################################
@property
def temperature(self):
return self._options['TEMP']
@temperature.setter
def temperature(self, value):
self._options['TEMP'] = value
##############################################
@property
def nominal_temperature(self):
return self._options['TNOM']
@nominal_temperature.setter
def nominal_temperature(self, value):
self._options['TNOM'] = value
##############################################
def initial_condition(self, **kwargs):
""" Set initial condition for voltage nodes.
Usage: initial_condition(node_name1=value, ...)
"""
for key, value in kwargs.items():
self._initial_condition['V({})'.format(str(key))] = str(value)
# Fixme: .nodeset
##############################################
def save(self, *args):
# Fixme: pass Node for voltage node, Element for source branch current, ...
"""Set the list of saved vectors.
If no *.save* line is given, then the default set of vectors is saved (node voltages and
voltage source branch currents). If *.save* lines are given, only those vectors specified
are saved.
Node voltages may be saved by giving the node_name or *v(node_name)*. Currents through an
independent voltage source (including inductor) are given by *i(source_name)* or
*source_name#branch*. Internal device data are accepted as *@dev[param]*.
If you want to save internal data in addition to the default vector set, add the parameter
*all* to the additional vectors to be saved.
"""
self._saved_nodes = list(args)
##############################################
@property
def save_currents(self):
""" Save all currents. """
return self._options.get('SAVECURRENTS', False)
@save_currents.setter
def save_currents(self, value):
if value:
self._options['SAVECURRENTS'] = True
else:
del self._options['SAVECURRENTS']
##############################################
def reset_analysis(self):
self._analysis_parameters.clear()
##############################################
def operating_point(self):
"""Compute the operating point of the circuit with capacitors open and inductors shorted."""
self._analysis_parameters['op'] = ''
##############################################
def dc_sensitivity(self, output_variable):
"""Compute the sensitivity of the DC operating point of a node voltage or voltage-source branch
current to all non-zero device parameters.
General form:
.. code::
.sens outvar
Examples:
.. code::
.SENS V(1, OUT)
.SENS I(VTEST)
"""
self._analysis_parameters['sens'] = (output_variable,)
##############################################
def ac_sensitivity(self, output_variable,
start_frequency, stop_frequency, number_of_points, variation):
"""Compute the sensitivity of the AC values of a node voltage or voltage-source branch
current to all non-zero device parameters.
General form:
.. code::
.sens outvar ac dec nd fstart fstop
.sens outvar ac oct no fstart fstop
.sens outvar ac lin np fstart fstop
Examples:
.. code::
           .SENS V(OUT) AC DEC 10 100 100k
"""
if variation not in ('dec', 'oct', 'lin'):
raise ValueError("Incorrect variation type")
self._analysis_parameters['sens'] = (output_variable,
variation, number_of_points, start_frequency, stop_frequency)
##############################################
def dc(self, **kwargs):
"""Compute the DC transfer fonction of the circuit with capacitors open and inductors shorted.
General form:
.. code::
.dc srcnam vstart vstop vincr [ src2 start2 stop2 incr2 ]
*srcnam* is the name of an independent voltage or current source, a resistor or the circuit
temperature. *vstart*, *vstop*, and *vincr* are the starting, final, and incrementing values
respectively.
A second source (*src2*) may optionally be specified with associated sweep parameters. In
this case, the first source is swept over its range for each value of the second source.
Examples:
.. code::
           .dc VIN 0.25 5.0 0.25
           .dc VDS 0 10 .5 VGS 0 5 1
           .dc VCE 0 10 .25 IB 0 10U 1U
.dc RLoad 1k 2k 100
.dc TEMP -15 75 5
"""
parameters = []
for variable, value_slice in kwargs.items():
variable_lower = variable.lower()
if variable_lower[0] in ('v', 'i', 'r') or variable_lower == 'temp':
parameters += [variable, value_slice.start, value_slice.stop, value_slice.step]
else:
raise NameError('Sweep variable must be a voltage/current source, '
'a resistor or the circuit temperature')
self._analysis_parameters['dc'] = parameters
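    # Illustrative usage sketch (assumes a 'circuit' object built elsewhere
    # with a source named 'Vinput'; not part of the original file):
    #
    #     simulation = CircuitSimulation(circuit)
    #     simulation.dc(Vinput=slice(0, 5, .1))  # sweep 0 V to 5 V in 0.1 V steps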
####################################################################################################
derekjchow/models | research/slim/nets/mobilenet/conv_blocks.py | Python | apache-2.0 | 13,351 | 0.005243
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolution blocks for mobilenet."""
import contextlib
import functools
import tensorflow as tf
slim = tf.contrib.slim
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
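# Worked example (illustrative, not part of the original file): with
# kernel_size=[3, 3] and rate=2 the effective kernel is 3 + (3 - 1) * (2 - 1) = 5,
# so pad_total is 4 and each spatial side gets 2 pixels of zero padding:
#
#     x = tf.zeros([1, 8, 8, 3])
#     y = _fixed_padding(x, [3, 3], rate=2)  # y.shape == [1, 12, 12, 3]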
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
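# Worked examples (illustrative): _make_divisible(30, 8) -> 32, i.e. 30 rounded
# to the nearest multiple of 8; _make_divisible(9, 8) -> 16, because plain
# rounding would give 8, which falls below the 0.9 * 9 floor.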
def _split_divisible(num, num_ways, divisible_by=8):
"""Evenly splits num, num_ways so each piece is a multiple of divisible_by."""
assert num % divisible_by == 0
assert num / num_ways >= divisible_by
# Note: want to round down, we adjust each split to match the total.
base = num // num_ways // divisible_by * divisible_by
result = []
accumulated = 0
for i in range(num_ways):
r = base
while accumulated + r < num * (i + 1) / num_ways:
r += divisible_by
result.append(r)
accumulated += r
assert accumulated == num
return result
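# Worked examples (illustrative): _split_divisible(48, 3) -> [16, 16, 16];
# the uneven case _split_divisible(32, 3) -> [16, 8, 8], where each piece is a
# multiple of 8 and the pieces sum to the original total.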
@contextlib.contextmanager
def _v1_compatible_scope_naming(scope):
  if scope is None:  # Create uniquified separable blocks.
with tf.variable_scope(None, default_name='separable') as s, \
tf.name_scope(s.original_name_scope):
yield ''
else:
    # We use scope_depthwise, scope_pointwise for compatibility with V1
    # checkpoints, which provide numbered scopes.
scope += '_'
yield scope
@slim.add_arg_scope
def split_separable_conv2d(input_tensor,
num_outputs,
scope=None,
normalizer_fn=None,
stride=1,
rate=1,
endpoints=None,
use_explicit_padding=False):
"""Separable mobilenet V1 style convolution.
Depthwise convolution, with default non-linearity,
  followed by a 1x1 pointwise convolution. This is similar to
  slim.separable_conv2d, but differs in that it applies batch
  normalization and non-linearity to the depthwise convolution. This matches
  the basic building block of the MobileNet paper
(https://arxiv.org/abs/1704.04861)
Args:
input_tensor: input
num_outputs: number of outputs
scope: optional name of the scope. Note if provided it will use
      scope_depthwise for depthwise, and scope_pointwise for pointwise.
normalizer_fn: which normalizer function to use for depthwise/pointwise
stride: stride
rate: output rate (also known as dilation rate)
endpoints: optional, if provided, will export additional tensors to it.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
    output tensor
"""
with _v1_compatible_scope_naming(scope) as scope:
dw_scope = scope + 'depthwise'
endpoints = endpoints if endpoints is not None else {}
kernel_size = [3, 3]
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
input_tensor = _fixed_padding(input_tensor, kernel_size, rate)
net = slim.separable_conv2d(
input_tensor,
None,
kernel_size,
depth_multiplier=1,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope=dw_scope)
endpoints[dw_scope] = net
pw_scope = scope + 'pointwise'
net = slim.conv2d(
net,
num_outputs, [1, 1],
stride=1,
normalizer_fn=normalizer_fn,
scope=pw_scope)
endpoints[pw_scope] = net
return net
def expand_input_by_factor(n, divisible_by=8):
return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by)
@slim.add_arg_scope
def expanded_conv(input_tensor,
num_outputs,
expansion_size=expand_input_by_factor(6),
stride=1,
rate=1,
kernel_size=(3, 3),
residual=True,
normalizer_fn=None,
project_activation_fn=tf.identity,
split_projection=1,
split_expansion=1,
split_divisible_by=8,
expansion_transform=None,
depthwise_location='expansion',
depthwise_channel_multiplier=1,
endpoints=None,
use_explicit_padding=False,
padding='SAME',
scope=None):
"""Depthwise Convolution Block with expansion.
Builds a composite convolution that has the following structure
expansion (1x1) -> depthwise (kernel_size) -> projection (1x1)
Args:
input_tensor: input
num_outputs: number of outputs in the final layer.
expansion_size: the size of expansion, could be a constant or a callable.
If latter it will be provided 'num_inputs' as an input. For forward
compatibility it should accept arbitrary keyword arguments.
Default will expand the input by factor of 6.
stride: depthwise stride
rate: depthwise rate
kernel_size: depthwise kernel
residual: whether to include residual connection between input
and output.
normalizer_fn: batchnorm or otherwise
project_activation_fn: activation function for the project layer
split_projection: how many ways to split projection operator
(that is conv expansion->bottleneck)
split_expansion: how many ways to split expansion op
      (that is conv bottleneck->expansion) ops will keep depth divisible
      by this value.
split_divisible_by: make sure every split group is divisible by this number.
expansion_transform: Optional function that takes expansion
as a single input and returns output.
    depthwise_location: where to put depthwise convolutions; supported
      values are None, 'input', 'output', 'expansion'
depthwise_channel_multiplier: depthwise channel multiplier:
      each input will be replicated (with different filters)
      that many times. So if input had c channels,
      output will have c x depthwise_channel_multiplier.
endpoints: An optional dictionary into which intermediate endpoints are
placed. The keys "expansion_output", "depthwise_output",
"projection_output" and "expansion_transf
|
hawkphantomnet/leetcode | FirstMissingPositive/Solution.py | Python | mit | 495 | 0.00202
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l = len(nums)
for i in range(0, l):
cur = nums[i]
            while cur >= 1 and cur <= l and nums[cur - 1] != cur:
tmp = nums[cur - 1]
nums[cur - 1] = cur
cur = tmp
for i in range(0, l):
if nums[i] != i + 1:
return i + 1
return l + 1
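    # Illustrative trace (not part of the original solution): for
    # nums = [3, 4, -1, 1] the swap loop places each value v with 1 <= v <= l
    # into slot v - 1, ending with [1, 4, 3, 4]; the second loop then sees
    # index 1 holding 4 instead of 2, so the answer is 2.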
klnprj/testapp | django/db/models/fields/__init__.py | Python | bsd-3-clause | 47,219 | 0.000911
import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.ipv6 import clean_ipv6_address
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _(u'Value %r is not a valid choice.'),
'null': _(u'This field cannot be null.'),
'blank': _(u'This field cannot be blank.'),
'unique': _(u'%(model_name)s with this %(field_label)s '
u'already exists.'),
}
    # Generic field type description, usually overridden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (self.empty_strings_allowed and
connection.features.interprets_empty_strings_as_nulls):
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
msg = self.error_messages['invalid_choice'] % value
raise exceptions.ValidationError(msg)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
pinax/pinax-lms-activities | pinax/lms/activities/models.py | Python | mit | 4,304 | 0.000465
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
import jsonfield
from .hooks import hookset
from .utils import load_path_attr
class UserState(models.Model):
"""
this stores the overall state of a particular user.
"""
user = models.OneToOneField(User, null=True, on_delete=models.SET_NULL)
data = jsonfield.JSONField(default=dict, blank=True)
@classmethod
def for_user(cls, user):
assert user.is_authenticated(), "user must be authenticated"
user_state, _ = cls.objects.get_or_create(user=user)
return user_state
def get(self, key):
return self.data.get(key)
def set(self, key, value):
self.data[key] = value
self.save()
class ActivityState(models.Model):
"""
this stores the overall state of a particular user doing a particular
activity across all sessions of that activity.
"""
user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
activity_key = models.CharField(max_length=300)
activity_class_path = models.CharField(max_length=300)
# how many sessions have been completed by this user
completed_count = models.IntegerField(default=0)
data = jsonfield.JSONField(default=dict, blank=True)
class Meta:
unique_together = [("user", "activity_key")]
@property
def activity_class(self):
return load_path_attr(self.activity_class_path)
@property
def in_progress(self):
return next(iter(self.sessions.filter(completed=None)), None)
@property
def latest(self):
session, _ = self.sessions.get_or_create(completed=None)
return session
@property
def last_completed(self):
return self.sessions.filter(completed__isnull=False).order_by("-started").first()
@property
def all_sessions(self):
return self.sessions.order_by("started")
@classmethod
def state_for_user(cls, user, activity_key):
assert user.is_authenticated(), "user must be authenticated"
return cls.objects.filter(user=user, activity_key=activity_key).first()
@property
def progression(self):
if self.in_progress:
return "continue"
elif self.activity_class.repeatable:
return "repeat"
else:
return "completed"
class ActivitySessionState(models.Model):
"""
this stores the state of a particular session of a particular user
doing a particular activity.
"""
activity_state = models.ForeignKey(ActivityState, related_name="sessions", on_delete=models.CASCADE)
started = models.DateTimeField(default=timezone.now)
completed = models.DateTimeField(null=True) # NULL means in progress
data = jsonfield.JSONField(default=dict, blank=True)
class Meta:
unique_together = [("activity_state", "started")]
def mark_completed(self):
self.completed = timezone.now()
self.save()
self.activity_state.completed_count = models.F("completed_count") + 1
self.activity_state.save()
def activities_for_user(user):
    activities = {
"available": [],
"inprogress": [],
"completed": [],
"repeatable": []
}
    for key, activity_class_path in hookset.all_activities():
activity = load_path_attr(activity_class_path)
state = ActivityState.state_for_user(user, key)
user_num_completions = ActivitySessionState.objects.filter(
user=user,
activity_key=key,
completed__isnull=False
).count()
activity_entry = {
"activity_key": key,
"title": activity.title,
"description": activity.description,
"state": state,
"user_num_completions": user_num_completions,
"repeatable": activity.repeatable,
}
if state:
if state.in_progress:
activities["inprogress"].append(activity_entry)
elif activity.repeatable:
activities["repeatable"].append(activity_entry)
else:
activities["completed"].append(activity_entry)
else:
activities["available"].append(activity_entry)
return activities
Vodak/SINS | src/main.py | Python | gpl-3.0 | 91 | 0
"""
Fichier main qui lance le programme
"""
|
from Game import *
game = Game()
game.play()
cwolferh/heat-scratch | heat/db/sqlalchemy/migrate_repo/versions/071_stack_owner_id_index.py | Python | apache-2.0 | 897 | 0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
name_index = sqlalchemy.Index('ix_stack_owner_id', stack.c.owner_id,
mysql_length=36)
name_index.create(migrate_engine)
kennethlove/django | django/db/models/query.py | Python | bsd-3-clause | 70,273 | 0.001352
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import itertools
import sys
from django.core import exceptions
from django.db import connections, router, transaction, IntegrityError
from django.db.models.fields import AutoField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import sql
from django.utils.functional import partition
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None):
self.model = model
# EmptyQuerySet instantiates QuerySet with model as None
self._db = using
self.query = query or sql.Query(self.model)
self._result_cache = None
self._iter = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_object = None # (attname, rel_obj)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k,v in self.__dict__.items():
if k in ('_iter','_result_cache'):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
return len(self._result_cache)
def __iter__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To minimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def _result_iter(self):
pos = 0
while 1:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos = pos + 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def __nonzero__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To m
|
inimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __contains__(self, val):
# The 'in' operator works without this method, due to __iter__. This
# implementation exists only to shortcut the creation of Model
# instances, by bailing out early if we find a matching element.
pos = 0
if self._result_cache is not None:
if val in self._result_cache:
return True
elif self._iter is None:
# iterator is exhausted, so we have our answer
return False
# remember not to check these again:
pos = len(self._result_cache)
else:
# We need to start filling the result cache out. The following
# ensures that self._iter is not None and self._result_cache is not
# None
it = iter(self)
# Carry on, one result at a time.
while True:
if len(self._result_cache) <= pos:
self._fill_cache(num=1)
if self._iter is None:
# we ran out of items
return False
if self._result_cache[pos] == val:
return True
pos += 1
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
except self.model.DoesNotExist as e:
raise IndexError(e.args)
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
combined = self._clone()
if isinstance(other, EmptyQuerySet):
return combined
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
    ####################################
JinnLynn/genpac | tests/util.py | Python | mit | 1,389 | 0.002185
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import sys
import os
import pytest
from contextlib import contextmanager
import genpac
from genpac._compat import string_types, iterkeys, iteritems
parametrize = pytest.mark.parametrize
skipif = pytest.mark.skipif
xfail = pytest.mark.xfail
_ETC_DIR = os.path.join(os.path.dirname(__file__), 'etc')
_TMP_DIR = os.path.join(os.path.dirname(__file__), 'tmp')
# Whether this is my own machine
is_own = sys.platform.startswith('darwin') and \
''.join(os.environ.values()).find('JinnLynn') >= 0
is_not_own = not is_own
def join_etc(*args):
return os.path.join(_ETC_DIR, *args)
def join_tmp(*args):
return os.path.join(_TMP_DIR, *args)
@contextmanager
def buildenv(envs=None, argv=None, **kwargs):
envs = envs or {}
    argv = argv or []
if isinstance(argv, string_types):
argv = argv.split(' ')
if not argv or argv[0] != 'genpac':
argv.insert(0, 'genpac')
envs.setdefault('GENPAC_TEST_TMP', _TMP_DIR)
envs.setdefault('GENPAC_TEST_ETC', _ETC_DIR)
for k, v in iteritems(envs):
os.environ[k] = v
old_argv = sys.argv
sys.argv = argv
yield
genpac.Generator._gfwlists.clear()
for k in iterkeys(envs):
if k in os.environ:
del os.environ[k]
sys.argv = old_argv
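# Illustrative usage sketch (hypothetical values, not part of the original file):
#
#     with buildenv(envs={'GENPAC_EXTRA': '1'}, argv='--help'):
#         # inside the block sys.argv == ['genpac', '--help'] and the extra
#         # environment variables are set; both are restored on exit
#         ...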
wangjun/pyload | module/plugins/hoster/FreakshareCom.py | Python | gpl-3.0 | 6,038 | 0.004306
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from module.plugins.Hoster import Hoster
from module.plugins.internal.CaptchaService import ReCaptcha
class FreakshareCom(Hoster):
__name__ = "FreakshareCom"
__type__ = "hoster"
__pattern__ = r"http://(?:www\.)?freakshare\.(net|com)/files/\S*?/"
__version__ = "0.38"
__description__ = """Freakshare.com Download Hoster"""
__author_name__ = ("sitacuisses", "spoob", "mkaay", "Toilal")
__author_mail__ = ("sitacuisses@yahoo.de", "spoob@pyload.org", "mkaay@mkaay.de", "toilal.dev@gmail.com")
def setup(self):
self.multiDL = False
self.req_opts = []
def process(self, pyfile):
self.pyfile = pyfile
pyfile.url = pyfile.url.replace("freakshare.net/", "freakshare.com/")
if self.account:
self.html = self.load(pyfile.url, cookies=False)
pyfile.name = self.get_file_name()
self.download(pyfile.url)
else:
self.prepare()
self.get_file_url()
self.download(self.pyfile.url, post=self.req_opts)
check = self.checkDownload({"bad": "bad try",
"paralell": "> Sorry, you cant download more then 1 files at time. <",
"empty": "Warning: Unknown: Filename cannot be empty",
"wrong_captcha": "Wrong Captcha!",
"downloadserver": "No Downloadserver. Please try again later!"})
if check == "bad":
self.fail("Bad Try.")
elif check == "paralell":
self.setWait(300, True)
self.wait()
self.retry()
elif check == "empty":
self.fail("File not downloadable")
elif check == "wrong_captcha":
self.invalidCaptcha()
self.retry()
elif check == "downloadserver":
self.retry(5, 900, 'No Download server')
def prepare(self):
pyfile = self.pyfile
self.wantReconnect = False
self.download_html()
if not self.file_exists():
self.offline()
self.setWait(self.get_waiting_time())
pyfile.name = self.get_file_name()
pyfile.size = self.get_file_size()
self.wait()
return True
def download_html(self):
self.load("http://freakshare.com/index.php", {"language": "EN"}) # Set english language in server session
self.html = self.load(self.pyfile.url)
def get_file_url(self):
""" returns the absolute downloadable filepath
"""
if self.html is None:
self.download_html()
if not self.wantReconnect:
self.req_opts = self.get_download_options() # get the Post options for the Request
#file_url = self.pyfile.url
#return file_url
else:
self.offline()
def get_file_name(self):
if self.html is None:
self.download_html()
if not self.wantReconnect:
file_name = re.search(r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">([^ ]+)", self.html)
if file_name is not None:
file_name = file_name.group(1)
else:
file_name = self.pyfile.url
return file_name
else:
return self.pyfile.url
def get_file_size(self):
size = 0
if self.html is None:
self.download_html()
if not self.wantReconnect:
file_size_check = re.search(
r"<h1\sclass=\"box_heading\"\sstyle=\"text-align:center;\">[^ ]+ - ([^ ]+) (\w\w)yte", self.html)
if file_size_check is not None:
units = float(file_size_check.group(1).replace(",", ""))
pow = {'KB': 1, 'MB': 2, 'GB': 3}[file_size_check.group(2)]
size = int(units * 1024 ** pow)
return size
def get_waiting_time(self):
if self.html is None:
self.download_html()
if "Your Traffic is used up for today" in self.html:
self.wantReconnect = True
return 24 * 3600
timestring = re.search('\s*var\s(?:downloadWait|time)\s=\s(\d*)[.\d]*;', self.html)
if timestring:
            return int(timestring.group(1)) + 1  # add 1 sec as tenths of seconds are cut off
else:
return 60
def file_exists(self):
""" returns True or False
"""
if self.html is None:
self.download_html()
if re.search(r"This file does not exist!", self.html) is not None:
return False
else:
return True
def get_download_options(self):
re_envelope = re.search(r".*?value=\"Free\sDownload\".*?\n*?(.*?<.*?>\n*)*?\n*\s*?</form>",
self.html).group(0) # get the whole request
to_sort = re.findall(r"<input\stype=\"hidden\"\svalue=\"(.*?)\"\sname=\"(.*?)\"\s\/>", re_envelope)
request_options = dict((n, v) for (v, n) in to_sort)
herewego = self.load(self.pyfile.url, None, request_options) # the actual download-Page
        # comment this in, when it doesn't work
# with open("DUMP__FS_.HTML", "w") as fp:
# fp.write(herewego)
to_sort = re.findall(r"<input\stype=\".*?\"\svalue=\"(\S*?)\".*?name=\"(\S*?)\"\s.*?\/>", herewego)
request_options = dict((n, v) for (v, n) in to_sort)
        # comment this in, when it doesn't work as well
#print "\n\n%s\n\n" % ";".join(["%s=%s" % x for x in to_sort])
challenge = re.search(r"http://api\.recaptcha\.net/challenge\?k=([0-9A-Za-z]+)", herewego)
if challenge:
re_captcha = ReCaptcha(self)
(request_options["recaptcha_challenge_field"],
request_options["recaptcha_response_field"]) = re_captcha.challenge(challenge.group(1))
return request_options
roshantha9/AbstractManycoreSim | src/libMappingAndScheduling/FullyDynamic/TaskMappingSchemesFullyDyn.py | Python | gpl-3.0 | 320 | 0.028125
class TaskMappingSchemesFullyDyn:
    TASKMAPPINGSCHEMESFULLYDYN_NONE = 0  # this will give error
    TASKMAPPINGSCHEMESFULLYDYN_RANDOM = 1
    TASKMAPPINGSCHEMESFULLYDYN_LOWESTUTIL_NEARESTPARENT = 2
tadek-project/tadek-common | tadek/core/location.py | Python | gpl-3.0 | 9,460 | 0.008985
################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to tadek-licenses@comarch.com ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to tadek-licenses@comarch.com ##
## ##
################################################################################
import os
import sys
from tadek import models
from tadek import teststeps
from tadek import testcases
from tadek import testsuites
from tadek.core import locale
from tadek.core.structs import ErrorBox
_DIRS_MAP = {
"models": models,
"teststeps": teststeps,
"testcases": testcases,
"testsuites": testsuites
}
_LOCALE_DIR = "locale"
class NameConflictError(Exception):
'''
    Raised when a module name conflict takes place inside some package.
'''
def __init__(self, module, name):
Exception.__init__(self, '.'.join([module.__name__, name]))
def add(path, enabled=True):
'''
Adds a location of models and test cases specified by the path.
:param path: A path to a location directory
:type path: string
:param enabled: True if an added location should be enabled, False otherwise
:type enabled: boolean
'''
path = os.path.abspath(path)
if path in _cache:
return None
_cache[path] = enabled
if enabled:
return enable(path)
return []
def remove(path):
'''
Removes a location of models and test cases specified by the path.
:param path: A path to a location directory
:type path: string
'''
path = os.path.abspath(path)
if path not in _cache:
return
disable(path)
del _cache[path]
def get(enabled=None):
'''
Gets a list of all locations.
'''
if enabled is None:
return _cache.keys()
elif enabled:
return [path for path in _cache if _cache[path]]
else:
return [path for path in _cache if not _cache[path]]
def enable(path):
'''
Enables a location of models and test cases specified by the path.
:param path: A path to a location directory
:type path: string
'''
path = os.path.abspath(path)
if path not in _cache:
return None
_cache[path] = True
errors = []
    for dirname, module in _DIRS_MAP.iteritems():
errors.extend(_addModuleDir(module, os.path.join(path, dirname)))
# Add a corresponding locale
locale.add(os.path.join(path, _LOCALE_DIR))
if errors:
disable(path)
return errors
def disable(path):
'''
Disables a location of models and test cases specified by the path.
:param path: A path to a location directory
:type path: string
'''
path = os.path.abspath(path)
for dirname, module in _DIRS_MAP.iteritems():
_removeModuleDir(module, os.path.join(path, dirname))
# Remove a corresponding locale
locale.remove(os.path.join(path, _LOCALE_DIR))
_cache[path] = False
def clear():
'''
Clears imported modules from all locations.
'''
for module in _DIRS_MAP.itervalues():
_clearModule(module)
# A locations cache
_cache = {}
# Location directories oriented functions:
def getModels():
'''
    Gets a dictionary containing all currently available models modules.
:return: A dictionary with models modules
:rtype: dictionary
'''
content = _getModuleContent(models)
content.pop("__init__", None)
return content
def getSteps():
'''
    Gets a dictionary containing all currently available root test steps
modules.
:return: A dictionary with test steps modules
:rtype: dictionary
'''
content = _getModuleContent(teststeps)
content.pop("__init__", None)
return content
def getCases():
'''
    Gets a dictionary containing all currently available root test cases
modules.
:return: A dictionary with test cases modules
:rtype: dictionary
'''
content = _getModuleContent(testcases)
content.pop("__init__", None)
return content
def getSuites():
'''
    Gets a dictionary containing all currently available root test suites
modules.
:return: A dictionary with test suites modules
:rtype: dictionary
'''
content = _getModuleContent(testsuites)
content.pop("__init__", None)
return content
_MODULE_EXTS = (".py", ".pyc", ".pyo")
def _getDirContent(dir, package=None):
'''
Gets content of the given directory.
'''
content = {}
for file in sorted(os.listdir(dir)):
name = None
path = os.path.join(dir, file)
if os.path.isfile(path):
name, ext = os.path.splitext(file)
if ext not in _MODULE_EXTS or (package and name == "__init__"):
continue
name = '.'.join([package, name]) if package else name
elif os.path.isdir(path):
pkg = False
for ext in _MODULE_EXTS:
if os.path.exists(os.path.join(path, "__init__" + ext)):
pkg = True
break
if not pkg:
continue
name = '.'.join([package, file]) if package else file
content.update(_getDirContent(path, name))
path = os.path.join(path, "__init__" + ext)
if name and name not in content:
content[name] = path
return content
def _getModuleContent(module):
'''
Gets content of the given module from the specified directory.
'''
content = {}
for path in module.__path__:
for name, path in _getDirContent(path).iteritems():
if name not in content:
content[name] = path
return content
def _addModuleDir(modul
menpo/lsfm | lsfm/model.py | Python | bsd-3-clause | 968 | 0
import numpy as np
from menpo.model import PCAModel
from menpo.visualize import print_progress
def prune(weights, n_retained=50):
    w_norm = (weights[:, :n_retained] ** 2).sum(axis=1)
# High weights here suggest problematic samples
bad_to_good_index = np.argsort(w_norm)[::-1]
return w_norm, bad_to_good_index
def pca_and_weights(meshes, retain_eig_cum_val=0.997, verbose=False):
model = PCAModel(meshes, verbose=verbose)
n_comps_retained = (model.eigenvalues_cumulative_ratio() <
retain_eig_cum_val).sum()
if verbose:
print('\nRetaining {:.2%} of eigenvalues keeps {} components'.format(
retain_eig_cum_val, n_comps_retained))
model.trim_components(retain_eig_cum_val)
if verbose:
meshes = print_progress(meshes, prefix='Calculating weights')
weights = (np.vstack([model.project(m) for m in meshes])
/ np.sqrt(model.eigenvalues))
return model, weights
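# Illustrative usage sketch (assumes 'meshes' is a list of meshes prepared
# elsewhere; not part of the original file):
#
#     model, weights = pca_and_weights(meshes, verbose=True)
#     w_norm, bad_to_good = prune(weights)
#     suspects = bad_to_good[:10]  # indices of the 10 most atypical samples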
fatrix/django-golive | project_examples/django_example/django_example/urls.py | Python | bsd-2-clause | 801 | 0.007491
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from django.views.generic import TemplateView
urlpatterns = patterns('',
# Examples:
url(r'^$', TemplateView.as_view(template_name='index.html'),
name='index'),
# url(r'^django_example/', include('django_example.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
DigitalCampus/django-nurhi-oppia | oppia/templatetags/query_string.py | Python | gpl-3.0 | 2,788 | 0.002511
import urllib
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.tag
def query_string(parser, token):
"""
    Allows manipulating the query string of a page by adding and removing keywords.
If a given value is a context variable it will resolve it.
Usage:
http://www.url.com/{% query_string "param_to_add=value, param_to_add=value" "param_to_remove, params_to_remove" %}
"""
try:
tag_name, add_string, remove_string = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires two arguments" % token.contents.split()[0]
if not (add_string[0] == add_string[-1] and add_string[0] in ('"', "'")) or not (remove_string[0] == remove_string[-1] and remove_string[0] in ('"', "'")):
raise template.TemplateSyntaxError, "%r tag's argument should be in quotes" % tag_name
add = string_to_dict(add_string[1:-1])
remove = string_to_list(remove_string[1:-1])
return QueryStringNode(add, remove)
class QueryStringNode(template.Node):
def __init__(self, add, remove):
self.add = add
self.remove = remove
def render(self, context):
p = {}
for k, v in context["request"].GET.items():
p[k] = v
return get_query_string(p, self.add, self.remove, context)
def get_query_string(p, new_params, remove, context):
"""
Add and remove query parameters. From `django.contrib.admin`.
"""
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
        if k in p and v is None:
            del p[k]
        elif v is not None:
            p[k] = v
for k, v in p.items():
try:
p[k] = template.Variable(v).resolve(context)
except:
p[k] = v
return mark_safe('?' + '&'.join([u'%s=%s' % (urllib.quote_plus(str(k)), urllib.quote_plus(str(v))) for k, v in p.items()]))
# Taken from lib/utils.py
def string_to_dict(string):
kwargs = {}
if string:
string = str(string)
if ',' not in string:
# ensure at least one ','
string += ','
for arg in string.split(','):
arg = arg.strip()
if arg == '':
continue
kw, val = arg.split('=', 1)
kwargs[kw] = val
return kwargs
def string_to_list(string):
args = []
if string:
string = str(string)
if ',' not in string:
# ensure at least one ','
string += ','
for arg in string.split(','):
arg = arg.strip()
if arg == '':
continue
args.append(arg)
return args
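# Illustrative examples (not part of the original file):
#
#     string_to_dict("page=2, sort=name")  # -> {'page': '2', 'sort': 'name'}
#     string_to_list("page, sort")         # -> ['page', 'sort']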
mikesligo/distributed-search | Exceptions/Invalid_IP_exception.py | Python | mit | 48 | 0
class Invalid_IP_exception(Exception):
pass
eemiliosl/pyanno_voting | setup.py | Python | bsd-2-clause | 916 | 0.021834
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "PyAnno",
version = "0.1a",
author = "Whatever",
author_email = "andrewjcarter@gmail.com",
description = "Here we describe what we put in the github repo description ",
license = "BSD",
keywords = "labels voting annotation",
    url = "my own webpage",
packages=['pyanno'], # This what is really needed, the rest is optional
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
andela-bojengwa/talk | venv/lib/python2.7/site-packages/markdown/extensions/wikilinks.py | Python | mit | 2,901 | 0.005515
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links.
See <https://pythonhosted.org/Markdown/extensions/wikilinks.html>
for documentation.
Original code Copyright [Waylan Limberg](http://achinghead.com/).
All changes Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import Pattern
from ..util import etree
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end)
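# Illustrative example (not part of the original file):
#
#     build_url('Main Page', '/wiki/', '/')  # -> '/wiki/Main_Page/'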
class WikiLinkExtension(Extension):
def __init__ (self, *args, **kwargs):
self.config = {
'base_url' : ['/', 'String to append to beginning or URL.'],
'end_url' : ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'],
}
        super(WikiLinkExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
        WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
wikilinkPattern.md = md
md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(Pattern):
def __init__(self, pattern, config):
super(WikiLinks, self).__init__(pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'](label, base_url, end_url)
a = etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url']
end_url = self.config['end_url']
html_class = self.config['html_class']
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(*args, **kwargs) :
return WikiLinkExtension(*args, **kwargs)
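# Illustrative usage sketch (not part of the original file):
#
#     import markdown
#     html = markdown.markdown('[[Main Page]]',
#                              extensions=[WikiLinkExtension(base_url='/wiki/')])
#     # -> '<p><a class="wikilink" href="/wiki/Main_Page/">Main Page</a></p>'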
ericmoritz/flask-auth | flaskext/auth/views.py | Python | bsd-2-clause | 2,020 | 0.00198
from flask import (Module, request, abort, current_app, session, flash,
redirect, render_template)
import re
mod = Module(__name__, name="auth")
def check_next(next):
"""return the value of next if next is a valid next param,
it returns None if the next param is invalid"""
# security check stolen from Django, thanks Django!
# Try to get the next param
# Light security check -- make sure redirect_to isn't garbage.
if ' ' in next:
return None
# Heavier security check -- redirects to http://example.com should
# not be allowed, but things like /view/?param=http://example.com
# should be allowed. This regex checks if there is a '//' *before* a
# question mark.
    elif '//' in next and re.match(r'[^\?]*//', next):
return None
else:
return next
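# Illustrative behaviour of check_next (a sketch, not part of the original
# module; the URLs are made up):
assert check_next('/view/?param=http://example.com') == '/view/?param=http://example.com'
assert check_next('http://example.com') is None  # '//' appears before any '?'
assert check_next('/path with space') is None    # light check: contains a space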
@mod.route("/login/", methods=["GET", "POST"])
def login():
backend = current_app.config['AUTH_BACKEND']
next = request.args.get("next", "")
next = check_next(next)
# Try to authenticate
error = None
if request.method == "POST":
# Try to authenticate based on the form data
result = backend.authenticate(request.form)
        # If something is returned, use that as the auth_key in the session
if result is not None:
session["auth_key"] = result
flash("Login successful.")
if next:
return redirect(next)
else:
flash("Login Invalid", "error")
return render_template("auth/login.html")
@mod.route("/logout/")
def logout():
# Get the AUTH_BACKEND
backend = current_app.config['AUTH_BACKEND']
auth_key = session.get("auth_key")
if auth_key:
next = request.args.get("next", "/")
# Let the backend know about the logout
backend.logout(auth_key)
# Throw away the auth_key
session.pop("auth_key", None)
# Flash a pretty message
flash("You are now logged out")
return redirect(next)
|
google-research/language
|
language/xsp/data_preprocessing/wikisql_preprocessing.py
|
Python
|
apache-2.0
| 5,129 | 0.008384 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for loading the WikiSQL dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from language.xsp.data_preprocessing import abstract_sql
from language.xsp.data_preprocessing import abstract_sql_converters
from language.xsp.data_preprocessing.nl_to_sql_example import NLToSQLExample
from language.xsp.data_preprocessing.nl_to_sql_example import populate_utterance
from language.xsp.data_preprocessing.sql_parsing import ParseError
from language.xsp.data_preprocessing.sql_parsing import populate_sql
from language.xsp.data_preprocessing.sql_utils import preprocess_sql
import sqlparse
import tensorflow.compat.v1.gfile as gfile
def normalize_sql(sql, replace_period=True):
"""Normalizes WikiSQL SQL queries."""
sql = sql.replace('_/_', '_OR_')
sql = sql.replace('/', '_OR_')
sql = sql.replace('?', '')
if replace_period:
sql = sql.replace('.', '')
sql = sql.replace('(', '')
sql = sql.replace(')', '')
sql = sql.replace('%', '')
return sql
def normalize_entities(entity_name):
"""Normalizes database entities (table and column names)."""
entity_name = normalize_sql(entity_name)
entity_name = entity_name.replace(' ', '_').upper()
return entity_name
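# Illustrative normalization behaviour (a sketch, not part of the original
# file; the example strings are made up):
assert normalize_sql('col_/_a / b?') == 'col_OR_a _OR_ b'
assert normalize_entities('wins/losses') == 'WINS_OR_LOSSES'
assert normalize_entities('page views') == 'PAGE_VIEWS'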
def convert_wikisql(input_example,
schema,
tokenizer,
generate_sql,
anonymize_values,
use_abstract_sql,
tables_schema=None,
allow_value_generation=False):
"""Converts a WikiSQL example into a NLToSQLExample."""
example = NLToSQLExample()
try:
try:
example = populate_utterance(example, input_example[0], schema, tokenizer)
except ValueError as e:
print(e)
return None
# WikiSQL databases have a single table.
assert len(schema) == 1
# Some preprocessing of the WikiSQL SQL queries.
sql = input_example[1].rstrip('; ')
sql = sql.replace('TABLE', list(schema.keys())[0])
sql = sql.replace('_FIELD', '')
string_split_sql = sql.split(' ')
if string_split_sql[1].lower() in {'count', 'min', 'max', 'avg', 'sum'}:
# Add parentheses around the column that's an argument of any of these
# aggregate functions (because gold annotations don't have it).
sql = ' '.join(string_split_sql[0:2] + ['(', string_split_sql[2], ')'] +
string_split_sql[3:])
sql = normalize_sql(sql, replace_period=False)
try:
sql = preprocess_sql(sql)
except UnicodeDecodeError as e:
return None
sql = sql.lower()
parsed_sql = sqlparse.parse(sql)[0]
successful_copy = True
if generate_sql:
try:
if use_abstract_sql:
successful_copy = abstract_sql_converters.populate_abstract_sql(
example, sql, tables_schema, anonymize_values)
else:
successful_copy = populate_sql(parsed_sql, example, anonymize_values)
except (ParseError, ValueError, AssertionError, KeyError, IndexError,
abstract_sql.ParseError, abstract_sql.UnsupportedSqlError) as e:
return None
if not successful_copy and not allow_value_generation:
return None
if not example.gold_sql_query.actions:
return None
elif example.gold_sql_query.actions[-1].symbol == '=':
return None
except UnicodeEncodeError as e:
print(e)
return None
return example
def load_wikisql_tables(filepath):
"""Loads the WikiSQL tables from a path and reformats as the format."""
dbs = dict()
with gfile.Open(filepath) as infile:
tables = [json.loads(line) for line in infile if line]
for table in tables:
db_dict = dict()
table_name = table['section_title'] if 'section_title' in table and table[
'section_title'] else (
table['name'] if 'name' in table else table['page_title'])
table_name = normalize_entities(table_name)
db_dict[table_name] = list()
for column_name, column_type in zip(table['header'], table['types']):
if column_type == 'real':
column_type = 'number'
assert column_type in {'text', 'number'}, column_type
column_name = normalize_entities(column_name)
db_dict[table_name].append({
'field name': column_name,
'is primary key': False,
'is foreign key': False,
'type': column_type
})
if table['id'] not in dbs:
dbs[table['id']] = db_dict
return dbs
|
psicobyte/ejemplos-python
|
ApendiceI/p202.py
|
Python
|
gpl-3.0
| 167 | 0.005988 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
class MiClase:
    @staticmethod
    def metodo(entrada):
        return entrada
objeto = MiClase
print objeto.metodo(5)
|
tensorflow/tensorboard
|
tensorboard/backend/event_processing/plugin_asset_util.py
|
Python
|
apache-2.0
| 3,555 | 0.000281 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Load plugin assets from disk."""
import os.path
from tensorboard.compat import tf
_PLUGINS_DIR = "plugins"
def _IsDirectory(parent, item):
"""Helper that returns if parent/item is a directory."""
return tf.io.gfile.isdir(os.path.join(parent, item))
def PluginDirectory(logdir, plugin_name):
"""Returns the plugin directory for plugin_name."""
return os.path.join(logdir, _PLUGINS_DIR, plugin_name)
def ListPlugins(logdir):
"""List all the plugins that have registered assets in logdir.
If the plugins_dir does not exist, it returns an empty list. This maintains
compatibility with old directories that have no plugins written.
Args:
logdir: A directory that was created by a TensorFlow events writer.
Returns:
a list of plugin names, as strings
"""
plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
try:
entries = tf.io.gfile.listdir(plugins_dir)
except tf.errors.NotFoundError:
return []
# Strip trailing slashes, which listdir() includes for some filesystems
# for subdirectories, after using them to bypass IsDirectory().
return [
x.rstrip("/")
for x in entries
if x.endswith("/") or _IsDirectory(plugins_dir, x)
]
def ListAssets(logdir, plugin_name):
"""List all the assets that are available for given plugin in a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: A string name of a plugin to list assets for.
Returns:
A string list of available plugin assets. If the plugin subdirectory does
not exist (either because the logdir doesn't exist, or because the plugin
didn't register) an empty list is returned.
"""
plugin_dir = PluginDirectory(logdir, plugin_name)
try:
# Strip trailing slashes, which listdir() includes for some filesystems.
return [x.rstrip("/") for x in tf.io.gfile.listdir(plugin_dir)]
except tf.errors.NotFoundError:
return []
def RetrieveAsset(logdir, plugin_name, asset_name):
"""Retrieve a particular plugin asset from a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: The plugin we want an asset from.
    asset_name: The name of the requested asset.
Returns:
string contents of the plugin asset.
Raises:
KeyError: if the asset does not exist.
"""
    asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
try:
with tf.io.gfile.GFile(asset_path, "r") as f:
return f.read()
except tf.errors.NotFoundError:
raise KeyError("Asset path %s not found" % asset_path)
except tf.errors.OpError as e:
raise KeyError(
"Couldn't read asset path: %s, OpError %s" % (asset_path, e)
)
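# A small usage sketch (not part of the original module). The log directory
# below is an assumption; any directory written by a TensorFlow summary
# writer would work:
logdir = '/tmp/tb_logdir'
for plugin in ListPlugins(logdir):
    for asset in ListAssets(logdir, plugin):
        contents = RetrieveAsset(logdir, plugin, asset)
        print(plugin, asset, len(contents))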
|
renesas-rz/u-boot-2013.04
|
tools/patman/patman.py
|
Python
|
gpl-2.0
| 6,763 | 0.001774 |
#!/usr/bin/python
#
# Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""See README for more information"""
from optparse import OptionParser
import os
import re
import sys
import unittest
# Our modules
import checkpatch
import command
import gitutil
import patchstream
import project
import settings
import terminal
import test
parser = OptionParser()
parser.add_option('-a', '--no-apply', action='store_false',
dest='apply_patches', default=True,
help="Don't test-apply patches with git am")
parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
default=False, help='Display the README file')
parser.add_option('-c', '--count', dest='count', type='int',
default=-1, help='Automatically create patches from top n commits')
parser.add_option('-i', '--ignore-errors', action='store_true',
dest='ignore_errors', default=False,
help='Send patches email even if patch errors are found')
parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
default=False, help="Do a dry run (create but don't email patches)")
parser.add_option('-p', '--project', default=project.DetectProject(),
help="Project name; affects default option values and "
"aliases [default: %default]")
parser.add_option('-r', '--in-reply-to', type='string', action='store',
help="Message ID that this series is in reply to")
parser.add_option('-s', '--start', dest='start', type='int',
default=0, help='Commit to start creating patches from (0 = HEAD)')
parser.add_option('-t', '--ignore-bad-tags', action='store_true',
default=False, help='Ignore bad tags / aliases')
parser.add_option('--test', action='store_true', dest='test',
default=False, help='run tests')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False, help='Verbose output of errors and warnings')
parser.add_option('--cc-cmd', dest='cc_cmd', type='string', action='store',
default=None, help='Output cc list for patch file (used by git)')
parser.add_option('--no-check', action='store_false', dest='check_patch',
default=True,
help="Don't check for patch compliance")
parser.add_option('--no-tags', action='store_false', dest='process_tags',
default=True, help="Don't process subject tags as aliaes")
parser.usage = """patman [options]
Create patches from commits in a branch, check them and email them as
specified by tags you place in the commits. Use -n to do a dry run first."""
# Parse options twice: first to get the project and second to handle
# defaults properly (which depends on project).
(options, args) = parser.parse_args()
settings.Setup(parser, options.project, '')
(options, args) = parser.parse_args()
# Run our meagre tests
if options.test:
import doctest
sys.argv = [sys.argv[0]]
suite = unittest.TestLoader().loadTestsFromTestCase(test.TestPatch)
result = unittest.TestResult()
suite.run(result)
for module in ['gitutil', 'settings']:
suite = doctest.DocTestSuite(module)
suite.run(result)
# TODO: Surely we can just 'print' result?
print result
for test, err in result.errors:
print err
for test, err in result.failures:
print err
# Called from git with a patch filename as argument
# Print out a list of additional CC recipients for this patch
elif options.cc_cmd:
fd = open(options.cc_cmd, 'r')
re_line = re.compile('(\S*) (.*)')
for line in fd.readlines():
match = re_line.match(line)
if match and match.group(1) == args[0]:
for cc in match.group(2).split(', '):
cc = cc.strip()
if cc:
print cc
fd.close()
elif options.full_help:
pager = os.getenv('PAGER')
if not pager:
pager = 'more'
fname = os.path.join(os.path.dirname(sys.argv[0]), 'README')
command.Run(pager, fname)
# Process commits, produce patches files, check them, email them
else:
gitutil.Setup()
if options.count == -1:
# Work out how many patches to send if we can
options.count = gitutil.CountCommitsToBranch() - options.start
col = terminal.Color()
if not options.count:
str = 'No commits found to process - please use -c flag'
print col.Color(col.RED, str)
sys.exit(1)
# Read the metadata from the commits
if options.count:
series = patchstream.GetMetaData(options.start, options.count)
cover_fname, args = gitutil.CreatePatches(options.start, options.count,
series)
# Fix up the patch files to our liking, and insert the cover letter
series = patchstream.FixPatches(series, args)
if series and cover_fname and series.get('cover'):
patchstream.InsertCoverLetter(cover_fname, series, options.count)
# Do a few checks on the series
series.DoChecks()
# Check the patches, and run them through 'git am' just to be sure
if options.check_patch:
ok = checkpatch.CheckPatches(options.verbose, args)
else:
ok = True
if options.apply_patches:
if not gitutil.ApplyPatches(options.verbose, args,
options.count + options.start):
ok = False
cc_file = series.MakeCcFile(options.process_tags, cover_fname,
not options.ignore_bad_tags)
# Email the patches out (giving the user time to check / cancel)
cmd = ''
if ok or options.ignore_errors:
cmd = gitutil.EmailPatches(series, cover_fname, args,
options.dry_run, not options.ignore_bad_tags, cc_file,
in_reply_to=options.in_reply_to)
# For a dry run, just show our actions as a sanity check
if options.dry_run:
series.ShowActions(args, cmd, options.process_tags)
os.remove(cc_file)
|
pkimber/old_moderate
|
example/base.py
|
Python
|
apache-2.0
| 5,826 | 0.000515 |
""" Django settings """
from django.core.urlresolvers import reverse_lazy
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
TEMPLATE_STRING_IF_INVALID = '**** INVALID EXPRESSION: %s ****'
ADMINS = (
('admin', 'code@pkimber.net'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'web_static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'w@t8%tdwyi-n$u_s#4_+cwnq&6)1n)l3p-qe(ziala0j^vo12d'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'reversion.middleware.RevisionMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'reversion',
'south',
'example',
'base',
'login',
'moderate',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# User Profile.
AUTH_PROFILE_MODULE = 'example.UserProfile'
# URL where requests are redirected after login when the contrib.auth.login
# view gets no next parameter.
LOGIN_REDIRECT_URL = reverse_lazy('project.home')
# Login URL. Used with login_required decorators when a user
# must be logged in before accessing the view otherwise this URL
# will be called.
LOGIN_URL = reverse_lazy('login.login')
# Login URL. Used with login_required decorators when a user
# must be logged in before accessing the view otherwise this URL
# will be called.
# LOGIN_URL = reverse_lazy('login.login')
# https://github.com/johnsensible/django-sendfile
SENDFILE_BACKEND = 'sendfile.backends.development'
SENDFILE_ROOT = 'media-private'
FTP_STATIC_DIR = None
FTP_STATIC_URL = None
|
benob/chainer
|
chainer/links/connection/mlp_convolution_2d.py
|
Python
|
mit
| 3,009 | 0.000332 |
from chainer.functions.activation import relu
from chainer import link
from chainer.links.connection import convolution_2d
class MLPConvolution2D(link.ChainList):
"""Two-dimensional MLP convolution layer of Network
|
in Network.
This is an "mlpconv" layer from the Network in Network paper. This layer
is a two-dimensional convolution layer followed by 1x1 convolution layers
and interleaved activation functions.
Note that it does not apply the activation function to the output of the
last 1x1 convolution layer.
Args:
in_channels (int): Number of channels of input arrays.
out_channels (tuple of ints): Tuple of number of channels. The i-th
integer indicates the number of filters of the i-th convolution.
ksize (int or pair of ints): Size of filters (a.k.a. kernels) of the
first convolution layer. ``ksize=k`` and ``ksize=(k, k)`` are
equivalent.
stride (int or pair of ints): Stride of filter applications at the
first convolution layer. ``stride=s`` and ``stride=(s, s)`` are
equivalent.
pad (int or pair of ints): Spatial padding width for input arrays at
the first convolution layer. ``pad=p`` and ``pad=(p, p)`` are
equivalent.
activation (function): Activation function for internal hidden units.
Note that this function is not applied to the output of this link.
use_cudnn (bool): If ``True``, then this link uses cuDNN if available.
conv_init: An initializer of weight matrices
passed to the convolution layers.
bias_init: An initializer of bias vectors
passed to the convolution layers.
See: `Network in Network <http://arxiv.org/abs/1312.4400v3>`.
Attributes:
activation (function): Activation function.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
wscale=1, activation=relu.relu, use_cudnn=True,
conv_init=None, bias_init=None):
assert len(out_channels) > 0
convs = [convolution_2d.Convolution2D(
in_channels, out_channels[0], ksize, stride, pad,
wscale=wscale, use_cudnn=use_cudnn,
initialW=conv_init, initial_bias=bias_init)]
for n_in, n_out in zip(out_channels, out_channels[1:]):
convs.append(convolution_2d.Convolution2D(
n_in, n_out, 1, wscale=wscale,
initialW=conv_init, initial_bias=bias_init,
use_cudnn=use_cudnn))
super(MLPConvolution2D, self).__init__(*convs)
self.activation = activation
def __call__(self, x):
"""Computes the output of the mlpconv layer.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of the mlpconv layer.
"""
f = self.activation
for l in self[:-1]:
x = f(l(x))
return self[-1](x)
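# A minimal usage sketch (not part of the original file; the channel counts
# and input shape are illustrative assumptions):
import numpy
import chainer

mlp = MLPConvolution2D(3, (96, 96, 96), 11, stride=4)
x = chainer.Variable(numpy.zeros((10, 3, 227, 227), dtype=numpy.float32))
y = mlp(x)  # 11x11/4 conv -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv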
|
joshl8n/school-projects
|
illustrate/water.py
|
Python
|
gpl-3.0
| 1,287 | 0.034188 |
import pygame
from fish import Fish
from seaweed import Seaweed
class Water:
def __init__(self):
# color, pos_x, pos_y, width, height
self.mOrangeFish = Fish((255, 152, 0), 50, 175, 175, 100)
self.mGreyFish = Fish((96, 125, 139), 350, 130, 125, 200)
self.mRedFish = Fish((183, 28, 28), 200, 300, 175, 500)
# color, start-point x, start-point y, end-point x, end-point y, width
self.mSeaweed = Seaweed((104, 159, 56), 450, 450, 450, 500, 3)
self.mSeaweed2 = Seaweed((104, 159, 56), 400, 450, 400, 500, 3)
self.mSeaweed3 = Seaweed((104, 159, 56), 370, 430, 370, 500, 3)
self.mSeaweed4 = Seaweed((104, 159, 56), 390, 430, 390, 500, 3)
self.mSeaweed5 = Seaweed((104, 159, 56), 320, 450, 320, 500, 3)
return
def draw(self, surface):
color = (1, 87, 155)
        pointlist = [(0, 80),
(100, 100),
(200, 80),
(300, 100),
(400, 80),
(500, 100),
(600, 80),
(600, 500),
(0, 500)]
        pygame.draw.polygon(surface, color, pointlist, 0)
self.mOrangeFish.draw(surface)
self.mGreyFish.draw(surface)
self.mRedFish.draw(surface)
self.mSeaweed.draw(surface)
self.mSeaweed2.draw(surface)
self.mSeaweed3.draw(surface)
self.mSeaweed4.draw(surface)
self.mSeaweed5.draw(surface)
return
|
hypermindr/barbante
|
barbante/recommendation/tests/__init__.py
|
Python
|
mit
| 41 | 0 |
"""
Tests for module recommendation.
"""
|
magnusmorton/pycket
|
pycket/hash/simple.py
|
Python
|
mit
| 6,811 | 0.002496 |
from pycket import values
from pycket.error import SchemeException
from pycket.hash.base import (
W_MutableHashTable,
W_ImmutableHashTable,
w_missing,
get_dict_item)
from pycket.hash.persistent_hash_map import make_persistent_hash_type
from rpython.rlib.objectmodel import compute_hash, r_dict, specialize
from rpython.rlib.rarithmetic import r_uint
@specialize.arg(0)
def make_simple_mutable_table(cls, keys=None, vals=None):
data = r_dict(cls.cmp_value, cls.hash_value, force_non_null=True)
if keys is not None and vals is not None:
assert len(keys) == len(vals)
for i, k in enumerate(keys):
data[k] = vals[i]
return cls(data)
@specialize.arg(0)
def make_simple_mutable_table_assocs(cls, assocs, who):
if not assocs.is_proper_list():
raise SchemeException("%s: not given proper list" % who)
    data = r_dict(cls.cmp_value, cls.hash_value, force_non_null=True)
while isinstance(assocs, values.W_Cons):
entry, assocs = assocs.car(), assocs.cdr()
if not isinstance(entry, values.W_Cons):
raise SchemeException("%s: expected list of pairs" % who)
key, val = entry.car(), entry.cdr()
data[key] = val
return cls(data)
@specialize.arg(0)
def make_simple_immutable_table(cls, keys=None, vals=None):
table = cls.EMPTY
if keys is not None and vals is not None:
        assert len(keys) == len(vals)
for i, k in enumerate(keys):
table = table.assoc(k, vals[i])
return table
@specialize.arg(0)
def make_simple_immutable_table_assocs(cls, assocs, who):
if not assocs.is_proper_list():
raise SchemeException("%s: not given proper list" % who)
table = cls.EMPTY
while isinstance(assocs, values.W_Cons):
entry, assocs = assocs.car(), assocs.cdr()
if not isinstance(entry, values.W_Cons):
raise SchemeException("%s: expected list of pairs" % who)
key, val = entry.car(), entry.cdr()
table = table.assoc(key, val)
return table
class W_SimpleMutableHashTable(W_MutableHashTable):
_attrs_ = ['data']
_immutable_fields_ = ["data"]
@staticmethod
def hash_value(v):
raise NotImplementedError("abstract method")
@staticmethod
def cmp_value(a, b):
raise NotImplementedError("abstract method")
def __init__(self, data):
self.data = data
def make_copy(self):
raise NotImplementedError("abstract method")
def hash_items(self):
return self.data.items()
def tostring(self):
lst = [values.W_Cons.make(k, v).tostring() for k, v in self.data.iteritems()]
return "#hash(%s)" % " ".join(lst)
def hash_set(self, k, v, env, cont):
from pycket.interpreter import return_value
self.data[k] = v
return return_value(values.w_void, env, cont)
def hash_remove_inplace(self, k, env, cont):
from pycket.interpreter import return_value
del self.data[k]
return return_value(values.w_void, env, cont)
def hash_ref(self, k, env, cont):
from pycket.interpreter import return_value
return return_value(self.data.get(k, w_missing), env, cont)
def length(self):
return len(self.data)
class W_EqvMutableHashTable(W_SimpleMutableHashTable):
def make_empty(self):
return make_simple_mutable_table(W_EqvMutableHashTable)
def make_copy(self):
        return W_EqvMutableHashTable(self.data.copy())  # __init__ takes only the data dict
@staticmethod
def hash_value(k):
return k.hash_eqv()
@staticmethod
def cmp_value(a, b):
return a.eqv(b)
def get_item(self, i):
return get_dict_item(self.data, i)
class W_EqMutableHashTable(W_SimpleMutableHashTable):
def make_copy(self):
return W_EqMutableHashTable(self.data.copy())
def make_empty(self):
return make_simple_mutable_table(W_EqMutableHashTable)
@staticmethod
def hash_value(k):
if isinstance(k, values.W_Fixnum):
return compute_hash(k.value)
if isinstance(k, values.W_Character):
return ord(k.value)
return compute_hash(k)
@staticmethod
def cmp_value(a, b):
from pycket.prims.equal import eqp_logic
return eqp_logic(a, b)
def get_item(self, i):
return get_dict_item(self.data, i)
W_EqvImmutableHashTable = make_persistent_hash_type(
super=W_ImmutableHashTable,
keytype=values.W_Object,
valtype=values.W_Object,
name="W_EqvImmutableHashTable",
hashfun=lambda x: r_uint(W_EqvMutableHashTable.hash_value(x)),
equal=W_EqvMutableHashTable.cmp_value)
W_EqImmutableHashTable = make_persistent_hash_type(
super=W_ImmutableHashTable,
keytype=values.W_Object,
valtype=values.W_Object,
name="W_EqImmutableHashTable",
hashfun=lambda x: r_uint(W_EqMutableHashTable.hash_value(x)),
equal=W_EqMutableHashTable.cmp_value)
class __extend__(W_EqvImmutableHashTable):
def length(self):
return len(self)
def make_copy(self):
return self
def make_empty(self):
return W_EqvImmutableHashTable.EMPTY
def hash_ref(self, k, env, cont):
from pycket.interpreter import return_value
result = self.val_at(k, w_missing)
return return_value(result, env, cont)
def hash_remove(self, key, env, cont):
from pycket.interpreter import return_value
removed = self.without(key)
return return_value(removed, env, cont)
def tostring(self):
assert type(self) is W_EqvImmutableHashTable
entries = [None] * len(self)
i = 0
for k, v in self.iteritems():
entries[i] = "(%s . %s)" % (k.tostring(), v.tostring())
i += 1
return "#hasheqv(%s)" % " ".join(entries)
class __extend__(W_EqImmutableHashTable):
def length(self):
return len(self)
def make_copy(self):
return self
def make_empty(self):
return W_EqImmutableHashTable.EMPTY
def hash_ref(self, key, env, cont):
from pycket.interpreter import return_value
result = self.val_at(key, w_missing)
return return_value(result, env, cont)
def hash_remove(self, key, env, cont):
from pycket.interpreter import return_value
removed = self.without(key)
return return_value(removed, env, cont)
def tostring(self):
assert type(self) is W_EqImmutableHashTable
entries = [None] * len(self)
i = 0
for k, v in self.iteritems():
entries[i] = "(%s . %s)" % (k.tostring(), v.tostring())
i += 1
return "#hasheq(%s)" % " ".join(entries)
|
ZipFile/papi.py
|
papi/helpers.py
|
Python
|
bsd-2-clause
| 356 | 0 |
import sys
if sys.version_info[0] == 2:
from urlparse import urljoin
string_types = basestring,
else:
    from urllib.parse import urljoin
string_types = str,
def atoi(string, default=0):
    if isinstance(string, int):
return string
try:
return int(string)
except (TypeError, ValueError):
return default
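# Illustrative behaviour (a sketch, not part of the original module):
assert atoi('42') == 42
assert atoi(5) == 5
assert atoi('not-a-number', default=7) == 7
assert atoi(None) == 0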
|
xstrengthofonex/code-live-tutorials
|
python_web_development/database/handlers/base_handler.py
|
Python
|
mit
| 1,866 | 0.000536 |
import hmac
import config
from jinja2 import Environment, FileSystemLoader
jinja2_env = Environment(loader=FileSystemLoader(
config.TEMPLATE_DIRS), autoescape=True)
class BaseHandler(object):
def __init__(self):
self.request = None
self.response = None
def make_secure_value(self, value):
return "{}|{}".format(value, hmac.new(config.SECRET_KEY.encode(),
value.encode()).hexdigest())
def check_secure_value(self, secure_value):
val, hashed = secure_value.split('|', 1)
if secure_value == self.make_secure_value(val):
return True
return False
def set_secure_cookie(self, name, val):
cookie_val = self.make_secure_value(val)
self.response.set_cookie(name, cookie_val)
def get_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
if cookie_val and self.check_secure_value(cookie_val):
return cookie_val.split('|', 1)[0]
return None
    def login(self, user):
        self.set_secure_cookie('username', str(user['username']))
def logout(self, user):
self.response.delete_cookie('username')
def write(self, text):
self.response.write(text)
def redirect(self, url, status=301):
self.response.status = status
self.response.location = url
def render(self, filename, **context):
template = jinja2_env.get_template(filename)
self.write(template.render(**context))
def __call__(self, request, response):
self.request = request
self.response = response
action = request.method.lower()
try:
method = getattr(self, action)
except AttributeError:
raise AttributeError("No action for {}".format(action))
method(**request.urlvars)
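# Sketch of the signed-cookie round trip (not part of the original file).
# It assumes the `config` module provides SECRET_KEY; note that hmac.new()
# without an explicit digestmod only works on Python versions before 3.8:
handler = BaseHandler()
token = handler.make_secure_value('alice')  # 'alice|<hmac hexdigest>'
assert handler.check_secure_value(token)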
|
lduarte1991/edx-platform
|
common/djangoapps/third_party_auth/tests/test_admin.py
|
Python
|
agpl-3.0
| 3,696 | 0.001353 |
"""
Tests third_party_auth admin views
"""
import unittest
from django.contrib.admin.sites import AdminSite
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.forms import models
from student.tests.factories import UserFactory
from third_party_auth.admin import OAuth2ProviderConfigAdmin
from third_party_auth.models import OAuth2ProviderConfig
from third_party_auth.tests import testutil
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, testutil.AUTH_FEATURES_KEY + ' not enabled')
class Oauth2ProviderConfigAdminTest(testutil.TestCase):
"""
Tests for oauth2 provider config admin
"""
def test_oauth2_provider_edit_icon_image(self):
"""
Test that we can update an OAuth provider's icon image from the admin
form.
OAuth providers are updated using KeyedConfigurationModelAdmin, which
updates models by adding a new instance that replaces the old one,
instead of editing the old instance directly.
Updating the icon image is tricky here because
KeyedConfigurationModelAdmin copies data over from the previous
version by injecting its attributes into request.GET, but the icon
ends up in request.FILES. We need to ensure that the value is
prepopulated correctly, and that we can clear and update the image.
"""
# Login as a super user
user = UserFactory.create(is_staff=True, is_superuser=True)
user.save()
self.client.login(username=user.username, password='test')
# Get baseline provider count
providers = OAuth2ProviderConfig.objects.all()
pcount = len(providers)
# Create a provider
provider1 = self.configure_dummy_provider(
enabled=True,
icon_class='',
icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
)
# Get the provider instance with active flag
providers = OAuth2ProviderConfig.objects.all()
self.assertEquals(len(providers), 1)
self.assertEquals(providers[pcount].id, provider1.id)
# Edit the provider via the admin edit link
admin = OAuth2ProviderConfigAdmin(provider1, AdminSite())
# pylint: disable=protected-access
update_url = reverse('admin:{}_{}_add'.format(admin.model._meta.app_label, admin.model._meta.model_name))
update_url += "?source={}".format(provider1.pk)
# Remove the icon_image from the POST data, to simulate unchanged icon_image
post_data = models.model_to_dict(provider1)
del post_data['icon_image']
# Remove max_session_length; it has a default null value which must be POSTed
        # back as an absent value, rather than as a "null-like" included value.
        del post_data['max_session_length']
# Change the name, to verify POST
post_data['name'] = 'Another name'
# Post the edit form: expecting redirect
response = self.client.post(update_url, post_data)
self.assertEquals(response.status_code, 302)
# Editing the existing provider creates a new provider instance
providers = OAuth2ProviderConfig.objects.all()
self.assertEquals(len(providers), pcount + 2)
self.assertEquals(providers[pcount].id, provider1.id)
provider2 = providers[pcount + 1]
# Ensure the icon_image was preserved on the new provider instance
self.assertEquals(provider2.icon_image, provider1.icon_image)
self.assertEquals(provider2.name, post_data['name'])
|
repology/repology
|
repology/parsers/parsers/t2.py
|
Python
|
gpl-3.0
| 4,354 | 0.002067 |
# Copyright (C) 2019-2021 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import os
import re
from collections import defaultdict
from typing import Iterable
from repology.logger import Logger
from repology.package import PackageFlags
from repology.packagemaker import NameType, PackageFactory, PackageMaker
from repology.parsers import Parser
from repology.parsers.maintainers import extract_maintainers
from repology.parsers.patches import add_patch_files
from repology.parsers.walk import walk_tree
def _parse_descfile(path: str, logger: Logger) -> dict[str, list[str]]:
data: dict[str, list[str]] = defaultdict(list)
# http://t2sde.org/handbook/html/t2.package.desc.html
tag_map = {
        'i': 'title',
't': 'text',
'u': 'url',
'a': 'author',
'm': 'maintainer',
'c': 'category',
'f': 'flag',
'r': 'architecture',
'arch': 'architecture',
'k': 'kernel',
'kern': 'kernel',
'e': 'dependency',
'dep': 'dependency',
'l': 'license',
's': 'status',
'v': 'version',
'ver': 'version',
'p': 'priority',
'pri': 'priority',
'o': 'conf',
'd': 'download',
'down': 'download',
#'s': 'source', # duplicate - documentation is incorrect?
'src': 'source',
}
with open(path, 'r', encoding='latin1') as descfile:
for line in descfile:
line = line.strip()
if line.startswith('#'):
continue
match = re.fullmatch(r'\[([^\[\]]+)\]\s*(.*?)', line, re.DOTALL)
if match:
tag = match.group(1).lower()
tag = tag_map.get(tag, tag)
data[tag].append(match.group(2))
elif line:
logger.log('unexpected line "{}"'.format(line), Logger.WARNING)
return data
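# A hypothetical .desc fragment and what _parse_descfile returns for it
# (a sketch, not part of the original file):
#
#   [I] Tiny example package
#   [V] 1.0
#   [L] GPL
#
# -> {'title': ['Tiny example package'], 'version': ['1.0'], 'license': ['GPL']}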
class T2DescParser(Parser):
def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
for desc_path in walk_tree(path, suffix='.desc'):
rel_desc_path = os.path.relpath(desc_path, path)
with factory.begin(rel_desc_path) as pkg:
pkgpath = os.path.dirname(rel_desc_path)
name = os.path.basename(pkgpath)
if name + '.desc' != os.path.basename(rel_desc_path):
raise RuntimeError('Path inconsistency (expected .../foo/foo.desc)')
data = _parse_descfile(desc_path, pkg)
pkg.add_name(name, NameType.T2_NAME)
pkg.add_name(pkgpath, NameType.T2_FULL_NAME)
pkg.set_version(data['version'][0])
pkg.set_summary(data['title'][0])
pkg.add_homepages((url.split()[0] for url in data.get('url', []) if url))
#pkg.add_homepages(data.get('cv-url')) # url used by version checker; may be garbage
pkg.add_licenses(data['license'])
pkg.add_maintainers(map(extract_maintainers, data['maintainer']))
pkg.add_categories(data['category'])
for cksum, filename, url, *rest in (line.split() for line in data.get('download', [])):
url = url.lstrip('-!')
if url.endswith('/'):
url += filename
if url.startswith('cvs') or url.startswith('git') or url.startswith('svn') or url.startswith('hg'):
# snapshots basically
pkg.set_flags(PackageFlags.UNTRUSTED)
pkg.add_downloads(url)
add_patch_files(pkg, os.path.dirname(desc_path), '*.patch')
yield pkg
|
ESS-LLP/frappe
|
frappe/integrations/doctype/paypal_settings/paypal_settings.py
|
Python
|
mit
| 12,324 | 0.022558 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
"""
# Integrating PayPal
### 1. Validate Currency Support
Example:
from frappe.integrations.utils import get_payment_gateway_controller
controller = get_payment_gateway_controller("PayPal")
controller().validate_transaction_currency(currency)
### 2. Redirect for payment
Example:
payment_details = {
"amount": 600,
"title": "Payment for bill : 111",
"description": "payment via cart",
"reference_doctype": "Payment Request",
"reference_docname": "PR0001",
"payer_email": "NuranVerkleij@example.com",
"payer_name": "Nuran Verkleij",
"order_id": "111",
"currency": "USD",
"payment_gateway": "Razorpay",
"subscription_details": {
"plan_id": "plan_12313", # if Required
"start_date": "2018-08-30",
"billing_period": "Month" #(Day, Week, SemiMonth, Month, Year),
"billing_frequency": 1,
"customer_notify": 1,
"upfront_amount": 1000
}
}
# redirect the user to this url
url = controller().get_payment_url(**payment_details)
### 3. On Completion of Payment
Write a method for `on_payment_authorized` in the reference doctype
Example:
def on_payment_authorized(payment_status):
# your code to handle callback
##### Note:
payment_status - payment gateway will put payment status on callback.
For paypal payment status parameter is one from: [Completed, Cancelled, Failed]
More Details:
<div class="small">For details on how to get your API credentials, follow this link: <a href="https://developer.paypal.com/docs/classic/api/apiCredentials/" target="_blank">https://developer.paypal.com/docs/classic/api/apiCredentials/</a></div>
"""
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from datetime import datetime
from frappe.utils import get_url, call_hook_method, cint, get_timestamp, cstr, now, date_diff, get_datetime
from six.moves.urllib.parse import urlencode
from frappe.model.document import Document
from frappe.integrations.utils import create_request_log, make_post_request, create_payment_gateway
api_path = '/api/method/frappe.integrations.doctype.paypal_settings.paypal_settings'
class PayPalSettings(Document):
supported_currencies = ["AUD", "BRL", "CAD", "CZK", "DKK", "EUR", "HKD", "HUF", "ILS", "JPY", "MYR", "MXN",
"TWD", "NZD", "NOK", "PHP", "PLN", "GBP", "RUB", "SGD", "SEK", "CHF", "THB", "TRY", "USD"]
def __setup__(self):
setattr(self, "use_sandbox", 0)
def setup_sandbox_env(self, token):
data = json.loads(frappe.db.get_value("Integration Request", token, "data"))
setattr(self, "use_sandbox", cint(frappe._dict(data).use_sandbox) or 0)
def validate(self):
create_payment_gateway("PayPal")
call_hook_method('payment_gateway_enabled', gateway="PayPal")
if not self.flags.ignore_mandatory:
self.validate_paypal_credentails()
def on_update(self):
pass
def validate_transaction_currency(self, currency):
if currency not in self.supported_currencies:
frappe.throw(_("Please select another payment method. PayPal does not support transactions in currency '{0}'").format(currency))
def get_paypal_params_and_url(self):
params = {
"USER": self.api_username,
"PWD": self.get_password(fieldname="api_password", raise_exception=False),
"SIGNATURE": self.signature,
"VERSION": "98",
"METHOD": "GetPalDetails"
}
if hasattr(self, "use_sandbox") and self.use_sandbox:
params.update({
"USER": frappe.conf.sandbox_api_username,
"PWD": frappe.conf.sandbox_api_password,
"SIGNATURE": frappe.conf.sandbox_signature
})
api_url = "https://api-3t.sandbox.paypal.com/nvp" if (self.paypal_sandbox or self.use_sandbox) else "https://api-3t.paypal.com/nvp"
return params, api_url
def validate_paypal_credentails(self):
params, url = self.get_paypal_params_and_url()
params = urlencode(params)
try:
res = make_post_request(url=url, data=params.encode("utf-8"))
if res["ACK"][0] == "Failure":
raise Exception
except Exception:
frappe.throw(_("Invalid payment gateway credentials"))
def get_payment_url(self, **kwargs):
setattr(self, "use_sandbox", cint(kwargs.get("use_sandbox", 0)))
response = self.execute_set_express_checkout(**kwargs)
if self.paypal_sandbox or self.use_sandbox:
return_url = "https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}"
else:
return_url = "https://www.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}"
kwargs.update({
"token": response.get("TOKEN")[0],
"correlation_id": response.get("CORRELATIONID")[0]
})
self.integration_request = create_request_log(kwargs, "Remote", "PayPal", response.get("TOKEN")[0])
return return_url.format(kwargs["token"])
def execute_set_express_checkout(self, **kwargs):
params, url = self.get_paypal_params_and_url()
params.update({
"METHOD": "SetExpressCheckout",
"returnUrl": get_url("{0}.get_express_checkout_details".format(api_path)),
"cancelUrl": get_url("/payment-cancel"),
"PAYMENTREQUEST_0_PAYMENTACTION": "SALE",
"PAYMENTREQUEST_0_AMT": kwargs['amount'],
"PAYMENTREQUEST_0_CURRENCYCODE": kwargs['currency'].upper()
})
if kwargs.get('subscription_details'):
self.configure_recurring_payments(params, kwargs)
params = urlencode(params)
response = make_post_request(url, data=params.encode("utf-8"))
if response.get("ACK")[0] != "Success":
frappe.throw(_("Looks like something is wrong with this site's Paypal configuration."))
return response
	def configure_recurring_payments(self, params, kwargs):
		# removing the params as we have to set up recurring payments
for param in ('PAYMENTREQUEST_0_PAYMENTACTION', 'PAYMENTREQUEST_0_AMT',
'PAYMENTREQUEST_0_CURRENCYCODE'):
del params[param]
params.update({
"L_BILLINGTYPE0": "RecurringPayments", #The type of billing agreement
"L_BILLINGAGREEMENTDESCRIPTION0": kwargs['description']
})
def get_paypal_and_transaction_details(token):
doc = frappe.get_doc("PayPal Settings")
doc.setup_sandbox_env(token)
params, url = doc.get_paypal_params_and_url()
integration_request = frappe.get_doc("Integration Request", token)
data = json.loads(integration_request.data)
return data, params, url
def setup_redirect(data, redirect_url, custom_redirect_to=None, redirect=True):
redirect_to = data.get('redirect_to') or None
redirect_message = data.get('redirect_message') or None
if custom_redirect_to:
redirect_to = custom_redirect_to
if redirect_to:
redirect_url += '?' + urlencode({'redirect_to': redirect_to})
if redirect_message:
redirect_url += '&' + urlencode({'redirect_message': redirect_message})
# this is done so that functions called via hooks can update flags.redirect_to
if redirect:
frappe.local.response["type"] = "redirect"
frappe.local.response["location"] = get_url(redirect_url)
@frappe.whitelist(allow_guest=True, xss_safe=True)
def get_express_checkout_details(token):
try:
doc = frappe.get_doc("PayPal Settings")
doc.setup_sandbox_env(token)
params, url = doc.get_paypal_params_and_url()
params.update({
"METHOD": "GetExpressCheckoutDetails",
"TOKEN": token
})
response = make_post_request(url, data=params)
if response.get("ACK")[0] != "Success":
frappe.respond_as_web_page(_("Something went wrong"),
_("Looks like something went wrong during the transaction. Since we haven't confirmed the payment, Paypal will automatically refund you this amount. If it doesn't, please send us an email and mention the Correlation ID: {0}.").format(response.get("CORRELATIONID", [None])[0]),
indicator_color='red',
http_status_code=frappe.ValidationError.http_status_code)
return
doc = frappe.get_doc("Integration Request", token)
update_integration_request_status(token, {
"payerid": response.get("PAYERID")[0],
"payer_email": response.get("EMAIL")[0]
}, "Authorized", doc=doc)
frappe.local.response["type"] = "redirect"
frappe.local.response["location"] = get_redirect_uri(doc, token, response.get("PAYERID")[0])
except Exception:
frappe.log_error(frappe.get_traceback())
@frappe.whitelis
|
dataxu/ansible
|
lib/ansible/modules/cloud/vmware/vmware_guest_find.py
|
Python
|
gpl-3.0
| 4,032 | 0.002232 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_guest_find
short_description: Find the folder path(s) for a virtual machine by name or UUID
description:
- Find the folder path(s) for a virtual machine by name or UUID
version_added: 2.4
author:
- James Tanner <tanner.jc@gmail.com>
- Abhijeet Kasurde <akasurde@redhat.com>
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with.
- This is required if C(uuid) parameter is not supplied.
uuid:
description:
- UUID of the instance to manage if known, this is VMware's BIOS UUID.
- This is required if C(name) parameter is not supplied.
datacenter:
description:
- Destination datacenter for the find operation.
- Deprecated in 2.5, will be removed in 2.9 release.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Find Guest's Folder using name
vmware_guest_find:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
validate_certs: no
name: testvm
register: vm_folder
- name: Find Guest's Folder using UUID
vmware_guest_find:
hostname: 192.168.1.209
    username: administrator@vsphere.local
password: vmware
validate_certs: no
uuid: 38c4c89c-b3d7-4ae6-ae4e-43c5118eae49
register: vm_folder
'''
RETURN = r"""
folders:
description: List of folders for user specified virtual machine
returned: on success
type: list
sample: [
'/DC0/vm',
]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import PyVmomi, get_all_objs, vmware_argument_spec
try:
from pyVmomi import vim
except ImportError:
pass
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.name = self.params['name']
self.uuid = self.params['uuid']
def getvm_folder_paths(self):
results = []
# compare the folder path of each VM against the search path
vmList = get_all_objs(self.content, [vim.VirtualMachine])
for item in vmList.items():
vobj = item[0]
if not isinstance(vobj.parent, vim.Folder):
continue
# Match by name or uuid
if vobj.config.name == self.name or vobj.config.uuid == self.uuid:
folderpath = self.get_vm_path(self.content, vobj)
results.append(folderpath)
return results
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
datacenter=dict(removed_in_version=2.9, type='str')
)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[['name', 'uuid']],
mutually_exclusive=[['name', 'uuid']],
)
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
folders = pyv.getvm_folder_paths()
# VM already exists
if folders:
try:
module.exit_json(folders=folders)
except Exception as exc:
module.fail_json(msg="Folder enumeration failed with exception %s" % to_native(exc))
else:
module.fail_json(msg="Unable to find folders for virtual machine %s" % (module.params.get('name') or
module.params.get('uuid')))
if __name__ == '__main__':
main()
|
xyalan/build-interpreter
|
docker_tools/docker_opt.py
|
Python
|
apache-2.0
| 2,738 | 0.002191 |
#coding=utf-8
from docker import Client
import time
import logging
from envir import config
import ast
import re
log = logging.getLogger(__name__)
class DockerOpt:
def __init__(self):
app_config = config.read_app_config()
self.app = app_config
self.url = app_config['docker']['url']
log.info("create docker with %s", self.url)
def gen_tag(self, branch, app_version, api_version):
now = time.localtime()
now_str = time.strftime("%Y%m%d%H%M%S", now)
if str(branch).startswith("develop"):
tag_name = api_version + "-" + app_version + "-d" + now_str
elif str(branch).startswith("feature/"):
tag_name = api_version + "-" + app_version + "-f" + now_str
elif str(branch).startswith("release/"):
tag_name = api_version + "-" + app_version + "-r" + now_str
elif str(branch).startswith("hot
|
fix/"):
tag_name = api_version + "-" + app_version + "-h" + now_str
else:
raise Exception('unsupported branch')
return tag_name
def gen_repository(self, registry, project_key, app_name):
return str(registry) + "/" + str(project_key) + "/" + str(app_name)
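    # Illustrative results (a sketch, not part of the original file; assumes a
    # DockerOpt instance `d` and a fixed timestamp of 20240101120000):
    #   d.gen_tag('develop', '1.2.0', 'v1')   -> 'v1-1.2.0-d20240101120000'
    #   d.gen_tag('hotfix/x', '1.2.0', 'v1')  -> 'v1-1.2.0-h20240101120000'
    #   d.gen_repository('registry.example.com', 'proj', 'app')
    #                                         -> 'registry.example.com/proj/app'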
def build(self, path, tag):
"""
        Similar to `docker build`, build a docker image
:param path: context path include Dockerfile
:param tag: image's tag
:return: None
"""
self.read_port()
version = self.app['docker']['api']['version']
cli = Client(base_url=self.url, version=str(version))
response = cli.build(path, tag, rm=True)
for line in response:
rp = {key: str(item.strip().decode('unicode_escape')) for key, item in ast.literal_eval(line).items()}
log.info(rp)
log.info("successful build image with dockerImageTag=%s", str(tag).split(':')[1])
def push_images(self, repository, tag=None):
version = self.app['docker']['api']['version']
cli = Client(base_url=self.url, version=str(version))
response = cli.push(repository, tag=tag, stream=True)
for line in response:
log.info(line)
def read_port(self):
with open('Dockerfile') as s:
con = s.read()
m = re.search('EXPOSE\s(.+)', con)
if m:
port = m.group(1)
ports = ','.join(port.split(' '))
log.info('read portsSymbol=%s', ports)
else:
raise Exception('Docker file not exists')
def rm_image(self, repo):
version = self.app['docker']['api']['version']
cli = Client(base_url=self.url, version=str(version))
cli.remove_image(repo)
|
jsfyfield/pyboids
|
gfx_boids.py
|
Python
|
gpl-2.0
| 1,867 | 0.024103 |
#!python2
from random import randint
from boids import *
import sys,pygame,time,copy
screenx = 800
screeny = 600
ticktime = 0.01
fps = 80
clock = pygame.time.Clock()
size = screenx,screeny
pygame.init()
screen = pygame.display.set_mode(size)
time = 0
def gen_boids(x,y,low,upper):
nboids = randint(low,upper)
boids = []
while nboids > 0:
boids.append(Boid(nboids, complex(randint(0, x), randint(0, y)), complex(randint(-100, 100),randint(-100,100)), 100, 100, 50))
nboids -= 1
return boids
boids = gen_boids(screenx,screeny,100,200)
while 1:
n=clock.tick(fps)
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((250,250,250))
for i in xrange(len(boids)): # step the boids
        boidaccel = boids[i].step(boids,ticktime) # returns the accel
        boids[i].pos = complex(boids[i].pos.real % screenx, boids[i].pos.imag % screeny) # wrap around
# draw the acceleration vector
#thisarect = pygame.draw.aaline(background, (0,0,255), (boids[i].pos.real, boids[i].pos.imag),
# (boids[i].pos.real + boidaccel.real, boids[i].pos.imag + boidaccel.imag))
drawx = int(boids[i].pos.real)
drawy = int(boids[i].pos.imag)
endp = 9*(boids[i].v/abs(boids[i].v))
thisrect = pygame.draw.line(background, (0,0,0), (drawx,drawy), (drawx+endp.real,drawy+endp.imag), 3) # draw the boid
# draw all flock relationships
#for flockboid in boids[i].flock:
# linerect = pygame.draw.aaline(background, (255,0,0), (drawx, drawy),
# ((flockboid.pos.real),(flockboid.pos.imag)))
screen.blit(background, (0,0))
pygame.display.update()
x= pygame.quit()
|
gmimano/commcaretest
|
corehq/apps/commtrack/tests/test_sms_reporting.py
|
Python
|
bsd-3-clause
| 9,297 | 0.003442 |
from datetime import datetime
from casexml.apps.stock.models import StockReport, StockTransaction
from corehq.apps.commtrack.const import RequisitionStatus
from corehq.apps.commtrack.models import RequisitionCase
from casexml.apps.case.models import CommCareCase
from corehq.apps.commtrack.tests.util import CommTrackTest, bootstrap_user, FIXED_USER, ROAMING_USER
from corehq.apps.commtrack.sms import handle, SMSError
class StockReportTest(CommTrackTest):
user_definitions = [ROAMING_USER, FIXED_USER]
def setUp(self):
super(StockReportTest, self).setUp()
def testStockReportRoaming(self):
self.assertEqual(0, len(self.get_commtrack_forms()))
amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# soh loc1 pp 10 pq 20...
handled = handle(self.users[0].get_verified_number(), 'soh {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in amounts.items())
))
self.assertTrue(handled)
forms = list(self.get_commtrack_forms())
self.assertEqual(1, len(forms))
self.assertEqual(_get_location_from_sp(self.sp), _get_location_from_form(forms[0]))
# todo: right now this makes one report per balance when really they should all be in the same one
self.assertEqual(3, StockReport.objects.count())
for report in StockReport.objects.all():
self.assertEqual(forms[0]._id, report.form_id)
self.assertEqual('balance', report.type)
self.assertEqual(1, report.stocktransaction_set.count())
for code, amt in amounts.items():
[product] = filter(lambda p: p.code_ == code, self.products)
trans = StockTransaction.objects.get(product_id=product._id)
self.assertEqual(self.sp._id, trans.case_id)
self.assertEqual(0, trans.quantity)
self.assertEqual(amt, trans.stock_on_hand)
def testStockReportFixed(self):
self.assertEqual(0, len(self.get_commtrack_forms()))
amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# soh loc1 pp 10 pq 20...
handled = handle(self.users[1].get_verified_number(), 'soh {report}'.format(
report=' '.join('%s %s' % (k, v) for k, v in amounts.items())
))
self.assertTrue(handled)
forms = list(self.get_commtrack_forms())
self.assertEqual(1, len(forms))
self.assertEqual(_get_location_from_sp(self.sp), _get_location_from_form(forms[0]))
for code, amt in amounts.items():
[product] = filter(lambda p: p.code_ == code, self.products)
trans = StockTransaction.objects.get(product_id=product._id)
self.assertEqual(self.sp._id, trans.case_id)
self.assertEqual(0, trans.quantity)
self.assertEqual(amt, trans.stock_on_hand)
class StockRequisitionTest(object):
requisitions_enabled = True
user_definitions = [ROAMING_USER]
def setUp(self):
super(CommTrackTest, self).setUp()
self.user = self.users[0]
def testRequisition(self):
self.assertEqual(0, len(RequisitionCase.open_for_location(self.domain.name, self.loc._id)))
self.assertEqual(0, len(self.get_commtrack_forms()))
amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# req loc1 pp 10 pq 20...
handled = handle(self.user.get_verified_number(), 'req {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in amounts.items())
))
self.assertTrue(handled)
# make sure we got the updated requisitions
reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id)
self.assertEqual(3, len(reqs))
forms = list(self.get_commtrack_forms())
self.assertEqual(1, len(forms))
self.assertEqual(self.sp.location_, forms[0].location_)
# check updated status
for code, amt in amounts.items():
spp = CommCareCase.get(self.spps[code]._id)
# make sure the index was created
            [req_ref] = spp.reverse_indices
req_case = RequisitionCase.get(req_ref.referenced_id)
self.assertEqual(str(amt), req_case.amount_requested)
self.assertEqual(self.user._id, req_case.requested_by)
self.assertEqual(req_case.location_, self.sp.location_)
self.assertTrue(req_case._id in reqs)
self.assertEqual(spp._id, req_case.get_product_case()._id)
def testApprovalBadLocations(self):
self.testRequisition()
try:
handle(self.user.get_verified_number(), 'approve')
self.fail("empty locations should fail")
except SMSError, e:
self.assertEqual('must specify a location code', str(e))
try:
handle(self.user.get_verified_number(), 'approve notareallocation')
self.fail("unknown locations should fail")
except SMSError, e:
self.assertTrue('invalid location code' in str(e))
def testSimpleApproval(self):
self.testRequisition()
# approve loc1
handled = handle(self.user.get_verified_number(), 'approve {loc}'.format(
loc='loc1',
))
self.assertTrue(handled)
reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id)
self.assertEqual(3, len(reqs))
for req_id in reqs:
req_case = RequisitionCase.get(req_id)
self.assertEqual(RequisitionStatus.APPROVED, req_case.requisition_status)
self.assertEqual(req_case.amount_requested, req_case.amount_approved)
self.assertEqual(self.user._id, req_case.approved_by)
self.assertIsNotNone(req_case.approved_on)
self.assertTrue(isinstance(req_case.approved_on, datetime))
self.assertEqual(req_case.product_id, req_case.get_product_case().product)
def testSimplePack(self):
self.testRequisition()
# pack loc1
handled = handle(self.user.get_verified_number(), 'pack {loc}'.format(
loc='loc1',
))
self.assertTrue(handled)
reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id)
self.assertEqual(3, len(reqs))
for req_id in reqs:
req_case = RequisitionCase.get(req_id)
self.assertEqual(RequisitionStatus.PACKED, req_case.requisition_status)
self.assertEqual(req_case.amount_requested, req_case.amount_packed)
self.assertEqual(self.user._id, req_case.packed_by)
self.assertIsNotNone(req_case.packed_on)
self.assertTrue(isinstance(req_case.packed_on, datetime))
self.assertEqual(req_case.product_id, req_case.get_product_case().product)
def testReceipts(self):
# this tests the requisition specific receipt keyword. not to be confused
# with the standard stock receipt keyword
self.testRequisition()
reqs = RequisitionCase.open_for_location(self.domain.name, self.loc._id)
self.assertEqual(3, len(reqs))
req_ids_by_product_code = dict(((RequisitionCase.get(id).get_product().code, id) for id in reqs))
rec_amounts = {
'pp': 30,
'pq': 20,
'pr': 10,
}
# rec loc1 pp 10 pq 20...
handled = handle(self.user.get_verified_number(), 'rec {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in rec_amounts.items())
))
self.assertTrue(handled)
# we should have closed the requisitions
self.assertEqual(0, len(RequisitionCase.open_for_location(self.domain.name, self.loc._id)))
forms = list(self.get_commtrack_forms())
self.assertEqual(2, len(forms))
self.assertEqual(self.sp.location_, forms[1].location_)
# check updated status
for code, amt in rec_amounts.items():
req_case = RequisitionCase.get(req_ids_by_product_code[code])
self.
|
dserv01/SyncLosslessToLossyMusicLibrary
|
SyncLosslessToLossyMusicLibrary.py
|
Python
|
gpl-2.0
| 6,785 | 0.005601 |
__author__ = 'Dominik Krupke, dserv01.de'
#
# While you may want to listen to lossless music on your computer, you may not be able to also listen to it on a
# mobile device because it takes too much space. A 32GB SD card does not suffice for a full music library, so your
# options are to either keep only a subset on the mobile device or to convert the music to a lossy format. This
# script is for the second option.
# * Of course you don't want the lossy files on your computer because you already have your perfect lossless music there.
# * If you extend your music library you want a simple way to also have the new files on your mobile device.
# * You don't want to convert already converted music twice.
# This script synchronizes a lossless library folder (e.g. your music folder on your computer) to a lossy library
# folder (e.g. the music folder of your mobile device) by checking that every music file in your lossless folder has
# a converted version in your lossy folder. If this is not the case, the file is converted. Conversely, it checks
# that the lossless file still exists for each lossy file in your library; otherwise the lossy file is removed (so
# removing a file from your lossless library also removes it from your lossy library).
#
# You can use your own commands for converting specific files. These commands have to convert a single file (check the
# commands-array).
#
# The configuration is done with the from_path and the to_path, as well as with the commands-array.
import os
import subprocess
##### CONFIGURATION ###########################################################################################
# This is the path of your lossless library, e.g. '/home/YOURNAME/Music/'
FROM_PATH = '/home/doms/THINKPAD-L450/Music/'
# This is the path of your lossy library, e.g. /mnt/SDCARD0/Music/'
TO_PATH = '/home/foldersync/MotoX-Play/Music/'
# Use [INPUT] and [OUTPUT] to build your commands. Both will be replaced by the full path but without the file extension,
# e.g. /home/doms/Music/Beethoven/FuerElise.flac -> /home/doms/Music/Beethoven/FuerElise
# You need to add the old and new file extensions for checking whether a file is already converted and for removing old files
COMMANDS = [['flac', 'ogg', 'oggenc -q 8 [INPUT].flac -o [OUTPUT].ogg'],
['mp3', 'mp3', 'cp [INPUT].mp3 [OUTPUT].mp3']
# ,['jpg', 'jpg', 'cp [INPUT].jpg [OUTPUT].jpg']
]
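# For example (added illustration), with the flac entry above a source file
#   <FROM_PATH>/Beethoven/FuerElise.flac
# is converted by running:
#   oggenc -q 8 <FROM_PATH>/Beethoven/FuerElise.flac -o <TO_PATH>/Beethoven/FuerElise.ogg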
# Remove files that are not in the original library
SYNC_DELETIONS = True
ASK_BEFORE_DELETE = False
###############################################################################################################
# Check if vorbis-tools are installed
output = subprocess.check_output("whereis oggenc", shell=True)
if (len(output) < 10):
print "You need to install vorbis-tools first (Debian/Ubuntu: sudo apt-get install vorbis-tools)"
print "If you don't use it, remove this check from the code"
exit(1)
# Check path format
if (FROM_PATH[-1] != '/' or TO_PATH[-1] != '/'):
print "Paths should end with \'/\'"
exit(1)
# Create the library path if it does not exist
try:
if (not os.path.exists(TO_PATH)):
os.makedirs(TO_PATH)
elif (os.path.isfile(TO_PATH)):
raise Exception("Directory is file?!")
except Exception as e:
print "Could not create " + TO_PATH + " because " + str(e)
print "Aborting"
exit(1)
# Create folders if not existing
def createFolder(subpath):
if (os.path.exists(TO_PATH + subpath) and os.path.isdir(TO_PATH + subpath)):
return True
try:
os.makedirs(TO_PATH + subpath)
return True
except Exception as e:
print "Could not create directory " + subpath+" because "+str(e)
return False
# Escape the paths for the os.system
def escapePath(s):
return s.replace(" ", "\ ").replace(")", "\)").replace("(", "\(").replace("&", "\&").replace("'", "\\\'")
# Go through all files and convert
for root, dirs, files in os.walk(FROM_PATH, topdown=False):
subpath = root[len(FROM_PATH):] + "/"
if (createFolder(subpath)):
for name in files:
filename_without_extension = os.path.splitext(name)[0]
file_extension = os.path.splitext(name)[1][1:]
source_path_without_extension = FROM_PATH + subpath + filename_without_extension
converted_path_without_extension = TO_PATH + subpath + filename_without_extension
            # Get the command triple - sure, you could do this more efficiently with a hash map, but there will only be a few entries
command_tripple = None
for tripple in COMMANDS:
if (tripple[0] == file_extension):
command_tripple = tripple
break
if (not command_tripple):
continue
source_path = source_path_without_extension + "." + command_tripple[0]
goal_path = converted_path_without_extension + "." + command_tripple[1]
if (os.path.isfile(source_path)):
# If goal file does not exists or is older than source
if (not os.path.exists(goal_path) or os.path.getctime(source_path) > os.path.getctime(goal_path)):
print "Processing " + subpath + name
os.system(command_tripple[2].replace("[INPUT]", escapePath(source_path_without_extension)).replace(
"[OUTPUT]", escapePath(converted_path_without_extension)))
else:
print "Could not find " + subpath + name
# Remove old files
if (SYNC_DELETIONS):
for root, dirs, files in os.walk(TO_PATH, topdown=False):
subpath = root[len(TO_PATH):] + "/"
for name in files:
filename_without_extension = os.path.splitext(name)[0]
file_extension = os.path.splitext(name)[1][1:]
source_path_without_extension = FROM_PATH + subpath + filename_without_extension
converted_path_without_extension = TO_PATH + subpath + filename_without_extension
original_exists = False
for tripple in COMMANDS:
if (tripple[1] == file_extension and os.path.exists(source_path_without_extension + "." + tripple[0])):
original_exists = True
break
if (not original_exists):
filepath_to_delete = escapePath(converted_path_without_extension) + "." + file_extension
print "Deleting "+filepath_to_delete
os.system("rm " + ("-i " if ASK_BEFORE_DELETE else "") + filepath_to_delete)
# Remove old empty folders
for folder in dirs:
subpath = root[len(TO_PATH):] + "/"
if not os.path.exists(FROM_PATH + subpath + folder):
os.system("rmdir " + escapePath(TO_PATH + subpath + folder))
|
tiagocoutinho/bliss
|
bliss/tango/servers/nanobpm_ds.py
|
Python
|
lgpl-3.0
| 19,010 | 0.00526 |
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
import sys
import time
import numpy
import struct
import logging
import threading
# tango imports
import tango
from tango import GreenMode
from tango import DebugIt
from tango.server import run
from tango.server import Device
from tango.server import attribute, command
from tango.server import device_property
# Add additional imports
import gevent
from gevent import lock
from functools import wraps
from bliss.controllers.nano_bpm import NanoBpm as nanoBpm
def is_cmd_allowed(fisallowed):
def is_allowed(func):
@wraps(func)
def rfunc(self, *args, **keys):
if getattr(self, fisallowed)():
return func(self, *args, **keys)
else:
raise Exception("Command not allowed")
return rfunc
return is_allowed
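# Usage sketch (added example): guard a Tango command with an allowed-check
# method, mirroring the fisallowed hooks used on the attributes below.
#
#   @command
#   @is_cmd_allowed("is_attr_rw_allowed")
#   def some_command(self):
#       ...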
class NanoBpm(Device):
CONTINUOUS, STREAMING = range(2)
BPP8, BPP16, BPP32 = range(3)
# -------------------------------------------------------------------------
# Device Properties
# -------------------------------------------------------------------------
CommandUrl = device_property(dtype=str,doc='use socket://192.999.999.999:2222')
ControlUrl = device_property(dtype=str, doc='use socket://192.999.999.999:2223')
Name = device_property(dtype=str, default_value="NanoBpm")
# -------------------------------------------------------------------------
# General methods
# -------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
        self._nanoBpm = None
Device.__init__(self, *args, **kwargs)
@DebugIt()
def delete_device(self):
        self._nanoBpm = None
@DebugIt()
def init_device(self):
Device.init_device(self)
kwargs = {
'command_url': self.CommandUrl,
'control_url': self.ControlUrl,
}
self._nanoBpm = nanoBpm(self.Name, kwargs)
self._AcqMode2String = {
self.CONTINUOUS : 'continuous',
self.STREAMING : 'stream'
}
self.imageDepth2String = {
self.BPP8 : "bpp8",
self.BPP16 : "bpp16",
self.BPP32 : "bpp32"
}
self._logger = logging.getLogger(str(self))
logging.basicConfig(level=logging.INFO)
self._logger.setLevel(logging.DEBUG)
self._imageDepth = self.BPP8
self._imageData = None
self._lock = lock.Semaphore()
self._acqMode = self.STREAMING
self._CoG = None
self._xprofile = None
self._yprofile = None
self._xfit = None
self._yfit = None
# set up change events for Tango clients
self.set_change_event("Centre", True, False)
self.set_change_event("Xprofile", True, False)
self.set_change_event("Yprofile", True, False)
self.set_change_event("Xfit", True, False)
self.set_change_event("Yfit", True, False)
self.set_change_event("ReadImage8", True, False)
self.set_change_event("ReadImage16", True, False)
self.set_change_event("ReadImage32", True, False)
self._nanoBpm.subscribe(self.bpmCallback)
attr = self.get_device_attr().get_attr_by_name("acqMode")
        attr.set_write_value(self._AcqMode2String[self._acqMode])
        attr = self.get_device_attr().get_attr_by_name("imageDepth")
attr.set_write_value(self.imageDepth2String[self._imageDepth])
if self._nanoBpm is not None:
attr = self.get_device_attr().get_attr_by_name("gain")
attr.set_write_value(self._nanoBpm.GAIN)
attr = self.get_device_attr().get_attr_by_name("offset")
attr.set_write_value(self._nanoBpm.OFFSET)
attr = self.get_device_attr().get_attr_by_name("horizMinAmp")
attr.set_write_value(self._nanoBpm.H_MINAMP)
attr = self.get_device_attr().get_attr_by_name("vertMinAmp")
attr.set_write_value(self._nanoBpm.V_MINAMP)
attr = self.get_device_attr().get_attr_by_name("vertMinRSQ")
attr.set_write_value(self._nanoBpm.V_MINRSQ)
attr = self.get_device_attr().get_attr_by_name("horizMinRSQ")
attr.set_write_value(self._nanoBpm.H_MINRSQ)
attr = self.get_device_attr().get_attr_by_name("maxIter")
attr.set_write_value(self._nanoBpm.MAXITER)
self.set_state(tango.DevState.ON)
def always_executed_hook(self):
pass
# -------------------------------------------------------------------------
# Attributes
# -------------------------------------------------------------------------
@attribute(label="AcqMode", dtype=str,
description="Acquisition mode (continuous/stream)")
@DebugIt()
def acqMode(self):
return self._AcqMode2String[self._acqMode]
@acqMode.write
@DebugIt()
def acqMode(self, mode):
ind = self._AcqMode2String.values().index(mode)
self._acqMode = self._AcqMode2String.keys()[ind]
@attribute(label="Integration time", dtype=float, unit="s", min_value="0.0", memorized=True,
description="Integration time in seconds", fisallowed="is_attr_rw_allowed")
@DebugIt()
def integrationTime(self):
return self._nanoBpm.getIntegrationTime()
@integrationTime.write
@DebugIt()
def integrationTime(self, time):
self._nanoBpm.setIntegrationTime(time)
@attribute(label=" Subtract Background", dtype=bool, memorized=True, fisallowed="is_attr_rw_allowed",
description="To activate background subtraction (true = ON)")
@DebugIt()
def subtractBackground(self):
return self._nanoBpm.SUBTRACTDARK
@subtractBackground.write
@DebugIt()
def subtractBackground(self, enable):
self._nanoBpm.SUBTRACTDARK = 1 if enable else 0
@attribute(label="NbFramesToSum", dtype=int, hw_memorized=False, memorized=True, fisallowed="is_attr_rw_allowed",
description="Number frames to average or sum (must be power of 2. default=4")
@DebugIt()
def nbFramesToSum(self):
return self._nanoBpm.nbFramesToSum
@nbFramesToSum.write
@DebugIt()
def nbFramesToSum(self, num):
self._nanoBpm.nbFramesToSum = num
@attribute(label="Gain", dtype=int, fisallowed="is_attr_rw_allowed",
description="Gain of the device")
def gain(self):
return self._nanoBpm.GAIN
@gain.write
def gain(self, val):
self._nanoBpm.GAIN = val
@attribute(label="Offset", dtype=int, fisallowed="is_attr_rw_allowed",
description="Offset of the device")
def offset(self):
return self._nanoBpm.OFFSET
@offset.write
def offset(self, val):
self._nanoBpm.OFFSET = val
@attribute(label="Maximum Iterations", dtype=int, fisallowed="is_attr_rw_allowed",
description="Maximum number of iterations for the fitting algorithm")
def maxIter(self):
return self._nanoBpm.MAXITER
@maxIter.write
def maxIter(self, val):
self._nanoBpm.MAXITER = val
@attribute(label="Horizontal Minimum Amplitude", dtype=float, fisallowed="is_attr_rw_allowed",
description="")
def horizMinAmp(self):
return self._nanoBpm.H_MINAMP
@horizMinAmp.write
def horizMinAmp(self, val):
self._nanoBpm.H_MINAMP = val
@attribute(label="Vertical Minimum Amplitude", dtype=float, fisallowed="is_attr_rw_allowed",
description="Fitting minimum amplitude in vertical direction")
def vertMinAmp(self):
return self._nanoBpm.V_MINAMP
@vertMinAmp.write
def vertMinAmp(self, val):
self._nanoBpm.V_MINAMP = val
@attribute(label="Vertical Minimum Chi-squared", dtype=float, fisallowed="is_attr_rw_allowed",
description="Minimum chi-squared value for fitting in vertical direction")
def vertMinRSQ(self):
return self._nanoBpm.V_MINRSQ
@vertMinRSQ.wri
|
openpli-arm/enigma2-arm
|
lib/python/Plugins/Extensions/FactoryTest/NetworkTest.py
|
Python
|
gpl-2.0
| 16,179 | 0.033191 |
import os
import re
from os import system, popen, path as os_path, listdir
from Screens.Screen import Screen
from Components.Harddisk import *
from Components.Sources.StaticText import StaticText
from Components.ActionMap import ActionMap, NumberActionMap
from FactoryTestPublic import *
import time
from enigma import eTimer
from Components.Network import Network,iNetwork
from Components.Label import Label,MultiColorLabel
from Components.Pixmap import Pixmap,MultiPixmap
class NetworkTest(Screen):
skin = """
<screen name="About" position="220,57" size="840,605" title="About" flags="wfNoBorder">
<ePixmap position="0,0" zPosit
|
ion="-10" size="1100,605" pixmap="DMConcinnity-HD-Transp/menu/setupbg.png" />
<widget source="global.CurrentTime" render="Label
|
" position="20,20" size="80,25" font="Regular;23" foregroundColor="black" backgroundColor="grey" transparent="1">
<convert type="ClockToText">Default</convert>
</widget>
<widget source="global.CurrentTime" render="Label" position="110,20" size="140,25" font="Regular;23" foregroundColor="blue" backgroundColor="grey" transparent="1">
<convert type="ClockToText">Format:%d.%m.%Y</convert>
</widget>
<eLabel text="Network Test" position="270,20" size="540,43" font="Regular;35" halign="right" foregroundColor="black" backgroundColor="grey" transparent="1" />
<widget source="workstatus" render="Label" position="110,145" size="700,70" font="Regular;26" foregroundColor="yellow" backgroundColor="transpBlack" transparent="1" />
<widget source="testinfo" render="Label" position="120,230" size="660,330" font="Regular;15" backgroundColor="transpBlack" transparent="1" />
</screen>"""
def __init__(self,session,testitem):
Screen.__init__(self, session)
self.testserver = "8.8.8.8"
self.testitem = testitem
self._runing = False
self.result = False
self["workstatus"] = StaticText("Check the network cable is connected,Press OK\n key Start Test")
self["testinfo"] = StaticText("")
self["teststatus"] = StaticText("Start")
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"cancel": self.doclose,
"ok": self.startTest
})
self.testTimer = eTimer()
self.testTimer.callback.append(self.pingServer)
def startTest(self):
print "key green"
if self._runing == False:
self["workstatus"].setText("Network Connecting ....")
self["testinfo"].setText("")
self.testTimer.start(1000)
    def doclose(self):
        self.updateResult()
        self.close()
    def updateResult(self):
        if self.result:
            self.testitem.setTestResult(FactoryTestItem.TESTRESULT_OK)
        else:
            self.testitem.setTestResult(FactoryTestItem.TESTRESULT_ERROR)
def dhcp(self):
cmd = "ifconfig eth0 down"
p = os.popen(cmd)
info = p.read()
print "info read",info
def pingServer(self):
self.testTimer.stop()
self._runing = True
        lost_packet = 100
        info = ""  # ensure `info` is defined even if the ping command fails
try:
cmd = "ping -c 3 %s" % self.testserver
p = os.popen(cmd)
info = p.read()
print "info read",info
p.close()
self["testinfo"].setText(info)
except:
print "exception"
print "Network Connection Error!!! Check cable and Hardware"
self.result = False
if info == "":
self["workstatus"].setText("Network Connection Error!!! Check cable and Hardware")
print "Network Connection Error!!! Check cable and Hardware"
self.result = False
else:
try:
re_lost_str = '(\d+)% packet loss'
lost_packet = int(re.search(re_lost_str,info).group(1))
print "lost package is :",lost_packet
except:
self["workstatus"].setText("Network Connection Error!!! Check cable and Hardware")
print "Network Connection Error!!! Check cable and Hardware"
self.result = False
if lost_packet == 100:
self["workstatus"].setText("Network Connection Error!!! Check cable and Hardware")
print "Network Connection Error!!! Check cable and Hardware"
self.result = False
else:
self["workstatus"].setText("Network Connection OK")
print "Network Connection OK"
self.result = True
self._runing = False
return self.result
class WifiTest(Screen):
skin = """
<screen name="WifiTest" position="220,57" size="840,605" title="WifiTest" flags="wfNoBorder">
<ePixmap position="0,0" zPosition="-10" size="1100,605" pixmap="DMConcinnity-HD-Transp/menu/setupbg.png" />
<widget source="global.CurrentTime" render="Label" position="20,20" size="80,25" font="Regular;23" foregroundColor="black" backgroundColor="grey" transparent="1">
<convert type="ClockToText">Default</convert>
</widget>
<widget source="global.CurrentTime" render="Label" position="110,20" size="140,25" font="Regular;23" foregroundColor="blue" backgroundColor="grey" transparent="1">
<convert type="ClockToText">Format:%d.%m.%Y</convert>
</widget>
<eLabel text="Network test" position="270,20" size="540,43" font="Regular;35" halign="right" foregroundColor="black" backgroundColor="grey" transparent="1" />
<widget name="ConfigWifiText" position="70,100" size="400,25" zPosition="1" font="Regular;22" backgroundColor="transpBlack" transparent="1" />
<widget name="ConfigTestInfo" position="70,130" size="600,25" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" />
<widget name="ConfigTest_OK" position="730,100" size="35,27" pixmaps="DMConcinnity-HD-Transp/buttons/green.png,DMConcinnity-HD-Transp/buttons/red.png" zPosition="2" alphatest="blend" />
<eLabel position="70,168" size="700,2" backgroundColor="darkgrey" />
<widget name="DhcpText" position="70,180" size="400,25" zPosition="1" font="Regular;22" backgroundColor="transpBlack" transparent="1" />
<widget name="DhcpTestInfo" position="70,210" size="600,55" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" />
<widget name="DhcpTest_OK" position="730,180" size="35,27" pixmaps="DMConcinnity-HD-Transp/buttons/green.png,DMConcinnity-HD-Transp/buttons/red.png" zPosition="2" alphatest="blend" />
<eLabel position="70,278" size="700,2" backgroundColor="darkgrey" />
<widget name="connectText" position="70,290" size="400,25" zPosition="1" font="Regular;22" backgroundColor="transpBlack" transparent="1" />
<widget name="connectTestInfo" position="70,320" size="600,25" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" />
<widget name="connectTest_OK" position="730,290" size="35,27" pixmaps="DMConcinnity-HD-Transp/buttons/green.png,DMConcinnity-HD-Transp/buttons/red.png" zPosition="2" alphatest="blend" />
<eLabel position="70,358" size="700,2" backgroundColor="darkgrey" />
<widget name="opreateInfo" position="170,450" size="400,200" zPosition="1" font="Regular;22" foregroundColors="#8c8c93,#f23d21,#1cff1c" backgroundColor="transpBlack" transparent="1" />
</screen>"""
def __init__(self,session,testitem,testiface):
Screen.__init__(self, session)
self.testitem = testitem
self._runing = False
self.result = False
self.ipConsole = Console()
self.testiface = testiface
self.testitem = testitem
self.interfacename = "/etc/network/interfaces"
self.interfacebackupname = "/etc/network/interfacesbackup"
self.wlanconfigname = "/etc/wpa_supplicant.wlan0.test.conf"
self.testitem.setTestResult(FactoryTestItem.TESTRESULT_ERROR)
self.creatLables()
self.teststep = 0
self.retest = False
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"cancel": self.doclose,
"ok": self.startTest
})
self.testTimer = eTimer()
self.testTimer.callback.append(self.doTest)
def creatLables(self):
if iNetwork.isWirelessInterface(self.testiface):
self["ConfigWifiText"] = MultiColorLabel(_("WLAN connection config"))#
else:
self["ConfigWifiText"] = MultiColorLabel(_("LAN connection config"))#
self["ConfigTestInfo"] = MultiColorLabel()#Teststatus
# self["ConfigTestInfo"] = StaticText("")
self["ConfigTest_OK"] = MultiPixmap()#testicon
self["ConfigTest_OK"].hide()
self["DhcpText"] = MultiColorLabel(_("DHCP"))#
self["DhcpTestInfo"]
|
biomodels/BIOMD0000000370
|
BIOMD0000000370/model.py
|
Python
|
cc0-1.0
| 427 | 0.009368 |
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000370.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
weixsong/algorithm
|
leetcode/10.py
|
Python
|
mit
| 1,638 | 0.002463 |
#!/usr/bin/env python
"""
Regular Expression Matching
Implement regular expression matching with support for '.' and '*'.
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
The function prototype should be:
bool isMatch(const char *s, const char *p)
Some examples:
isMatch("aa","a") → false
isMatch("aa","aa") → true
isMatch("aaa","aa") → false
isMatch("aa", "a*") → true
isMatch("aa", ".*") → true
isMatch("ab",
|
".*") → tr
|
ue
isMatch("aab", "c*a*b") → true
"""
class Solution(object):
"""
O(n^2)
"""
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
m, n = len(p), len(s)
table = [[False for j in xrange(n + 1)] for i in xrange(m + 1)]
table[0][0] = True
        # assume '*' takes zero occurrences of the preceding element
for i in range(2, m + 1):
if p[i - 1] == '*':
table[i][0] = table[i - 2][0]
for i in range(1, m + 1):
for j in range(1, n + 1):
if p[i - 1] != '*':
if p[i - 1] == '.' or p[i - 1] == s[j - 1]:
table[i][j] = table[i - 1][j - 1]
else:
if table[i - 2][j] == True:
table[i][j] = True
else:
table[i][j] = table[i - 1][j]
if p[i - 2] == s[j - 1] or p[i - 2] == '.':
table[i][j] |= table[i][j - 1]
return table[-1][-1]
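# Quick sanity checks (added example, mirroring the cases in the docstring):
if __name__ == '__main__':
    sol = Solution()
    assert not sol.isMatch("aa", "a")
    assert sol.isMatch("aa", ".*")
    assert sol.isMatch("aab", "c*a*b")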
|
cupy/cupy
|
examples/stream/thrust.py
|
Python
|
mit
| 412 | 0 |
# nvprof --print-gpu-trace python examples/stream/thrust.py
import cupy
x = cupy.array([1, 3, 2])
expected = x.sort()
cupy.cuda.Device().synchronize()
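# First style (added comment): scope the stream with a context manager; work
# issued inside the `with` block is queued on this stream.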
stream = cupy.cuda.stream.Stream()
with stream:
y = x.sort()
stream.synchronize()
cupy.testing.assert_array_equal(y, expected)
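# Second style (added comment): stream.use() makes the stream current for all
# subsequently issued work instead of scoping it with a `with` block.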
stream = cupy.cuda.stream.Stream()
stream.use()
y = x.sort()
stream.synchronize()
cupy.testing.assert_array_equal(y, expected)
|
sernst/cauldron
|
cauldron/cli/server/run.py
|
Python
|
mit
| 4,787 | 0 |
import logging
import os
import site
import time
import typing
from argparse import ArgumentParser
import waitress
from flask import Flask
import cauldron as cd
from cauldron import environ
from cauldron import templating
from cauldron.render.encoding import ComplexFlaskJsonEncoder
from cauldron.session import writing
APPLICATION = Flask('Cauldron')
APPLICATION.json_encoder = ComplexFlaskJsonEncoder
SERVER_VERSION = [0, 0, 1, 1]
try:
site_packages = list(site.getsitepackages())
except Exception: # pragma: no cover
site_packages = []
active_execution_responses = dict() # type: typing.Dict[str, environ.Response]
server_data = dict(
version=SERVER_VERSION,
user=os.environ.get('USER'),
test=1,
pid=os.getpid()
)
authorization = {'code': ''}
def get_server_data() -> dict:
"""..."""
out = dict(
uptime=environ.run_time().total_seconds(),
cauldron_settings=environ.package_settings
)
out.update(server_data)
out.update(environ.systems.get_system_data())
return out
def get_running_step_changes(write: bool = False) -> list:
"""..."""
project = cd.project.get_internal_project()
running_steps = list(filter(
lambda step: step.is_running,
project.steps
))
def get_changes(step):
step_data = writing.step_writer.serialize(step)
if write:
writing.save(project, step_data.file_writes)
return dict(
name=step.definition.name,
action='updated',
step=step_data._asdict(),
timestamp=time.time(),
written=write
)
return [get_changes(step) for step in running_steps]
def parse(
args: typing.List[str] = None,
arg_parser: ArgumentParser = None
) -> dict:
"""Parses the arguments for the cauldron server"""
parser = arg_parser or create_parser()
return vars(parser.parse_args(args))
def create_parser(arg_parser: ArgumentParser = None) -> ArgumentParser:
"""
Creates an argument parser populated with the arg formats for the server
command.
"""
parser = arg_parser or ArgumentParser()
parser.description = 'Cauldron kernel server'
parser.add_argument(
'-p', '--port',
dest='port',
type=int,
default=5010
)
parser.add_argument(
'-d', '--debug',
dest='debug',
default=False,
action='store_true'
)
parser.add_argument(
'-v', '--version',
dest='version',
default=False,
action='store_true'
)
parser.add_argument(
'-c', '--code',
dest='authentication_code',
type=str,
default=''
)
parser.add_argument(
'-n', '--name',
dest='host',
type=str,
default=None
)
parser.add_argument(
'--basic',
action='store_true',
help="""
When specified a basic Flask server will be used to
serve the kernel instead of a waitress WSGI server.
Use only when necessary as the Flask server isn't
as robust.
"""
)
return parser
def create_application(
        port: int = 5010,
        debug: bool = False,
        public: bool = False,
        host=None,
        authentication_code: str = '',
quiet: bool = False,
**kwargs
) -> dict:
"""..."""
if kwargs.get('version'):
environ.log('VERSION: {}'.format(environ.version))
return environ.systems.end(0)
if host is None and public:
host = '0.0.0.0'
server_data['host'] = host
server_data['port'] = port
server_data['debug'] = debug
server_data['id'] = environ.start_time.isoformat()
authorization['code'] = authentication_code if authentication_code else ''
if not debug:
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
if not quiet:
templating.render_splash()
environ.modes.add(environ.modes.INTERACTIVE)
return {'application': APPLICATION, **server_data}
def execute(
port: int = 5010,
debug: bool = False,
public: bool = False,
host=None,
authentication_code: str = '',
quiet: bool = False,
**kwargs
):
"""..."""
populated_server_data = create_application(
port=port,
debug=debug,
public=public,
host=host,
authentication_code=authentication_code,
quiet=quiet,
**kwargs
)
app = populated_server_data['application']
if kwargs.get('basic'):
app.run(port=port, debug=debug, host=host)
else:
waitress.serve(app, port=port, host=host or 'localhost')
environ.modes.remove(environ.modes.INTERACTIVE)
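# Plausible entry point (added sketch; the package may actually launch this
# module through its own CLI, so treat it as illustrative only):
#
#   if __name__ == '__main__':
#       execute(**parse())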
|
milkey-mouse/swood
|
swood/complain.py
|
Python
|
mit
| 4,905 | 0.003262 |
"""User-friendly exception handler for swood."""
import http.client
import traceback
import sys
import os
__file__ = os.path.abspath(__file__)
class ComplainToUser(Exception):
"""When used with ComplaintFormatter, tells the user what error (of theirs) caused the failure and exits."""
pass
def can_submit():
if not os.path.isdir(os.path.expanduser("~/.swood")):
os.mkdir(os.path.expanduser("~/.swood"))
sbpath = os.path.expanduser("~/.swood/submit-bugs")
if os.path.isfile(sbpath):
try:
with open(sbpath) as sb:
resp = sb.read(1)
if resp == "1":
return 1
elif resp == "0":
return 0
except:
pass
while True:
resp = input(
"Something went wrong. Do you want to send an anonymous bug report? (Type Y or N): ").lower()
if resp in ("yes", "y", "true"):
try:
with open(sbpath, "w") as sb:
sb.write("1")
except:
pass
return 1
elif resp in ("no", "n", "false"):
try:
with open(sbpath, "w") as sb:
sb.write("0")
except:
pass
return 0
class ComplaintFormatter:
"""Notifies the user when the program fails predictably and uploads bug reports.
When used in a with statement, ComplaintFormatter catches all exceptions. If the
exception is a ComplainToUser exception, it will simply print the error message
and exit (with an exit code of 1). If the exception is something else (i.e. an
actual, unexpected exception), it will upload the traceback to the swood debug
server (unless the user has opted out of sending bug reports.)
"""
def __init__(self, version=None):
self.version = version
def __enter__(self):
pass
def __exit__(self, exc_type, exc, tb):
if isinstance(exc, ComplainToUser):
print("Error: {}".format(exc), file=sys.stderr)
sys.exit(1)
elif isinstance(exc, Exception):
# scrub stack of full path names for extra privacy
# also normalizes the paths, helping to detect dupes
scrubbed_stack = traceback.extract_tb(tb)
# cut off traces of stuff that isn't ours
others_cutoff = next(idx for idx, fs in enumerate(scrubbed_stack) if os.path.samefile(
os.path.dirname(fs.filename), os.path.dirname(__file__)))
scrubbed_stack = scrubbed_stack[others_cutoff:]
# rewrite paths so they contain only relative directories
# (hides username on Windows and Linux)
dirstart = os.path.abspath(
os.path.join(os.path.dirname(__file__), ".."))
for fs in scrubbed_stack:
fs.filename = os.path.relpath(
fs.filename, start=dirstart).replace("\\", "/")
str_tb = "Traceback (most recent call last):\n" + \
"".join(traceback.format_list(scrubbed_stack)) + \
"".join(traceback.format_exception_only(exc_type, exc))
if self.version is not None:
str_tb = "# " + self.version + "\n" + str_tb
if "--optout" in sys.argv or "-o" in sys.argv:
print(
"Something went wrong. A bug report will not be sent because of your command-line flag.", file=sys.stderr)
return False
elif os.environ.get("SWOOD_OPTOUT") == "1":
print(
"Something went wrong. A bug report will not be sent because of your environment variable.", file=sys.stderr)
return False
elif not can_submit():
print(
"Something went wrong. A bug report will not be sent because of
|
your config setting.", file=sys.stderr)
return False
else:
print(
"Something went wrong. A bug report will be sent to help figure it out. (see --optout)", file=sys.stderr)
try:
conn = http.client.HTTPSConnection("meme.institute")
conn.request("POST", "/swood/bugs/submit", str_tb)
resp = conn.getresponse().read().decode("utf-8")
if resp == "done":
print("New bug submitted!", file=sys.stderr)
elif resp == "dupe":
print(
"This bug is already in the queue to be fixed.", file=sys.stderr)
else:
raise Exception
except Exception:
print("Submission of bug report failed.", file=sys.stderr)
traceback.print_exc()
return True
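# Example usage (added sketch; `main` is a hypothetical entry point):
#
#   with ComplaintFormatter(version="swood x.y.z"):
#       main()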
|
mganeva/mantid
|
Testing/SystemTests/tests/analysis/EQSANSFlatTestAPIv2.py
|
Python
|
gpl-3.0
| 3,330 | 0.001201 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=no-init,attribute-defined-outside-init
import systemtesting
from mantid.simpleapi import *
from reduction_workflow.instruments.sans.sns_command_interface import *
from reduction_workflow.instruments.sans.hfir_command_interface import *
FILE_LOCATION = "/SNS/EQSANS/IPTS-5636/data/"
class EQSANSFlatTest(systemtesting.MantidSystemTest):
def requiredFiles(self):
files = []
files.append(FILE_LOCATION+"EQSANS_5704_event.nxs")
files.append(FILE_LOCATION+"EQSANS_5734_event.nxs")
files.append(FILE_LOCATION+"EQSANS_5732_event.nxs")
files.append(FILE_LOCATION+"EQSANS_5738_event.nxs")
files.append(FILE_LOCATION+"EQSANS_5729_event.nxs")
files.append(FILE_LOCATION+"EQSANS_5737_event.nxs")
files.append(FILE_LOCATION+"EQSANS_5703_event.nxs")
files.append("bl6_flux_at_sample")
return files
def runTest(self):
"""
System test for EQSANS.
        This test is meant to be run at SNS and takes a long time.
It is used to verify that the complete reduction chain works
and reproduces reference results.
"""
configI = ConfigService.Instance()
configI["facilityName"]='SNS'
EQSANS()
        SolidAngle()
DarkCurrent(FILE_LOCATION+"EQSANS_5704_event.nxs")
TotalChargeNormalization(beam_file="bl6_flux_at_sample")
AzimuthalAverage(n_bins=100, n_subpix=1, log_binning=False)
IQxQy(nbins=100)
UseConfigTOFTailsCutoff(True)
PerformFlightPathCorrection(True)
UseConfigMask(True)
SetBeamCenter(89.6749, 129.693)
SensitivityCorrection(FILE_LOCATION+'EQSANS_5703_event.nxs',
min_sensitivity=0.5,
max_sensitivity=1.5, use_sample_dc=True)
DirectBeamTransmission(FILE_LOCATION+"EQSANS_5734_event.nxs",
FILE_LOCATION+"EQSANS_5738_event.nxs", beam_radius=3)
ThetaDependentTransmission(False)
AppendDataFile([FILE_LOCATION+"EQSANS_5729_event.nxs"])
CombineTransmissionFits(True)
Background(FILE_LOCATION+"EQSANS_5732_event.nxs")
BckDirectBeamTransmission(FILE_LOCATION+"EQSANS_5737_event.nxs",
FILE_LOCATION+"EQSANS_5738_event.nxs", beam_radius=3)
BckThetaDependentTransmission(False)
BckCombineTransmissionFits(True)
SaveIqAscii(process='None')
SetAbsoluteScale(277.781)
Reduce1D()
# This reference is old, ignore the first non-zero point and
# give the comparison a reasonable tolerance (less than 0.5%).
mtd['EQSANS_5729_event_frame1_Iq'].dataY(0)[1] = 856.30028119108
def validate(self):
self.tolerance = 5.0
self.disableChecking.append('Instrument')
self.disableChecking.append('Sample')
self.disableChecking.append('SpectraMap')
self.disableChecking.append('Axes')
return "EQSANS_5729_event_frame1_Iq", 'EQSANSFlatTest.nxs'
|
dajohnso/cfme_tests
|
utils/workloads.py
|
Python
|
gpl-2.0
| 3,720 | 0.008602 |
"""Functions for workloads."""
from utils.conf import cfme_performance
def get_capacity_and_utilization_replication_scenarios():
if 'test_cap_and_util_rep' in cfme_performance.get('tests', {}).get('workloads', []):
if (cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios'] and
len(
cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios']) > 0):
# Add Replication Master into Scenario(s):
for scn in cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios']:
scn['replication_master'] = cfme_performance['replication_master']
return cfme_performance['tests']['workloads']['test_cap_and_util_rep']['scenarios']
return []
def get_capacity_and_utilization_scenarios():
if 'test_cap_and_util' in cfme_performance.get('tests', {}).get('workloads', []):
if (cfme_performance['tests']['workloads']['test_cap_and_util']['scenarios'] and
len(cfme_performance['tests']['workloads']['test_cap_and_util']['scenarios']) > 0):
return cfme_performance['tests']['workloads']['test_cap_and_util']['scenarios']
return []
def get_idle_scenarios():
if 'test_idle' in cfme_performance.get('tests', {}).get('workloads', []):
if(cfme_performance['tests']['workloads']['test_idle']['scenarios'] and
len(cfme_performance['tests']['workloads']['test_idle']['scenarios']) > 0):
return cfme_performance['tests']['workloads']['test_idle']['scenarios']
return []
def get_provisioning_scenarios():
if 'test_provisioning' in cfme_performance.get('tests', {}).get('workloads', []):
        if(cfme_performance['tests']['workloads']['test_provisioning']['scenarios'] and
len(cfme_performance['tests']['workloads']['test_provisioning']['scenarios']) > 0):
return cfme_performance['tests']['workloads']['test_provisioning']['scenarios']
return []
def get_refresh_providers_scenarios():
    if 'test_refresh_providers' in cfme_performance.get('tests', {}).get('workloads', []):
if (cfme_performance['tests']['workloads']['test_refresh_providers']['scenarios'] and
len(
cfme_performance['tests']['workloads']['test_refresh_providers']['scenarios']) > 0):
return cfme_performance['tests']['workloads']['test_refresh_providers']['scenarios']
return []
def get_refresh_vms_scenarios():
if 'test_refresh_vms' in cfme_performance.get('tests', {}).get('workloads', []):
if (cfme_performance['tests']['workloads']['test_refresh_vms']['scenarios'] and
len(cfme_performance['tests']['workloads']['test_refresh_vms']['scenarios']) > 0):
return cfme_performance['tests']['workloads']['test_refresh_vms']['scenarios']
return []
def get_smartstate_analysis_scenarios():
if 'test_smartstate' in cfme_performance.get('tests', {}).get('workloads', []):
if(cfme_performance['tests']['workloads']['test_smartstate']['scenarios'] and
len(cfme_performance['tests']['workloads']['test_smartstate']['scenarios']) > 0):
return cfme_performance['tests']['workloads']['test_smartstate']['scenarios']
return []
def get_ui_single_page_scenarios():
if 'test_ui_single_page' in cfme_performance.get('tests', {}).get('ui_workloads', []):
if(cfme_performance['tests']['ui_workloads']['test_ui_single_page']['scenarios'] and
len(cfme_performance['tests']['ui_workloads']['test_ui_single_page']['scenarios']) > 0):
return cfme_performance['tests']['ui_workloads']['test_ui_single_page']['scenarios']
return []
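# All of the accessors above repeat the same lookup; a generic helper along
# these lines (added sketch, not part of the original module) could replace
# the duplication:
def _get_scenarios(test_name, section='workloads'):
    """Return the configured scenario list for a test, or [] if unset."""
    if test_name in cfme_performance.get('tests', {}).get(section, []):
        scenarios = cfme_performance['tests'][section][test_name]['scenarios']
        if scenarios and len(scenarios) > 0:
            return scenarios
    return []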
|
Ingenico-ePayments/connect-sdk-python2
|
ingenico/connect/sdk/domain/payment/definitions/customer_account.py
|
Python
|
mit
| 10,943 | 0.005209 |
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.payment.definitions.customer_account_authentication import CustomerAccountAuthentication
from ingenico.connect.sdk.domain.payment.definitions.customer_payment_activity import CustomerPaymentActivity
from ingenico.connect.sdk.domain.payment.definitions.payment_account_on_file import PaymentAccountOnFile
class CustomerAccount(DataObject):
"""
| Object containing data related to the account the customer has with you
"""
__authentication = None
__change_date = None
__changed_during_checkout = None
__create_date = None
__had_suspicious_activity = None
__has_forgotten_password = None
__has_password = None
__password_change_date = None
__password_changed_during_checkout = None
__payment_account_on_file = None
__payment_account_on_file_type = None
__payment_activity = None
@property
def authentication(self):
"""
| Object containing data on the authentication used by the customer to access their account
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.customer_account_authentication.CustomerAccountAuthentication`
"""
return self.__authentication
@authentication.setter
def authentication(self, value):
self.__authentication = value
@property
def change_date(self):
"""
        | The last date (YYYYMMDD) on which the customer made changes to their account with you. These are changes to billing & shipping address details, new payment account (tokens), or new user(s) added.
Type: str
"""
return self.__change_date
@change_date.setter
def change_date(self, value):
self.__change_date = value
@property
def changed_during_checkout(self):
"""
        | true = the customer made changes to their account during this checkout
        | false = the customer didn't change anything to their account during this checkout
        | The changes meant here are changes to billing & shipping address details, new payment account (tokens), or new user(s) added.
Type: bool
"""
return self.__changed_during_checkout
@changed_during_checkout.setter
def changed_during_checkout(self, value):
self.__changed_during_checkout = value
@property
def create_date(self):
"""
| The date (YYYYMMDD) on which the customer created their account with you
Type: str
"""
return self.__create_date
@create_date.setter
def create_date(self, value):
self.__create_date = value
@property
def had_suspicious_activity(self):
"""
| Specifies if you have experienced suspicious activity on the account of the customer
| true = you have experienced suspicious activity (including previous fraud) on the customer account used for this transaction
| false = you have experienced no suspicious activity (including previous fraud) on the customer account used for this transaction
Type: bool
"""
return self.__had_suspicious_activity
@had_suspicious_activity.setter
def had_suspicious_activity(self, value):
self.__had_suspicious_activity = value
@property
def has_forgotten_password(self):
"""
| Specifies if the customer (initially) had forgotten their password
* true - The customer has forgotten their password
* false - The customer has not forgotten their password
Type: bool
"""
return self.__has_forgotten_password
@has_forgotten_password.setter
def has_forgotten_password(self, value):
self.__has_forgotten_password = value
@property
def has_password(self):
"""
        | Specifies if the customer entered a password to gain access to an account registered with you
* true - The customer has used a password to gain access
* false - The customer has not used a password to gain access
Type: bool
"""
return self.__has_password
@has_password.setter
def has_password(self, value):
self.__has_password = value
@property
def password_change_date(self):
"""
| The last date (YYYYMMDD) on which the customer changed their password for the account used in this transaction
Type: str
"""
return self.__password_change_date
@password_change_date.setter
def password_change_date(self, value):
self.__password_change_date = value
@property
def password_changed_during_checkout(self):
"""
| Indicates if the password of an account is changed during this checkout
| true = the customer made changes to their password of the account used during this checkout
        | false = the customer didn't change anything to their password of the account used during this checkout
Type: bool
"""
return self.__password_changed_during_checkout
@password_changed_during_checkout.setter
def password_changed_during_checkout(self, value):
self.__password_changed_during_checkout = value
@property
def payment_account_on_file(self):
"""
| Object containing information on the payment account data on file (tokens)
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.payment_account_on_file.PaymentAccountOnFile`
"""
return self.__payment_account_on_file
@payment_account_on_file.setter
def payment_account_on_file(self, value):
self.__payment_account_on_file = value
@property
def payment_account_on_file_type(self):
"""
| Indicates the type of account. For example, for a multi-account card product.
* not-applicable = the card used doesn't support multiple card products
* credit = the card used is a credit card
* debit = the card used is a debit card
Type: str
"""
return self.__payment_account_on_file_type
@payment_account_on_file_type.setter
def payment_account_on_file_type(self, value):
self.__payment_account_on_file_type = value
@property
def payment_activity(self):
"""
| Object containing data on the purchase history of the customer with you
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.customer_payment_activity.CustomerPaymentActivity`
"""
return self.__payment_activity
@payment_activity.setter
def payment_activity(self, value):
self.__payment_activity = value
def to_dictionary(self):
dictionary = super(CustomerAccount, self).to_dictionary()
if self.authentication is not None:
dictionary['authentication'] = self.authentication.to_dictionary()
if self.change_date is not None:
dictionary['changeDate'] = self.change_date
if self.changed_during_checkout is not None:
dictionary['changedDuringCheckout'] = self.changed_during_checkout
if self.create_date is not None:
dictionary['createDate'] = self.create_date
if self.had_suspicious_activity is not None:
dictionary['hadSuspiciousActivity'] = self.had_suspicious_activity
        if self.has_forgotten_password is not None:
dictionary['hasForgottenPassword'] = self.has_forgotten_password
if self.has_password is not None:
dictionary['hasPassword'] = self.has_password
if self.password_change_date is not None:
dictionary['passwordChangeDate'] = self.password_change_date
        if self.password_changed_during_checkout is not None:
            dictionary['passwordChangedDuringCheckout'] = self.password_changed_during_checkout
|
hazelnusse/sympy-old
|
sympy/printing/gtk.py
|
Python
|
bsd-3-clause
| 498 | 0.008032 |
from sympy import Basic
from sympy.printing.mathml import mathml
import tempfile
import os
def print_gtk(x, start_viewer=True):
"""Print to Gtkmathview, a gtk widget capable of rendering MathML.
Needs libgtkmathview-bin"""
from sympy.utilities.mathml import c2p
tmp = tempfile.mktemp() # create a temp file to store the result
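    # note (added): tempfile.mktemp() is race-prone; tempfile.mkstemp() or
    # NamedTemporaryFile(delete=False) would be the safer choice here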
file = open(tmp, 'wb')
file.write( c2p(mathml(x), simple=True) )
    file.close()
if start_viewer:
os.system("mathmlviewer " + tmp)
|
HERA-Team/hera_mc
|
alembic/versions/5feda4ca9935_add_rtp_task_multiple_process_event_table.py
|
Python
|
bsd-2-clause
| 1,093 | 0.000915 |
"""Add rtp_task_multiple_process_event table
Revision ID: 5feda4ca9935
Revises: 9d9af47e64c8
Create Date: 2021-09-30 16:22:30.118641+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "5feda4ca9935"
down_revision = "9d9af47e64c8"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"rtp_task_multiple_process_event",
sa.Column("time", sa.BigInteger(), nullable=False),
sa.Column("obsid_start", sa.BigInteger(), nullable=False),
sa.Column("task_na
|
me", sa.Text(), nullable=False),
sa.Column(
"event",
sa.Enum(
"started", "finished", "error", name="rtp_task_multiple_process_enum"
),
nullable=False,
),
sa.ForeignKeyConstraint(
["obsid_start"],
["hera_obs.obsid"],
),
sa.PrimaryKeyConstraint("time", "obsid_start", "task_name"),
)
def downgrade():
op.drop_table("
|
rtp_task_multiple_process_event")
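# Note (added): on PostgreSQL, drop_table does not remove the named enum type
# created in upgrade(); a fuller downgrade could also run:
#   sa.Enum(name="rtp_task_multiple_process_enum").drop(op.get_bind(), checkfirst=True)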
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_azure_firewalls_operations.py
|
Python
|
mit
| 26,909 | 0.004645 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations(object):
"""AzureFirewallsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
                azure_firewall_name=azure_firewall_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
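    # Usage sketch for the poller returned by begin_delete (names are
    # illustrative; assumes these operations are mounted on a configured
    # management client as `network_client.azure_firewalls`):
    #
    #   poller = network_client.azure_firewalls.begin_delete(
    #       resource_group_name='my-rg',
    #       azure_firewall_name='my-firewall',
    #   )
    #   poller.result()  # block until the long-running delete completes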
def get(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AzureFirewall"
"""Gets the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall
        :raises: ~azure.core.exceptions.HttpResponseError:
        """
|
regilero/HTTPWookiee
|
httpwookiee/http/client.py
|
Python
|
gpl-3.0
| 6,832 | 0.000146 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from httpwookiee.config import ConfigFactory
from httpwookiee.core.tools import Tools, outmsg, inmsg
from httpwookiee.http.parser.responses import Responses
import socket
import ipaddress
import ssl
import six
class ClosedSocketError(Exception):
"""Raise this when the tcp/ip connection is unexpectedly closed."""
class Client(object):
"""Main HTTP Client, HTTP request launcher."""
hostip = None
port = None
host = b''
https = False
_sock = None
_hostip = False
def __init__(self, host=None, port=None, hostip=None):
"""Ensure settings are ready."""
self.config = ConfigFactory.getConfig()
if host is None:
self.host = self.config.get('SERVER_HOST')
else:
self.host = host
if port is None:
self.port = self.config.getint('SERVER_PORT')
else:
self.port = port
self.hostip = hostip
if self.hostip is None and '' != self.config.get('SERVER_IP'):
self.hostip = self.config.get('SERVER_IP')
self.https = self.config.getboolean('SERVER_SSL')
self._sock = None
def __enter__(self):
"""Launch the socket opening."""
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Send a socket close."""
return self.close()
def open(self):
"""Open client socket connection."""
if self.hostip is None:
outmsg('# searching host IP (DNS) for {0} '.format(self.host))
self.hostip = socket.getaddrinfo(self.host, self.port)[0][4][0]
self._ci()
try:
if not self._hostip:
raise Exception(u'\u0262\u0046\u0059')
outmsg(
'# Connecting to Host: {0} IP: {1} PORT: {2}'.format(
self.host, self.hostip, self.port))
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(10)
except socket.error as msg:
outmsg("[ERROR] {0}".format(str(msg)))
raise Exception('error creating socket')
outmsg('# socket ok')
if self.https:
try:
outmsg('# Establishing SSL layer')
self._sock = ssl.wrap_socket(self._sock,
cert_reqs=ssl.CERT_NONE)
except:
outmsg("[SSL ERROR]")
raise Exception('error establishing SSL connection')
try:
self._sock.connect((self.hostip, self.port))
except socket.error as msg:
outmsg("[ERROR] {0}".format(str(msg)))
raise Exception('error establishing socket connect')
outmsg('# client connection established.')
def close_sending(self):
"""First closing step, cut the sending part of the socket."""
try:
outmsg('# closing client connection send canal '
'(can still receive).')
self._sock.shutdown(socket.SHUT_WR)
except OSError:
raise ClosedSocketError('closed socket detected on send close')
def close(self):
"""Ensure the tcp/ip socket is really closed."""
if self._sock is not None:
outmsg('# closing client connection.')
try:
self._sock.shutdown(socket.SHUT_RDWR)
except Exception:
# already closed
pass
self._sock.close()
self._sock = None
def _ci(self):
self._hostip = ipaddress.ip_address(self.hostip).is_private
def send(self, request):
"""Send given request on the socket, support delayed emission."""
msg = request.getBytesStream()
msglen = len(msg)
outmsg('# SENDING ({0}) =====>'.format(msglen))
# here we use the not-so real format (special bytes are not
# replaced in str(), only in getBytesStream())
Tools.print_message(six.text_type(request), cleanup=True)
try:
self._socket_send(msg)
except socket.error as errmsg:
outmsg('#<====ABORTED COMMUNICATION WHILE'
' SENDING {0}\n#{1}'.format(six.text_type(msg), errmsg))
return
while request.is_delayed:
msg = request.getDelayedOutput()
msglen = len(msg)
            outmsg('# SENDING Delayed ({0}) =====>'.format(msglen))
# hopefully we do not use strange bytes in delayed chunks for now
Tools.print_message(six.text_type(msg), cleanup=True)
try:
self._socket_send(msg)
            except socket.error as errmsg:
outmsg('#<====ABORTED COMMUNICATION WHILE'
' SENDING (delayed) '
'{0}\r\n#{1}'.format(six.text_type(msg),
errmsg))
return
def read_all(self, timeout=None, buffsize=None):
"""Read all the stream, waiting for EOS, return all responses."""
output = ''
if timeout is None:
timeout = float(self.config.getint(
'CLIENT_SOCKET_READ_TIMEOUT_MS'))
timeout = timeout / 1000
if buffsize is None:
buffsize = self.config.getint('CLIENT_SOCKET_READ_SIZE')
try:
output = self._socket_read(timeout, buffsize)
except socket.error as msg:
inmsg('#<====ABORTED RESPONSE WHILE READING: {0}'.format(str(msg)))
inmsg('# <====FINAL RESPONSE===============')
inmsg(output)
responses = Responses().parse(output)
return responses
def _socket_send(self, message):
msglen = len(message)
totalsent = 0
outmsg('# ====================>')
while totalsent < msglen:
outmsg('# ...')
sent = self._sock.send(message[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
def _socket_read(self, timeout, buffsize):
inmsg('# <==== READING <===========')
read = b''
# we use blocking socket, set short timeouts if you want
# to detect end of response streams
if 0 == timeout:
self._sock.settimeout(None)
else:
self._sock.settimeout(timeout)
try:
# blocking read
data = self._sock.recv(buffsize)
while (len(data)):
inmsg('# ...')
read += data
data = self._sock.recv(buffsize)
except socket.timeout:
inmsg('# read timeout({0}), nothing more is coming'.format(
timeout))
return read
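# Usage sketch (host/port are illustrative; `request` is assumed to expose
# getBytesStream() and is_delayed, as send() above requires):
#
#   with Client(host='target.example', port=8080) as client:
#       client.send(request)
#       responses = client.read_all()  # parsed into a Responses object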
|
acshi/osf.io
|
api_tests/base/test_versioning.py
|
Python
|
apache-2.0
| 6,291 | 0.003656 |
from nose.tools import * # flake8: noqa
from api.base import settings
from tests.base import ApiTestCase
# The versions below are specifically for testing purposes and do not reflect the actual versioning of the API.
# If changes are made to this list, or to DEFAULT_VERSION, please reflect those changes in:
# api/base/settings/local-travis.py so that travis tests will pass.
TESTING_ALLOWED_VERSIONS = (
'2.0',
'2.0.1',
'2.1',
'2.2',
'3.0',
'3.0.1',
)
DEFAULT_VERSION = '2.0'
class VersioningTestCase(ApiTestCase):
def setUp(self):
super(VersioningTestCase, self).setUp()
self.valid_url_path_version = '2.0'
self.valid_header_version = '2.0.1'
self.valid_query_parameter_version = '2.1'
self.invalid_url_path_version = '1.0'
self.invalid_header_version = '1.0.1'
self.invalid_query_parameter_version = '1.1'
self.valid_url_path_version_url = '/v2/'
self.invalid_url_path_version_url = '/v1/'
self.valid_query_parameter_version_url = '/v2/?version={}'.format(self.valid_query_parameter_version)
self.invalid_query_parameter_version_url = '/v2/?version={}'.format(self.invalid_query_parameter_version)
self._ALLOWED_VERSIONS = settings.REST_FRAMEWORK['ALLOWED_VERSIONS']
self._DEFAULT_VERSION = settings.REST_FRAMEWORK['DEFAULT_VERSION']
settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = TESTING_ALLOWED_VERSIONS
settings.REST_FRAMEWORK['DEFAULT_VERSION'] = DEFAULT_VERSION
def tearDown(self):
super(VersioningTestCase, self).tearDown()
settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = self._ALLOWED_VERSIONS
settings.REST_FRAMEWORK['DEFAULT_VERSION'] = self._DEFAULT_VERSION
class TestBaseVersioning(VersioningTestCase):
    def setUp(self):
super(TestBaseVersioning, self).setUp()
def test_url_path_version(self):
res = self.app.get(self.valid_url_path_version_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_url_path_version)
def test_header_version(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
res = self.app.get(self.valid_url_path_version_url, headers=headers)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_header_version)
def test_query_param_version(self):
res = self.app.get(self.valid_query_parameter_version_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_query_parameter_version)
def test_url_path_version_not_in_allowed_versions(self):
res = self.app.get(self.invalid_url_path_version_url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_header_version_not_in_allowed_versions(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.invalid_header_version)}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 406)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.')
def test_query_param_version_not_in_allowed_versions(self):
res = self.app.get(self.invalid_query_parameter_version_url, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.')
def test_query_parameter_version_not_within_url_path_major_version(self):
url = '/v2/?version=3.0.1'
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in query parameter does not fall within URL path version {}'.format(
'3.0.1',
self.valid_url_path_version
)
)
def test_header_version_not_within_url_path_major_version(self):
headers = {'accept': 'application/vnd.api+json;version=3.0.1'}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in "Accept" header does not fall within URL path version {}'.format(
'3.0.1',
self.valid_url_path_version
)
)
def test_header_version_and_query_parameter_version_match(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
url = '/v2/?version={}'.format(self.valid_header_version)
res = self.app.get(url, headers=headers)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_header_version)
def test_header_version_and_query_parameter_version_mismatch(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
url = '/v2/?version={}'.format(self.valid_query_parameter_version)
res = self.app.get(url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
self.valid_header_version,
self.valid_query_parameter_version
)
)
def test_header_version_bad_format(self):
headers = {'accept': 'application/vnd.api+json;version=not_at_all_a_version'}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 406)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.')
def test_query_version_bad_format(self):
url = '/v2/?version=not_at_all_a_version'
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.')
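    # The three version-selection channels exercised above, side by side
    # (illustrative requests against this test app):
    #
    #   self.app.get('/v2/')                  # URL path only -> DEFAULT_VERSION
    #   self.app.get('/v2/?version=2.1')      # query parameter override
    #   self.app.get('/v2/', headers={
    #       'accept': 'application/vnd.api+json;version=2.0.1'})  # header override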
|
alvinkatojr/django-db-log
|
djangodblog/models.py
|
Python
|
bsd-3-clause
| 5,442 | 0.006248 |
from django.conf import settings as dj_settings
from django.db import models, transaction
from django.core.signals import got_request_exception
from django.http import Http404
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
from djangodblog import settings
from djangodblog.manager import DBLogManager, ErrorBatchManager
from djangodblog.utils import JSONDictField
from djangodblog.helpers import construct_checksum
import datetime
import warnings
import logging
import sys
try:
from idmapper.models import SharedMemoryModel as Model
except ImportError:
Model = models.Model
logger = logging.getLogger('dblog')
__all__ = ('Error', 'ErrorBatch')
LOG_LEVELS = (
(logging.INFO, _('info')),
(logging.WARNING, _('warning')),
(logging.DEBUG, _('debug')),
(logging.ERROR, _('error')),
(logging.FATAL, _('fatal')),
)
STATUS_LEVELS = (
(0, _('unresolved')),
(1, _('resolved')),
)
class ErrorBase(Model):
logger = models.CharField(max_length=64, blank=True, default='root', db_index=True)
class_name = models.CharField(_('type'), max_length=128, blank=True, null=True, db_index=True)
level = models.PositiveIntegerField(choices=LOG_LEVELS, default=logging.ERROR, blank=True, db_index=True)
message = models.TextField()
traceback = models.TextField(blank=True, null=True)
url = models.URLField(null=True, blank=True)
server_name = models.CharField(max_length=128, db_index=True)
checksum = models.CharField(max_length=32, db_index=True)
objects = DBLogManager()
class Meta:
abstract = True
def get_absolute_url(self):
return self.url
def shortened_url(self):
if not self.url:
return _('no data')
url = self.url
if len(url) > 60:
url = url[:60] + '...'
return url
shortened_url.short_description = _('url')
shortened_url.admin_order_field = 'url'
    def full_url(self):
        return self.data.get('url') or self.url
full_url.short_description = _('url')
full_url.admin_order_field = 'url'
def error(self):
message = smart_unicode(self.message)
if len(message) > 100:
message = message[:97] + '...'
if self.class_name:
return "%s: %s" % (self.class_name, message)
return message
error.short_description = _('error')
def description(self):
return self.traceback or ''
description.short_description = _('description')
class ErrorBatch(ErrorBase):
# XXX: We're using the legacy column for `is_resolved` for status
status = models.PositiveIntegerField(default=0, db_column="is_resolved", choices=STATUS_LEVELS)
times_seen = models.PositiveIntegerField(default=1)
last_seen = models.DateTimeField(default=datetime.datetime.now, db_index=True)
first_seen = models.DateTimeField(default=datetime.datetime.now, db_index=True)
objects = ErrorBatchManager()
class Meta:
unique_together = (('logger', 'server_name', 'checksum'),)
verbose_name_plural = _('summaries')
verbose_name = _('summary')
def __unicode__(self):
return "(%s) %s: %s" % (self.times_seen, self.class_name, self.error())
def natural_key(self):
return (self.logger, self.server_name, self.checksum)
@staticmethod
@transaction.commit_on_success
def handle_exception(sender, request=None, **kwargs):
try:
exc_type, exc_value, traceback = sys.exc_info()
if not settings.CATCH_404_ERRORS \
and issubclass(exc_type, Http404):
return
if dj_settings.DEBUG or getattr(exc_type, 'skip_dblog', False):
return
if transaction.is_dirty():
transaction.rollback()
if request:
data = dict(
META=request.META,
POST=request.POST,
GET=request.GET,
COOKIES=request.COOKIES,
)
else:
data = dict()
extra = dict(
url=request and request.build_absolute_uri() or None,
data=data,
)
if settings.USE_LOGGING:
logging.getLogger('dblog').critical(exc_value, exc_info=sys.exc_info(), extra=extra)
else:
Error.objects.create_from_exception(**extra)
except Exception, exc:
try:
logger.exception(u'Unable to process log entry: %s' % (exc,))
except Exception, exc:
warnings.warn(u'Unable to process log entry: %s' % (exc,))
class Error(ErrorBase):
datetime = models.DateTimeField(default=datetime.datetime.now, db_index=True)
data = JSONDictField(blank=True, null=True)
class Meta:
verbose_name = _('message')
verbose_name_plural = _('messages')
def __unicode__(self):
return "%s: %s" % (self.class_name, smart_unicode(self.message))
def save(self, *args, **kwargs):
if not self.checksum:
self.checksum = construct_checksum(self)
super(Error, self).save(*args, **kwargs)
got_request_exception.connect(ErrorBatch.handle_exception)
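# Usage sketch: besides the signal handler wired above, an error can be
# recorded manually through the manager (create_from_exception comes from
# DBLogManager, as used in handle_exception):
#
#   try:
#       risky_operation()  # hypothetical callable
#   except Exception:
#       Error.objects.create_from_exception(url=None, data={})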
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-nest-asyncio/package.py
|
Python
|
lgpl-2.1
| 633 | 0.004739 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNestAsyncio(PythonPackage):
"""Patch asyncio to allow nested event loops."""
homepage = "https://github.com/erdewit/nest_asyncio"
url = "https://pypi.io/packages/source/n/nest-asyncio/nest_asyncio-1.4.0.tar.gz"
version('1.4.0', sha256='5773054bbc14579b000236f85bc01ecced7ffd045ec8ca4a9809371ec65a59c8')
depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
|
zackurtz/box-maker
|
boxmaker.py
|
Python
|
gpl-3.0
| 10,763 | 0.047849 |
#! /usr/bin/env python
'''
Generates Inkscape SVG file containing box components needed to create several different
types of laser cut tabbed boxes.
Derived from original version authored by elliot white - elliot@twot.eu
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
__version__ = "0.1" ### please report bugs at https://github.com/zackurtz/box-maker/issues ###
import sys
import inkex, simplestyle, gettext
_ = gettext.gettext
def drawS(XYstring): # Draw lines from a list
name='part'
style = { 'stroke': '#000000', 'fill': 'none' }
drw = { 'style':simplestyle.formatStyle(style), inkex.addNS('label','inkscape'):name, 'd':XYstring}
inkex.etree.SubElement(parent, inkex.addNS('path','svg'), drw )
return
class BoxMaker(inkex.Effect):
def __init__(self):
# Call the base class constructor.
inkex.Effect.__init__(self)
# Define options
self.OptionParser.add_option('--unit',action='store',type='string',
dest='unit',default='mm',help='Measure Units')
self.OptionParser.add_option('--inside',action='store',type='int',
dest='inside',default=0,help='Int/Ext Dimension')
self.OptionParser.add_option('--length',action='store',type='float',
dest='length',default=100,help='Length of Box')
self.OptionParser.add_option('--width',action='store',type='float',
dest='width',default=100,help='Width of Box')
self.OptionParser.add_option('--depth',action='store',type='float',
dest='height',default=100,help='Height of Box')
self.OptionParser.add_option('--tab',action='store',type='float',
dest='tab',default=25,help='Nominal Tab Width')
self.OptionParser.add_option('--equal',action='store',type='int',
dest='equal',default=0,help='Equal/Prop Tabs')
self.OptionParser.add_option('--thickness',action='store',type='float',
dest='thickness',default=10,help='Thickness of Material')
self.OptionParser.add_option('--kerf',action='store',type='float',
dest='kerf',default=0.5,help='Kerf (width) of cut')
self.OptionParser.add_option('--clearance',action='store',type='float',
dest='clearance',default=0.01,help='Clearance of joints')
self.OptionParser.add_option('--style',action='store',type='int',
dest='style',default=25,help='Layout/Style')
self.OptionParser.add_option('--spacing',action='store',type='float',
dest='spacing',default=25,help='Part Spacing')
def tabbed_side(self, (rx,ry), (sox,soy), (eox,eoy), tabVec, length, (dirx,diry), isTab):
# root startOffset endOffset tabVec length direction isTab
num_divisions = int(length/nomTab) # divisions
if num_divisions % 2 == 0:
num_divisions -= 1 # make divs odd
num_divisions = float(num_divisions)
tabs = (num_divisions-1)/2 # tabs for side
if equalTabs:
gapWidth = tabWidth = length/num_divisions
else:
tabWidth = nomTab
gapWidth = (length-tabs*nomTab)/(num_divisions-tabs)
# kerf correction
if isTab:
gapWidth -= correction
tabWidth += correction
first = correction/2
else:
gapWidth += correction
tabWidth -= correction
            first = -correction/2
s = []
firstVec = 0
secondVec = tabVec
# used to select operation on x or y
dirxN = 0 if dirx else 1
diryN = 0 if diry else 1
(Vx, Vy) = (rx+sox*self.thickness,ry+soy*self.thickness)
s = 'M ' + str(Vx) + ',' + str(Vy) + ' '
if dirxN:
Vy = ry # set correct line start
if diryN:
Vx = rx
# generate line as tab or hole using:
# last co-ord:Vx,Vy ; tab dir:tabVec ; direction:dirx,diry ; thickness:thickness
# divisions:num_divisions ; gap width:gapWidth ; tab width:tabWidth
for n in range(1, int(num_divisions)):
if n % 2 == 1:
Vx = Vx + dirx*gapWidth + dirxN*firstVec + first*dirx
Vy = Vy + diry*gapWidth + diryN*firstVec + first*diry
s += 'L ' + str(Vx) + ',' + str(Vy) + ' '
Vx = Vx + dirxN*secondVec
Vy = Vy + diryN*secondVec
s += 'L ' + str(Vx) + ',' + str(Vy) + ' '
else:
Vx = Vx+dirx*tabWidth+dirxN*firstVec
Vy = Vy+diry*tabWidth+diryN*firstVec
s += 'L ' + str(Vx) + ',' + str(Vy) + ' '
Vx = Vx + dirxN*secondVec
Vy = Vy + diryN*secondVec
s += 'L ' + str(Vx) + ',' + str(Vy) + ' '
(secondVec,firstVec) = (-secondVec,-firstVec) # swap tab direction
first = 0
s += 'L ' + str(rx+eox*self.thickness+dirx*length) + ',' + str(ry+eoy*self.thickness+diry*length) + ' '
return s
def flat_side(self, root, start_offset, end_offset, direction, length):
current_x = root[0] + start_offset[0]*self.thickness
current_y = root[1] + start_offset[1]*self.thickness
draw_cmd = 'M' + str(current_x) + ',' + str(current_y) + ' '
        draw_cmd += ('L ' + str(root[0] + end_offset[0]*self.thickness+direction[0]*length) + ','
                     + str(root[1] + end_offset[1]*self.thickness+direction[1]*length) + ' ')
return draw_cmd
def draw_pieces(self, pieces, thickness, spacing):
for piece in pieces: # generate and draw each piece of the box
(xs,xx,xy,xz) = piece[0]
(ys,yx,yy,yz) = piece[1]
x = xs*spacing + xx*self.x_dim + xy*self.y_dim + xz*self.z_dim # root x co-ord for piece
y = ys*spacing + yx*self.x_dim +yy*self.y_dim + yz*self.z_dim # root y co-ord for piece
dx = piece[2]
dy = piece[3]
tabs = piece[4]
# extract tab status for each side
a = tabs>>3 & 1
            b = tabs>>2 & 1
            c = tabs>>1 & 1
            d = tabs & 1
            # generate and draw the sides of each piece
            drawS(self.tabbed_side((x,y), (d,a), (-b,a), -thickness if a else thickness, dx, (1,0), a))          # side a
            drawS(self.tabbed_side((x+dx,y), (-b,a), (-b,-c), thickness if b else -thickness, dy, (0,1), b))     # side b
            drawS(self.tabbed_side((x+dx,y+dy), (-b,-c), (d,-c), thickness if c else -thickness, dx, (-1,0), c)) # side c
drawS(self.tabbed_side((x,y+dy), (d,-c), (d,a), -thickness if d else thickness, dy, (0,-1), d)) # side d
def effect(self):
global parent, nomTab, equalTabs, correction
# Get access to main SVG document element and get its dimensions.
svg = self.document.getroot()
# Get the attibutes:
widthDoc = inkex.unittouu(svg.get('width'))
heightDoc = inkex.unittouu(svg.get('height'))
# Create a new layer.
layer = inkex.etree.SubElement(svg, 'g')
layer.set(inkex.addNS('label', 'inkscape'), 'newlayer')
layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')
parent = self.current_layer
# Get script's option values.
unit = self.options.unit
inside = self.options.inside
self.x_dim = inkex.unittouu( str(self.options.length) + unit )
self.y_dim = inkex.unittouu( str(self.options.width) + unit )
self.z_dim = inkex.unittouu( str(self.options.height) + unit )
thickness = inkex.unittouu( str(self.options.thickness) + unit )
nomTab = inkex.unittouu( str(self.options.tab) + unit )
equalTabs = self.options.equal
kerf = inkex.unittouu( str(self.options.kerf) + unit )
clearance = inkex.unittouu( str(self.options.clearance) + unit )
layout = self.options.style
spacing = inkex.unittouu( str(self.options.spacing) + unit )
self.thickness = thickness
if inside:
# convert inside dimension to outside dimension
self.x_dim += thickness*2
self.y_dim += thickness*2
|
robocomp/learnbot
|
learnbot_dsl/functions/perceptual/camera/is_center_blue_line.py
|
Python
|
gpl-3.0
| 418 | 0.028708 |
from __future__ import print_function, absolute_import
import cv2
import numpy as np
import sys, os
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(path)
import visual_auxiliary as va
def is_center_blue_line(lbot):
frame = lbot.getImage()
if frame is not None:
rois = va.detect_blue_line(frame)
maxIndex = np.argmax(rois)
if maxIndex==1 and rois[maxIndex]>20:
            return True
return False
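# Usage sketch (`lbot` is a learnbot robot handle exposing getImage(), as the
# function above assumes):
#
#   if is_center_blue_line(lbot):
#       pass  # blue line sits in the center ROI, e.g. keep driving straight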
|
hanfang/glmnet_python
|
glmnet_python/elnet.py
|
Python
|
gpl-2.0
| 8,525 | 0.029795 |
# -*- coding: utf-8 -*-
"""
Internal function called by glmnet. See also glmnet, cvglmnet
"""
# import packages/methods
import scipy
import ctypes
from loadGlmLib import loadGlmLib
def elnet(x, is_sparse, irs, pcs, y, weights, offset, gtype, parm, lempty,
nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam, thresh, isd, intr,
maxit, family):
# load shared fortran library
glmlib = loadGlmLib()
# pre-process data
ybar = scipy.dot(scipy.transpose(y), weights)
ybar = ybar/sum(weights)
nulldev = (y - ybar)**2 * weights
# ka
lst = ['covariance', 'naive']
ka = [i for i in range(len(lst)) if lst[i] == gtype]
if len(ka) == 0:
raise ValueError('unrecognized type for ka');
else:
ka = ka[0] + 1 # convert from 0-based to 1-based index for fortran
# offset
if len(offset) == 0:
offset = y*0
is_offset = False
else:
is_offset = True
# remove offset from y
y = y - offset
# now convert types and allocate memory before calling
# glmnet fortran library
######################################
# --------- PROCESS INPUTS -----------
######################################
# force inputs into fortran order and into the correct scipy datatype
copyFlag = False
x = x.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
irs = irs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)
pcs = pcs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)
y = y.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
weights = weights.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
jd = jd.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)
vp = vp.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
cl = cl.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
ulam = ulam.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
######################################
# --------- ALLOCATE OUTPUTS ---------
######################################
# lmu
lmu = -1
lmu_r = ctypes.c_int(lmu)
# a0
a0 = scipy.zeros([nlam], dtype = scipy.float64)
a0 = a0.astype(dtype = scipy.float64, order = 'F', copy = False)
a0_r = a0.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# ca
ca = scipy.zeros([nx, nlam], dtype = scipy.float64)
ca = ca.astype(dtype = scipy.float64, order = 'F', copy = False)
ca_r = ca.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# ia
ia = -1*scipy.ones([nx], dtype = scipy.int32)
ia = ia.astype(dtype = scipy.int32, order = 'F', copy = False)
ia_r = ia.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
# nin
nin = -1*scipy.ones([nlam], dtype = scipy.int32)
nin = nin.astype(dtype = scipy.int32, order = 'F', copy = False)
nin_r = nin.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
# rsq
rsq = -1*scipy.ones([nlam], dtype = scipy.float64)
rsq = rsq.astype(dtype = scipy.float64, order = 'F', copy = False)
rsq_r = rsq.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# alm
alm = -1*scipy.ones([nlam], dtype = scipy.float64)
alm = alm.astype(dtype = scipy.float64, order = 'F', copy = False)
alm_r = alm.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# nlp
nlp = -1
nlp_r = ctypes.c_int(nlp)
# jerr
jerr = -1
jerr_r = ctypes.c_int(jerr)
# ###################################
# main glmnet fortran caller
# ###################################
if is_sparse:
# sparse elnet
glmlib.spelnet_(
ctypes.byref(ctypes.c_int(ka)),
ctypes.byref(ctypes.c_double(parm)),
ctypes.byref(ctypes.c_int(len(weights))),
ctypes.byref(ctypes.c_int(nvars)),
x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
pcs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
irs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_int(ne)),
ctypes.byref(ctypes.c_int(nx)),
ctypes.byref(ctypes.c_int(nlam)),
ctypes.byref(ctypes.c_double(flmin)),
ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_double(thresh)),
ctypes.byref(ctypes.c_int(isd)),
ctypes.byref(ctypes.c_int(intr)),
ctypes.byref(ctypes.c_int(maxit)),
ctypes.byref(lmu_r),
a0_r,
ca_r,
ia_r,
nin_r,
rsq_r,
alm_r,
ctypes.byref(nlp_r),
ctypes.byref(jerr_r)
)
else:
# call fortran elnet routine
glmlib.elnet_(
ctypes.byref(ctypes.c_int(ka)),
ctypes.byref(ctypes.c_double(parm)),
ctypes.byref(ctypes.c_int(len(weights))),
ctypes.byref(ctypes.c_int(nvars)),
x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_int(ne)),
ctypes.byref(ctypes.c_int(nx)),
ctypes.byref(ctypes.c_int(nlam)),
ctypes.byref(ctypes.c_double(flmin)),
ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_double(thresh)),
ctypes.byref(ctypes.c_int(isd)),
ctypes.byref(ctypes.c_int(intr)),
ctypes.byref(ctypes.c_int(maxit)),
ctypes.byref(lmu_r),
a0_r,
ca_r,
ia_r,
nin_r,
rsq_r,
alm_r,
ctypes.byref(nlp_r),
ctypes.byref(jerr_r)
)
# ###################################
# post process results
# ###################################
# check for error
if (jerr_r.value > 0):
raise ValueError("Fatal glmnet error in library call : error code = ", jerr_r.value)
elif (jerr_r.value < 0):
print("Warning: Non-fatal error in glmnet library call: error code = ", jerr_r.value)
print("Check results for accuracy. Partial or no results returned.")
# clip output to correct sizes
lmu = lmu_r.value
a0 = a0[0:lmu]
ca = ca[0:nx, 0:lmu]
ia = ia[0:nx]
nin = nin[0:lmu]
rsq = rsq[0:lmu]
alm = alm[0:lmu]
# ninmax
ninmax = max(nin)
# fix first value of alm (from inf to correct value)
if lempty:
t1 = scipy.log(alm[1])
t2 = scipy.log(alm[2])
alm[0] = scipy.exp(2*t1 - t2)
# create return fit dictionary
if ninmax > 0:
ca = ca[0:ninmax, :]
df = scipy.sum(scipy.absolute(ca) > 0, axis=0)
        ja = ia[0:ninmax] - 1  # ia is 1-indexed in fortran
oja = scipy.argsort(ja)
ja1 = ja[oja]
beta = scipy.zeros([nvars, lmu], dtype = scipy.float64)
beta[ja1, :] = ca[oja, :]
else:
beta = scipy.zeros([nvars, lmu], dtype = scipy.float64)
df = scipy.zeros([1, lmu], dtype = scipy.float64)
fit = dict()
fit['a0'] = a0
fit['beta'] = beta
fit['dev'] = rsq
fit['nulldev'] = nulldev
fit['df']= df
fit['lambdau'] = alm
fit['npasses'] = nlp_r.value
fit['jerr'] = jerr_r.value
fit['dim'] = scipy.array(
|
ad-m/pru
|
pru/blog/views.py
|
Python
|
bsd-3-clause
| 1,186 | 0 |
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView, ListView
from braces.views import OrderableListMixin
from .models import Post, Tag
ORDER_FIELD = {'title': 'title'}
class PermissionMixin(object):
def get_queryset(self, *args, **kwargs):
qs = super(PermissionMixin, self).get_queryset(*args, **kwargs)
return qs.for_user(self.request.user)
class PostDetailView(PermissionMixin, DetailView):
model = Post
class PostListView(PermissionMixin, OrderableListMixin, ListView):
model = Post
paginate_by = 10
orderable_columns = ("pk", "name", "city")
orderable_columns_default = "created_on"
def get_queryset(self, *args, **kwargs):
        qs = super(PostListView, self).get_queryset(*args, **kwargs)
        if 'tag_slug' in self.kwargs:
            self.tag = get_object_or_404(Tag, slug=self.kwargs['tag_slug'])
            qs = qs.filter(tags=self.tag)
return qs
    def get_context_data(self, **kwargs):
        context = super(PostListView, self).get_context_data(**kwargs)
        if hasattr(self, 'tag'):
            context['object'] = self.tag
return context
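# Hypothetical URLconf wiring for these views (patterns and names are
# illustrative, not taken from the project):
#
#   urlpatterns = [
#       url(r'^$', PostListView.as_view(), name='post_list'),
#       url(r'^tag/(?P<tag_slug>[\w-]+)/$', PostListView.as_view(), name='post_by_tag'),
#       url(r'^(?P<pk>\d+)/$', PostDetailView.as_view(), name='post_detail'),
#   ]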
|
Ryan-Amaral/PyTPG
|
tpg_tests/test_utils.py
|
Python
|
mit
| 2,999 | 0.015005 |
import random
import numpy as np
from tpg.learner import Learner
from tpg.action_object import ActionObject
from tpg.program import Program
from tpg.team import Team
dummy_init_params = {
'generation': 0,
'actionCodes':[
0,1,2,3,4,5,6,7,8,9,10,11
]
}
dummy_mutate_params = {
'pProgMut': 0.5,
'pActMut': 0.5,
'pActAtom': 0.5,
'pInstDel': 0.5,
'pInstMut': 0.5,
'pInstAdd': 0.5,
'pLrnDel': 0.5,
'pLrnAdd': 0.5,
'pLrnMut': 0.5,
'nOperations': 8,
'nDestinations': 8,
'inputSize': 8,
'actionCodes':[
0,1,2,3,4,5,6,7,8,9,10,11
],
'pInstSwp':0.5,
'generation': 1
}
'''
Dummy Creates
These should be used to test constructs other than the ones
being created by the function. For example, to test a Team
you would create dummy programs and learners. But you wouldn't
use the create_dummy_team function to test the creation
of a team.
This is because these methods verify nothing about the init procedure
of the class they're returning an object of.
'''
'''
Create a dummy program with some preset values
'''
def create_dummy_program():
program = Program(
maxProgramLength=128,
nOperations=7,
nDestinations=8,
inputSize=100,
initParams = dummy_init_params
)
return program
'''
Create dummy team with some number of learners.
Returns the team and the learners added to it
'''
def create_dummy_team(num_learners=2):
team = Team(dummy_init_params)
learners = []
for x in range(0, num_learners):
learner = create_dummy_learner()
learners.append(learner)
team.addLearner(learner)
return team, learners
'''
Create a dummy action object
'''
def create_dummy_action_object():
action_object = ActionObject(action=random.randint(0,10), initParams=dummy_init_params)
return action_object
'''
Create a dummy action object with a given team
'''
def create_dummy_team_action(team):
action_object = ActionObject(team, initParams=dummy_init_params)
return action_object
'''
Create a dummy learner with some preset values
'''
def create_dummy_learner():
learner = Learner(
dummy_init_params,
program=create_dummy_program(),
actionObj=create_dummy_action_object(),
numRegisters=8
)
return learner
'''
Create a list of dummy learners
'''
def create_dummy_learners(num_learners=100):
learners = []
for i in range(num_learners):
learners.append(create_dummy_learner())
return learners
"""
Transform visual input from ALE to flat vector.
inState should be made int32 before passing in.
"""
def getStateALE(inState):
# each row is all 1 color
rgbRows = np.reshape(inState,(len(inState[0])*len(inState), 3)).T
# add each with appropriate shifting
# get RRRRRRRR GGGGGGGG BBBBBBBB
return np.add(np.left_shift(rgbRows[0], 16),
np.add(np.left_shift(rgbRows[1], 8), rgbRows[2]))
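# Usage sketch for the dummy helpers above (only APIs already used in this
# file are assumed):
#
#   team, learners = create_dummy_team(num_learners=4)
#   extra = create_dummy_learner()
#   team.addLearner(extra)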
|
avanzosc/avanzosc6.1
|
steel_quality_test/__init__.py
|
Python
|
agpl-3.0
| 50 | 0 |
#
# Generated by the Open ERP module recorder !
#
|
tchellomello/home-assistant
|
homeassistant/components/mobile_app/webhook.py
|
Python
|
apache-2.0
| 18,676 | 0.00075 |
"""Webhook handlers for mobile_app."""
import asyncio
from functools import wraps
import logging
import secrets
from aiohttp.web import HTTPBadRequest, Request, Response, json_response
from nacl.secret import SecretBox
import voluptuous as vol
from homeassistant.components import notify as hass_notify, tag
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_CLASSES,
)
from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM
from homeassistant.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from homeassistant.components.frontend import MANIFEST_JSON
from homeassistant.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES
from homeassistant.components.zone.const import DOMAIN as ZONE_DOMAIN
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_WEBHOOK_ID,
HTTP_BAD_REQUEST,
HTTP_CREATED,
)
from homeassistant.core import EventOrigin
from homeassistant.exceptions import HomeAssistantError, ServiceNotFound, TemplateError
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.template import attach
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.decorator import Registry
from .const import (
ATTR_ALTITUDE,
ATTR_APP_DATA,
ATTR_APP_VERSION,
ATTR_CAMERA_ENTITY_ID,
ATTR_COURSE,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_EVENT_DATA,
ATTR_EVENT_TYPE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_TYPE_BINARY_SENSOR,
ATTR_SENSOR_TYPE_SENSOR,
ATTR_SENSOR_UNIQUE_ID,
ATTR_SENSOR_UOM,
ATTR_SPEED,
ATTR_SUPPORTS_ENCRYPTION,
ATTR_TEMPLATE,
ATTR_TEMPLATE_VARIABLES,
ATTR_VERTICAL_ACCURACY,
ATTR_WEBHOOK_DATA,
ATTR_WEBHOOK_ENCRYPTED,
ATTR_WEBHOOK_ENCRYPTED_DATA,
ATTR_WEBHOOK_TYPE,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
ERR_ENCRYPTION_ALREADY_ENABLED,
ERR_ENCRYPTION_NOT_AVAILABLE,
ERR_ENCRYPTION_REQUIRED,
ERR_INVALID_FORMAT,
ERR_SENSOR_NOT_REGISTERED,
SIGNAL_LOCATION_UPDATE,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import (
_decrypt_payload,
empty_okay_response,
error_response,
registration_context,
safe_registration,
savable_state,
supports_encryption,
webhook_response,
)
_LOGGER = logging.getLogger(__name__)
DELAY_SAVE = 10
WEBHOOK_COMMANDS = Registry()
COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES)
SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR]
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_WEBHOOK_TYPE): cv.string,
vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list),
vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
}
)
def validate_schema(schema):
"""Decorate a webhook function with a schema."""
if isinstance(schema, dict):
schema = vol.Schema(schema)
def wrapper(func):
"""Wrap function so we validate schema."""
@wraps(func)
async def validate_and_run(hass, config_entry, data):
"""Validate input and call handler."""
try:
data = schema(data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(data, ex)
_LOGGER.error("Received invalid webhook payload: %s", err)
return empty_okay_response()
return await func(hass, config_entry, data)
return validate_and_run
return wrapper
async def handle_webhook(
hass: HomeAssistantType, webhook_id: str, request: Request
) -> Response:
"""Handle webhook callback."""
if webhook_id in hass.data[DOMAIN][DATA_DELETED_IDS]:
return Response(status=410)
config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id]
device_name = config_entry.data[ATTR_DEVICE_NAME]
try:
req_data = await request.json()
except ValueError:
_LOGGER.warning("Received invalid JSON from mobile_app device: %s", device_name)
return empty_okay_response(status=HTTP_BAD_REQUEST)
if (
ATTR_WEBHOOK_ENCRYPTED not in req_data
and config_entry.data[ATTR_SUPPORTS_ENCRYPTION]
):
_LOGGER.warning(
"Refusing to accept unencrypted webhook from %s",
device_name,
)
return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required")
try:
req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(req_data, ex)
_LOGGER.error(
"Received invalid webhook from %s with payload: %s", device_name, err
)
return empty_okay_response()
webhook_type = req_data[ATTR_WEBHOOK_TYPE]
webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {})
if req_data[ATTR_WEBHOOK_ENCRYPTED]:
enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA]
webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data)
if webhook_type not in WEBHOOK_COMMANDS:
_LOGGER.error(
"Received invalid webhook from %s of type: %s", device_name, webhook_type
)
return empty_okay_response()
_LOGGER.debug(
"Received webhook payload from %s for type %s: %s",
device_name,
webhook_type,
webhook_payload,
)
# Shield so we make sure we finish the webhook, even if sender hangs up.
return await asyncio.shield(
WEBHOOK_COMMANDS[webhook_type](hass, config_entry, webhook_payload)
)
@WEBHOOK_COMMANDS.register("call_service")
@validate_schema(
{
vol.Required(ATTR_DOMAIN): cv.string,
vol.Required(ATTR_SERVICE): cv.string,
vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
}
)
async def webhook_call_service(hass, config_entry, data):
"""Handle a call service webhook."""
try:
await hass.services.async_call(
data[ATTR_DOMAIN],
data[ATTR_SERVICE],
data[ATTR_SERVICE_DATA],
blocking=True,
context=registration_context(config_entry.data),
)
except (vol.Invalid, ServiceNotFound, Exception) as ex:
_LOGGER.error(
"Error when calling service during mobile_app "
"webhook (device name: %s): %s",
config_entry.data[ATTR_DEVICE_NAME],
ex,
)
raise HTTPBadRequest() from ex
return empty_okay_response()
@WEBHOOK_COMMANDS.register("fire_event")
@validate_schema(
{
vol.Required(ATTR_EVENT_TYPE): cv.string,
vol.Optional(ATTR_EVENT_DATA, default={}): dict,
}
)
async def webhook_fire_event(hass, config_entry, data):
"""Handle a fire event webhook."""
event_type = data[ATTR_EVENT_TYPE]
hass.bus.async_fire(
event_type,
data[ATTR_EVENT_DATA],
EventOrigin.remote,
context=registration_context(config_entry.data),
)
return empty_okay_response()
@WEBHOOK_COMMANDS.register("stream_camera")
@validate_schema({vol.Required(ATTR_CAMERA_ENTITY_ID): cv.string})
async def webhook_stream_camera(hass, config_entry, data):
"""Handle a request to HLS-stream a camera."""
camera = hass.states.get(data[ATTR_CAMERA_ENTITY_ID])
if camera is None:
return webhook_response(
{"success": False},
registration=config_entry.data,
status=HTTP_BAD_REQUEST,
)
resp = {"mjpeg_path": "/api/camera_proxy_stream/%s" % (camera.entity_id)}
if camera.attributes["supported_features"] & CAMERA_SUPPORT_STREAM:
try:
resp["hls_path"] = await hass.components.camer
|
teampopong/crawlers
|
election_commission/main.py
|
Python
|
agpl-3.0
| 4,602 | 0.007388 |
#!/usr/bin/python2.7
# -*- encoding=utf-8 -*-
from argparse import ArgumentParser, RawTextHelpFormatter
import codecs
import gevent
from gevent import monkey
import json
from types import UnicodeType
from crawlers import Crawler
from crawlers.local.static import get_election_type_name
from utils import check_dir
def print_json(filename, data):
with open(filename, 'w') as f:
json.dump(data, f, encoding="UTF-8", indent=2)
def print_csv(filename, data):
def transform(txt):
if isinstance(txt, int):
txt = str(txt)
if isinstance(txt, list):
txt = '||'.join(txt)
txt = txt.replace(',', '|')
if isinstance(txt, UnicodeType):
txt = txt.encode('utf8')
return txt
attrs = ['assembly_no', 'district', 'cand_no', 'party', 'name_kr',
'name_cn', 'sex', 'birthyear', 'birthmonth', 'birthday',
'address', 'job', 'education', 'experience', 'recommend_priority',
'votenum', 'voterate', 'elected']
with open(filename, 'w') as f:
f.write(codecs.BOM_UTF8)
f.write(','.join(attrs))
f.write('\n')
for cand in data:
values = (cand[attr] if attr in cand else '' for attr in attrs)
values = (transform(value) for value in values)
f.write(','.join(values))
f.write('\n')
def crawl(target, _type, nth, printer, filename, level=None):
crawler = Crawler(target, _type, nth, level)
cand_list = crawler.crawl()
printer(filename, cand_list)
def create_parser():
    parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
    parser.add_argument('target', choices=['assembly', 'local', 'president'],\
help="name of target election")
parser.add_argument('type', choices=['candidates', 'elected', 'precandidates'],
help="type of person")
parser.add_argument('start', help="starting election id", type=float)
parser.add_argument('end', help="ending election id", type=float,\
nargs='?', default=None)
parser.add_argument('-t', '--test', dest='test', action='store_true',
help="assign datatype to csv instead of json")
parser.add_argument('-d', dest='directory', help="specify data directory")
# TODO: change to subparser
parser.add_argument('-l', choices=['pg', 'pm', 'pp', 'mg', 'mm', 'mp', 'eg', 'em'],
dest="level",
help="specify level for local elections.\n"
"- 1st char: {p:province, m:municipality, e:education},\n"
"- 2nd char: {g: governor, m: member}")
return parser
def main(args):
printer = print_csv if args.test else print_json
filetype = 'csv' if args.test else 'json'
datadir = args.directory if args.directory else '.'
check_dir(datadir)
if args.target=='local':
if args.end:
jobs = []
args.level = get_election_type_name(args.level)
for n in xrange(args.start, args.end+1):
filename = '%s/%s-%s-%s-%d.%s'\
% (datadir, args.target, args.level, args.type, n, filetype)
job = gevent.spawn(crawl, target=args.target, level=args.level,\
_type=args.type, nth=n, filename=filename, printer=printer)
jobs.append(job)
gevent.joinall(jobs)
else:
n = args.start
args.level = get_election_type_name(args.level)
filename = '%s/%s-%s-%s-%.01f.%s' %\
(datadir, args.target, args.level, args.type, n, filetype)
crawl(target=args.target, level=args.level, _type=args.type, nth=n,\
filename=filename, printer=printer)
else:
if args.end:
jobs = []
for n in xrange(args.start, args.end+1):
filename = '%s/%s-%s-%d.%s'\
% (datadir, args.target, args.type, n, filetype)
job = gevent.spawn(crawl, target=args.target, _type=args.type, nth=n,\
filename=filename, printer=printer)
jobs.append(job)
gevent.joinall(jobs)
else:
n = args.start
filename = '%s/%s-%s-%.01f.%s' %\
(datadir, args.target, args.type, n, filetype)
crawl(target=args.target, _type=args.type, nth=n,\
filename=filename, printer=printer)
print 'Data written to %s' % filename
if __name__ == '__main__':
monkey.patch_all()
parser = create_parser()
args = parser.parse_args()
main(args)
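# Example invocations matching the argparse setup above (illustrative):
#
#   python main.py assembly elected 17 19     # 17th-19th assembly, JSON output
#   python main.py local candidates 5 -l pg   # 5th local election, province governors
#   python main.py president candidates 18 -t # CSV output via the test flag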
|
Dhivyap/ansible
|
lib/ansible/modules/files/template.py
|
Python
|
gpl-3.0
| 2,564 | 0.00234 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: template
version_added: historical
options:
follow:
description:
- Determine whether symbolic links should be followed.
- When set to C(yes) symbolic links will be followed, if they exist.
- When set to C(no) symbolic links will not be followed.
- Previous to Ansible 2.4, this was hardcoded as C(yes).
type: bool
default: no
version_added: '2.4'
notes:
- You can use the M(copy) module with the C(content:) option if you prefer the template inline,
as part of the playbook.
- For Windows you can use M(win_template) which uses '\\r\\n' as C(newline_sequence) by default.
seealso:
- module: copy
- module: win_copy
- module: win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- backup
- files
- template_common
- validate
'''
EXAMPLES = r'''
- name: Template a file to /etc/files.conf
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
template:
src: named.conf_{{ ansible_os_family}}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
mode: 0640
- name: Create a DOS-style text file from a template
template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
|
karljakoblarsson/Rattan-Geometry
|
Utils.py
|
Python
|
mit
| 130 | 0.007692 |
import os
def run(name='test1.py'):
    # join rather than concatenate so the default relative name resolves
    filename = os.path.join(os.getcwd(), name)
    exec(compile(open(filename).read(), filename, 'exec'))
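# Usage sketch: execute a script sitting in the current working directory
#
#   run('test1.py')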
|
GinoGalotti/python-selenium-utils
|
SeleniumPythonFramework/src/main/Pages/HomePage.py
|
Python
|
apache-2.0
| 710 | 0 |
from selenium.webdriver.common.by import By
from SeleniumPythonFramework.src.main.Pages.CommonPage import CommonPage
# Production locations
TRY_TEXT = {"by": By.ID, "locator": "url-input"}
TRY_BUTTON = {"by": By.ID, "locator": "get-data"}
PATH = ""
class HomePage(CommonPage):
    def __init__(self, **kwargs):
        super(HomePage, self).__init__(page_url=PATH, **kwargs)
def try_url_text(self):
return self.get_element(TRY_TEXT)
def try_url_button(self):
return self.get_element(TRY_BUTTON)
def try_url(self, url):
self.try_url_text().send_keys(url)
        try_button = self.try_url_button()
with self.wait_for_page_load:
try_button.click()
|
hugoatease/encas
|
errors.py
|
Python
|
gpl-3.0
| 2,359 | 0.010598 |
# Encas Sales Management Server
# Copyright 2013 - Hugo Caille
#
# This file is part of Encas.
#
# Encas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Encas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Encas. If not, see <http://www.gnu.org/licenses/>.
from functools import wraps
from flask import jsonify
from sqlalchemy.exc import OperationalError
class ApiError(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return repr(self.reason)
def serialize(self):
return {'error' : True, 'reason' : self.reason}
class MissingFieldsError(Exception):
def __init__(self, fields):
self.fields = fields
self.fields.sort()
def reason(self):
fields_len = len(self.fields)
i = 0
msg = "Missing fields: "
for field in self.fields:
msg += str(field)
if i < fields_len - 1:
msg += ", "
else:
msg += "."
i += 1
return msg
def __str__(self):
return self.reason()
def serialize(self):
return {'error' : True, 'reason' : self.reason()}
def errorhandler(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
data = func(*args, **kwargs)
result = {'error' : False}
if data is not None:
result['data'] = data
return jsonify(result)
        except MissingFieldsError as e:
return jsonify(e.serialize())
except ApiError as e:
return jsonify(e.serialize())
except OperationalError as e:
return jsonify({'error' : True, 'reason' : "Cannot access database"})
except ValueError:
return jsonify({'error' : True, 'reason' : "Invalid input"})
    return wrapper
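# Usage sketch (assumes a Flask app object named `app`; the decorator wraps
# the view's return value into {'error': False, 'data': ...} JSON):
#
#   @app.route('/accounts')
#   @errorhandler
#   def list_accounts():
#       return [{'id': 1, 'name': 'demo'}]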
|
MetaPlot/MetaPlot
|
metaplot/helpers.py
|
Python
|
mit
| 4,900 | 0.008776 |
from __future__ import print_function
import os
import sys
import subprocess
try:
import pkg_resources
_has_pkg_resources = True
except:
_has_pkg_resources = False
try:
import svn.local
_has_svn_local = True
except:
_has_svn_local = False
def test_helper():
return "test helper text"
def dict_to_str(d):
"""
Given a dictionary d, return a string with
each entry in the form 'key: value' and entries
separated by newlines.
"""
vals = []
for k in d.keys():
vals.append('{}: {}'.format(k, d[k]))
v = '\n'.join(vals)
return v
def module_version(module, label=None):
"""
Helper function for getting the module ("module") in the current
namespace and their versions.
The optional argument 'label' allows you to set the
string used as the dictionary key in the returned dictionary.
By default the key is '[module] version'.
"""
if not _has_pkg_resources:
return {}
version = pkg_resources.get_distribution(module).version
if label:
k = '{}'.format(label)
else:
k = '{} version'.format(module)
return {k: '{}'.format(version)}
def file_contents(filename, label=None):
"""
Helper function for getting the contents of a file,
provided the filename.
Returns a dictionary keyed (by default) with the filename
where the value is a string containing the contents of the file.
The optional argument 'label' allows you to set the
string used as the dictionary key in the returned dictionary.
"""
if not os.path.isfile(filename):
print('ERROR: {} NOT FOUND.'.format(filename))
return {}
else:
fin = open(filename, 'r')
contents = ''
for l in fin:
contents += l
if label:
d = {'{}'.format(label): contents}
else:
d = {filename: contents}
return d
def svn_information(svndir=None, label=None):
"""
Helper function for obtaining the SVN repository
information for the current directory (default)
or the directory supplied in the svndir argument.
    Returns a dictionary keyed (by default) as 'SVN INFO'
where the value is a string containing essentially what
is returned by 'svn info'.
The optional argument 'label' allows you to set the
    string used as the dictionary key in the returned dictionary.
"""
if not _has_svn_local:
print('SVN information unavailable.')
print('You do not have the "svn" package installed.')
print('Install "svn" from pip using "pip install svn"')
return {}
if svndir:
repo = svn.local.LocalClient(svndir)
else:
repo = svn.local.LocalClient(os.getcwd())
try:
# Get a dictionary of the SVN repository information
info = repo.info()
except:
print('ERROR: WORKING DIRECTORY NOT AN SVN REPOSITORY.')
return {}
v = dict_to_str(info)
if label:
k = '{}'.format(label)
else:
k = 'SVN INFO'
return {k: v}
def get_git_hash(gitpath=None, label=None):
"""
Helper function for obtaining the git repository hash.
for the current directory (default)
or the directory supplied in the gitpath argument.
Returns a dictionary keyed (by default) as 'GIT HASH'
where the value is a string containing essentially what
is returned by subprocess.
The optional argument 'label' allows you to set the string
used as the dictionary key in the returned dictionary.
"""
if gitpath:
thisdir = os.getcwd()
os.chdir(gitpath)
try:
sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip()
except subprocess.CalledProcessError as e:
print("ERROR: WORKING DIRECTORY NOT A GIT REPOSITORY")
return {}
if label:
l = '{}'.format(label)
else:
l = 'GIT HASH'
return {l:sha}
def get_source_code(scode,sourcepath=None, label=None):
"""
Helper function for obtaining the source code.
for the current directory (default) or the directory
supplied in the sourcepath argument.
Returns a dictionary keyed (by default) as 'source code'
where the value is a string containing the source code.
The optional argument 'label' allows you to set the string
used as the dictionary key in the returned dictionary.
"""
if sourcepath:
os.chdir(sourcepath)
if not os.path.isfile(scode):
print('ERROR: {} NOT FOUND.'.format(scode))
return {}
else:
with open(scode,'r') as f:
s = f.read()
if label:
n = {'{}'.format(label):s}
else:
n = {'source code':s}
return n
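# Example: assemble a provenance record from the helpers above
#
#   meta = {}
#   meta.update(module_version('numpy'))
#   meta.update(file_contents('setup.py'))
#   meta.update(get_git_hash())
#   print(dict_to_str(meta))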
|
hasibi/TAGME-Reproducibility
|
nordlys/tagme/tagme.py
|
Python
|
mit
| 11,198 | 0.001875 |
"""
TAGME implementation
@author: Faegheh Hasibi (faegheh.hasibi@idi.ntnu.no)
"""
import argparse
import math
from nordlys.config import OUTPUT_DIR
from nordlys.tagme import config
from nordlys.tagme import test_coll
from nordlys.tagme.query import Query
from nordlys.tagme.mention import Mention
from nordlys.tagme.lucene_tools import Lucene
ENTITY_INDEX = Lucene(config.INDEX_PATH)
ANNOT_INDEX = Lucene(config.INDEX_ANNOT_PATH, use_ram=True)
# ENTITY_INDEX = IndexCache("/data/wikipedia-indices/20120502-index1")
# ANNOT_INDEX = IndexCache("/data/wikipedia-indices/20120502-index1-annot/", use_ram=True)
ENTITY_INDEX.open_searcher()
ANNOT_INDEX.open_searcher()
class Tagme(object):
DEBUG = 0
def __init__(self, query, rho_th, sf_source="wiki"):
self.query = query
self.rho_th = rho_th
self.sf_source = sf_source
        # TAGME params
self.link_prob_th = 0.001
self.cmn_th = 0.02
self.k_th = 0.3
self.link_probs = {}
self.in_links = {}
self.rel_scores = {} # dictionary {men: {en: rel_score, ...}, ...}
self.disamb_ens = {}
def parse(self):
"""
Parses the query and returns all candidate mention-entity pairs.
:return: candidate entities {men:{en:cmn, ...}, ...}
"""
ens = {}
for ngram in self.query.get_ngrams():
mention = Mention(ngram)
# performs mention filtering (based on the paper)
if (len(ngram) == 1) or (ngram.isdigit()) or (mention.wiki_occurrences < 2) or (len(ngram.split()) > 6):
continue
link_prob = self.__get_link_prob(mention)
if link_prob < self.link_prob_th:
continue
# These mentions will be kept
self.link_probs[ngram] = link_prob
# Filters entities by cmn threshold 0.001; this was only in TAGME source code and speeds up the process.
# TAGME source code: it.acubelab.tagme.anchor (lines 279-284)
ens[ngram] = mention.get_men_candidate_ens(0.001)
# filters containment mentions (based on paper)
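        # e.g. "armstrong" is dropped when it is contained in "neil armstrong"
        # and the longer mention has the higher link probability.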
candidate_entities = {}
sorted_mentions = sorted(ens.keys(), key=lambda item: len(item.split())) # sorts by mention length
for i in range(0, len(sorted_mentions)):
m_i = sorted_mentions[i]
ignore_m_i = False
for j in range(i+1, len(sorted_mentions)):
m_j = sorted_mentions[j]
if (m_i in m_j) and (self.link_probs[m_i] < self.link_probs[m_j]):
ignore_m_i = True
break
if not ignore_m_i:
candidate_entities[m_i] = ens[m_i]
return candidate_entities
def disambiguate(self, candidate_entities):
"""
        Performs disambiguation and links each mention to a single entity.
:param candidate_entities: {men:{en:cmn, ...}, ...}
:return: disambiguated entities {men:en, ...}
"""
# Gets the relevance score
rel_scores = {}
for m_i in candidate_entities.keys():
if self.DEBUG:
print "********************", m_i, "********************"
rel_scores[m_i] = {}
for e_m_i in candidate_entities[m_i].keys():
if self.DEBUG:
print "-- ", e_m_i
rel_scores[m_i][e_m_i] = 0
for m_j in candidate_entities.keys(): # all other mentions
if (m_i == m_j) or (len(candidate_entities[m_j].keys()) == 0):
continue
vote_e_m_j = self.__get_vote(e_m_i, candidate_entities[m_j])
rel_scores[m_i][e_m_i] += vote_e_m_j
if self.DEBUG:
print m_j, vote_e_m_j
# pruning uncommon entities (based on the paper)
self.rel_scores = {}
for m_i in rel_scores:
for e_m_i in rel_scores[m_i]:
cmn = candidate_entities[m_i][e_m_i]
if cmn >= self.cmn_th:
if m_i not in self.rel_scores:
self.rel_scores[m_i] = {}
self.rel_scores[m_i][e_m_i] = rel_scores[m_i][e_m_i]
# DT pruning
disamb_ens = {}
for m_i in self.rel_scores:
if len(self.rel_scores[m_i].keys()) == 0:
continue
top_k_ens = self.__get_top_k(m_i)
best_cmn = 0
best_en = None
for en in top_k_ens:
cmn = candidate_entities[m_i][en]
if cmn >= best_cmn:
best_en = en
best_cmn = cmn
disamb_ens[m_i] = best_en
return disamb_ens
    def prune(self, disamb_ens):
        """
        Performs AVG pruning.
        :param disamb_ens: {men: en, ...}
        :return: {men: (en, score), ...}
        """
        linked_ens = {}
        for men, en in disamb_ens.iteritems():
            coh_score = self.__get_coherence_score(men, en, disamb_ens)
rho_score = (self.link_probs[men] + coh_score) / 2.0
if rho_score >= self.rho_th:
linked_ens[men] = (en, rho_score)
return linked_ens
def __get_link_prob(self, mention):
"""
Gets link probability for the given mention.
Here, in fact, we are computing key-phraseness.
"""
pq = ENTITY_INDEX.get_phrase_query(mention.text, Lucene.FIELDNAME_CONTENTS)
mention_freq = ENTITY_INDEX.searcher.search(pq, 1).totalHits
if mention_freq == 0:
return 0
if self.sf_source == "wiki":
link_prob = mention.wiki_occurrences / float(mention_freq)
# This is TAGME implementation, from source code:
# link_prob = float(mention.wiki_occurrences) / max(mention_freq, mention.wiki_occurrences)
elif self.sf_source == "facc":
link_prob = mention.facc_occurrences / float(mention_freq)
return link_prob
def __get_vote(self, entity, men_cand_ens):
"""
vote_e = sum_e_i(mw_rel(e, e_i) * cmn(e_i)) / i
:param entity: en
:param men_cand_ens: {en: cmn, ...}
:return: voting score
"""
entity = entity if self.sf_source == "wiki" else entity[0]
vote = 0
for e_i, cmn in men_cand_ens.iteritems():
e_i = e_i if self.sf_source == "wiki" else e_i[0]
mw_rel = self.__get_mw_rel(entity, e_i)
# print "\t", e_i, "cmn:", cm
|
n, "mw_rel:", mw_rel
vote += cmn * mw_rel
vote /= float(len(men_cand_ens))
return vote
def __get_mw_rel(self, e1, e2):
"""
Calculates Milne & Witten relatedness for two entities.
This implementation is based on Dexter implementation (which is similar to TAGME implementation).
        - Dexter implementation: https://github.com/dexter/dexter/blob/master/dexter-core/src/main/java/it/cnr/isti/hpc/dexter/relatedness/MilneRelatedness.java
- TAGME: it.acubelab.tagme.preprocessing.graphs.OnTheFlyArrayMeasure
"""
if e1 == e2: # to speed-up
return 1.0
en_uris = tuple(sorted({e1, e2}))
ens_in_links = [self.__get_in_links([en_uri]) for en_uri in en_uris]
if min(ens_in_links) == 0:
return 0
conj = self.__get_in_links(en_uris)
if conj == 0:
return 0
numerator = math.log(max(ens_in_links)) - math.log(conj)
denominator = math.log(ANNOT_INDEX.num_docs()) - math.log(min(ens_in_links))
rel = 1 - (numerator / denominator)
if rel < 0:
return 0
return rel
def __get_in_links(self, en_uris):
"""
returns "and" occurrences of entities in the corpus.
:param en_uris: list of dbp_uris
"""
en_uris = tuple(sorted(set(en_uris)))
if en_uris in self.in_links:
return self.in_links[en_uris]
term_queries = []
for en_uri in en_uris:
term_queries.append(ANNOT_INDEX.get_id_lookup_query(en_uri, Lucene.FIELDNAME_CO
|
alxgu/ansible
|
lib/ansible/modules/windows/win_xml.py
|
Python
|
gpl-3.0
| 2,841 | 0.00176 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_xml
version_added: "2.7"
short_description: Add XML fragment to an XML parent
description:
- Adds XML fragments formatted as strings to existing XML on remote servers.
- For non-Windows targets, use the M(xml) module instead.
options:
path:
description:
- The path of remote servers XML.
type: path
required: true
aliases: [ dest, file ]
fragment:
description:
- The string representation of the XML fragment to be added.
type: str
required: true
aliases: [ xmlstring ]
xpath:
description:
- The node of the remote server XML where the fragment will go.
type: str
required: true
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
type:
description:
- The type of XML you are working with.
type: str
required: yes
default: element
choices: [ attribute, element, text ]
attribute:
description:
- The attribute name if the type is 'attribute'.
- Required if C(type=attribute).
type: str
author:
- Richard Levenberg (@richardcs)
'''
EXAMPLES = r'''
- name: Apply our filter to Tomcat web.xml
win_xml:
path: C:\apache-tomcat\webapps\myapp\WEB-INF\web.xml
fragment: '<filter><filter-name>MyFilter</filter-name><filter-class>com.example.MyFilter</filter-class></filter>'
xpath: '/*'
- name: Apply sslEnabledProtocols to Tomcat's server.xml
win_xml:
path: C:\Tomcat\conf\server.xml
xpath: '//Server/Service[@name="Catalina"]/Connector[@port="9443"]'
attribute: 'sslEnabledProtocols'
fragment: 'TLSv1,TLSv1.1,TLSv1.2'
type: attribute
'''
RETURN = r'''
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
msg:
description: What was done.
returned: always
type: str
sample: "xml added"
err:
description: XML comparison exceptions.
returned: always, for type element and -vvv or more
type: list
sample: attribute mismatch for actual=string
'''
|
jcmgray/autoray
|
tests/test_lazy.py
|
Python
|
apache-2.0
| 11,880 | 0 |
import functools
import re
import pytest
from autoray import do, lazy, to_numpy, infer_backend, get_dtype_name, astype
from numpy.testing import assert_allclose
from .test_autoray import BACKENDS, gen_rand
def test_manual_construct():
def foo(a, b, c):
a1, a2 = a
b1 = b['1']
c1, c2 = c['sub']
return do('sum', do('stack', (a1, a2, b1, c1, c2)), axis=0)
    x = do('random.uniform', size=(5, 7), like='numpy')
x0 = lazy.array(x[0, :])
x1 = lazy.array(x[1, :])
x2 = lazy.array(x[2, :])
x3 = lazy.array(x[3, :])
    x4 = lazy.array(x[4, :])
y = lazy.LazyArray(
backend=infer_backend(x),
fn=foo,
args=((x0, x1), {'1': x2}),
kwargs=dict(c={'sub': (x3, x4)}),
shape=(7,),
dtype='float64',
)
assert y.deps == (x0, x1, x2, x3, x4)
assert re.match(
r'x\d+ = foo\d+\(\(x\d+, x\d+,\), '
r'{1: x\d+}, c: {sub: \(x\d+, x\d+,\)}\)',
y.get_source()
)
assert_allclose(y.compute(), x.sum(0))
def modified_gram_schmidt(X):
Q = []
for j in range(0, X.shape[0]):
q = X[j, :]
for i in range(0, j):
rij = do("tensordot", do("conj", Q[i]), q, axes=1)
q = q - rij * Q[i]
rjj = do("linalg.norm", q, 2)
Q.append(q / rjj)
return do("stack", tuple(Q), axis=0)
def wrap_strict_check(larray):
fn_orig = larray._fn
@functools.wraps(fn_orig)
def checked(*args, **kwargs):
data = fn_orig(*args, **kwargs)
assert tuple(data.shape) == larray.shape
assert get_dtype_name(data) == larray.dtype
assert infer_backend(data) == larray.backend
return data
return checked
def make_strict(larray):
for node in larray:
        node._fn = wrap_strict_check(node)
@pytest.mark.parametrize("backend", BACKENDS)
def test_lazy_mgs(backend):
if backend == "sparse":
pytest.xfail("Sparse doesn't support 'linalg.norm' yet...")
x = gen_rand((5, 5), backend)
lx = lazy.array(x)
ly = modified_gram_schmidt(lx)
make_strict(ly)
assert str(ly) == (
f"<LazyArray(fn=stack, shape=(5, 5), "
f"dtype=float64, backend='{backend}')>"
)
assert isinstance(ly, lazy.LazyArray)
hmax = ly.history_max_size()
hpeak = ly.history_peak_size()
htot = ly.history_total_size()
assert hmax == 25
assert 25 < hpeak < htot
assert len(tuple(ly)) == 57
assert len({node.fn_name for node in ly}) == 9
assert_allclose(to_numpy(ly.compute()), to_numpy(modified_gram_schmidt(x)))
with lazy.shared_intermediates():
ly = modified_gram_schmidt(lx)
make_strict(ly)
assert len(tuple(ly)) == 51
assert len({node.fn_name for node in ly}) == 9
assert_allclose(to_numpy(ly.compute()), to_numpy(modified_gram_schmidt(x)))
def test_partial_evaluation():
la = lazy.array(gen_rand((10, 10), "numpy"))
lb = lazy.array(gen_rand((10, 10), "numpy"))
lc = lazy.array(gen_rand((10, 10), "numpy"))
ld = lazy.array(gen_rand((10, 10), "numpy"))
lab = do("tanh", la @ lb)
lcd = lc @ ld
ls = lab + lcd
ld = do("abs", lab / lcd)
le = do("einsum", "ab,ba->a", ls, ld)
lf = do("sum", le)
make_strict(lf)
assert len(tuple(lf)) == 12
lf.compute_constants(variables=[lc, ld]) # constants = [la, lb]
assert len(tuple(lf)) == 9
assert "tanh" not in {node.fn_name for node in lf}
lf.compute()
def test_plot():
import matplotlib
matplotlib.use("Template")
la = lazy.array(gen_rand((10, 10), "numpy"))
lb = lazy.array(gen_rand((10, 10), "numpy"))
lc = lazy.array(gen_rand((10, 10), "numpy"))
ld = lazy.array(gen_rand((10, 10), "numpy"))
lab = do("tanh", la @ lb)
lcd = lc @ ld
ls = lab + lcd
ld = do("abs", lab / lcd)
le = do("einsum", "ab,ba->a", ls, ld)
lf = do("sum", le)
lf.plot()
lf.plot(variables=[lc, ld])
lf.plot_history_size_footprint()
def test_share_intermediates():
la = lazy.array(gen_rand((10, 10), "numpy"))
lb = lazy.array(gen_rand((10, 10), "numpy"))
l1 = do("tanh", la @ lb)
l2 = do("tanh", la @ lb)
ly = l1 + l2
assert len(tuple(ly)) == 7
y1 = ly.compute()
with lazy.shared_intermediates():
l1 = do("tanh", la @ lb)
l2 = do("tanh", la @ lb)
ly = l1 + l2
assert len(tuple(ly)) == 5
y2 = ly.compute()
assert_allclose(y1, y2)
@pytest.mark.parametrize("backend", BACKENDS)
def test_transpose_chain(backend):
lx = lazy.array(gen_rand((2, 3, 4, 5, 6), backend))
l1 = do("transpose", lx, (1, 0, 3, 2, 4))
l2 = do("transpose", l1, (1, 0, 3, 2, 4))
assert l2.args[0] is lx
assert l2.deps == (lx,)
assert len(tuple(l1)) == 2
assert len(tuple(l2)) == 2
assert_allclose(
to_numpy(lx.compute()), to_numpy(l2.compute()),
)
@pytest.mark.parametrize("backend", BACKENDS)
def test_reshape_chain(backend):
lx = lazy.array(gen_rand((2, 3, 4, 5, 6), backend))
l1 = do("reshape", lx, (6, 4, 30))
l2 = do("reshape", l1, (-1,))
assert len(tuple(l1)) == 2
assert len(tuple(l2)) == 2
assert l2.args[0] is lx
assert l2.deps == (lx,)
assert_allclose(
to_numpy(lx.compute()).flatten(), to_numpy(l2.compute()),
)
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_svd(backend, dtype):
if backend == "sparse":
pytest.xfail("Sparse doesn't support 'linalg.svd' yet...")
x = lazy.array(gen_rand((4, 5), backend, dtype))
U, s, VH = do("linalg.svd", x)
assert U.shape == (4, 4)
assert s.shape == (4,)
assert VH.shape == (4, 5)
s = astype(s, dtype)
ly = U @ (do("reshape", s, (-1, 1)) * VH)
make_strict(ly)
assert_allclose(
to_numpy(x.compute()), to_numpy(ly.compute()),
)
@pytest.mark.parametrize("backend", BACKENDS)
def test_qr(backend):
if backend == "sparse":
pytest.xfail("Sparse doesn't support 'linalg.qr' yet...")
x = lazy.array(gen_rand((4, 5), backend))
Q, R = do("linalg.qr", x)
assert Q.shape == (4, 4)
assert R.shape == (4, 5)
ly = Q @ R
make_strict(ly)
assert_allclose(
to_numpy(x.compute()), to_numpy(ly.compute()),
)
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_eig_inv(backend, dtype):
if backend in ("cupy", "dask", "torch", "mars", "sparse"):
pytest.xfail(f"{backend} doesn't support 'linalg.eig' yet...")
# N.B. the prob that a real gaussian matrix has all real eigenvalues is
# ``2**(-d * (d - 1) / 4)`` - see Edelman 1997 - so need ``d >> 5``
d = 20
x = lazy.array(gen_rand((d, d), backend, dtype))
el, ev = do("linalg.eig", x)
assert el.shape == (d,)
assert ev.shape == (d, d)
ly = ev @ (do("reshape", el, (-1, 1)) * do("linalg.inv", ev))
make_strict(ly)
assert_allclose(
to_numpy(x.compute()), to_numpy(ly.compute()),
)
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_eigh(backend, dtype):
if backend in ("dask", "mars", "sparse",):
pytest.xfail(f"{backend} doesn't support 'linalg.eig' yet...")
x = lazy.array(gen_rand((5, 5), backend, dtype))
x = x + x.H
el, ev = do("linalg.eigh", x)
assert get_dtype_name(ev) == dtype
assert el.shape == (5,)
assert ev.shape == (5, 5)
ly = ev @ (do("reshape", el, (-1, 1)) * ev.H)
make_strict(ly)
assert_allclose(
to_numpy(x.compute()), to_numpy(ly.compute()),
)
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_cholesky(backend, dtype):
if backend in ("sparse",):
pytest.xfail(f"{backend} doesn't support 'linalg.cholesky' yet...")
x = lazy.array(gen_rand((5, 5), backend, dtype))
x = x @ x.H
C = do("linalg.cholesky", x)
assert C.shape == (5, 5)
ly = C @ C.H
make_strict(ly)
assert_allclose(
to_numpy(x.compute()), to_numpy(ly.compute()),
)
@pytest.mark.parametrize("
|
gisodal/cumodoro
|
cumodoro/main.py
|
Python
|
mit
| 765 | 0.007843 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import curses
import cumodoro.config as config
import cumodoro.interface as interface
import cumodoro.globals as globals
from cumodoro.cursest import Refresher
import logging
log = logging.getLogger('cumodoro')
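# The title helpers below use xterm control sequences: OSC 0 sets the window
# title; CSI 22 t and CSI 23 t push and pop the title on the terminal's stack.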
def set_title(msg):
print("\x1B]0;%s\x07" % msg)
def get_title():
print("\x1B[23t")
return sys.stdin.read()
def save_title():
    print("\x1B[22t")
def restore_title():
print("\x1B[23t")
def main():
globals.refresher = Refresher()
globals.refresher.start()
globals.database.create()
globals.database.load_tasks()
os.environ["ESCDELAY"] = "25"
save_title()
set_title("Cumodoro")
curses.wrapper(interface.main)
restore_title()
|
nion-software/nionswift
|
nion/swift/model/ApplicationData.py
|
Python
|
gpl-3.0
| 3,883 | 0.004378 |
"""
Stores application data.
"""
# standard libraries
import copy
import json
import pathlib
import threading
import typing
# third party libraries
from nion.swift.model import Utility
from nion.utils import Event
from nion.utils import StructuredModel
class ApplicationData:
"""Application data is a singleton that stores application data."""
def __init__(self, file_path: typing.Optional[pathlib.Path] = None) -> None:
self.__lock = threading.RLock()
self.__file_path = file_path
self.__data_dict: typing.Optional[typing.Dict[str, typing.Any]] = None
self.data_changed_event = Event.Event()
@property
def file_path(self) -> typing.Optional[pathlib.Path]:
return self.__file_path
@file_path.setter
def file_path(self, value: pathlib.Path) -> None:
self.__file_path = value
def get_data_dict(self) -> typing.Dict[str, typing.Any]:
with self.__lock:
data_changed = self.__read_data_dict()
result = copy.deepcopy(self.__data_dict) if self.__data_dict else dict()
if data_changed:
self.data_changed_event.fire()
return result
def set_data_dict(self, d: typing.Mapping[str, typing.Any]) -> None:
with self.__lock:
self.__data_dict = dict(d)
self.__write_data_dict()
self.data_changed_event.fire()
def __read_data_dict(self) -> bool:
if self.__data_dict is None and self.__file_path and self.__file_path.exists():
with open(self.__file_path) as f:
self.__data_dict = json.load(f)
return True
return False
def __write_data_dict(self) -> None:
if self.__file_path:
with Utility.AtomicFileWriter(self.__file_path) as fp:
json.dump(self.__data_dict, fp, skipkeys=True, indent=4)
__application_data = ApplicationData()
def set_file_path(file_path: pathlib.Path) -> None:
__application_data.file_path = file_path
def get_data() -> typing.Dict[str, typing.Any]:
return __application_data.get_data_dict()
def set_data(d: typing.Mapping[str, typing.Any]) -> None:
__application_data.set_data_dict(d)
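# Minimal usage sketch (hypothetical path and keys):
#   set_file_path(pathlib.Path("application_data.json"))
#   set_data({"theme": "dark"})
#   get_data()["theme"]  # -> "dark"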
#
class SessionMetadata:
"""Session data is a singleton that stores application data via the Appli
|
cationData singleton."""
def __init__(self) -> None:
site_field = StructuredModel.define_field("site", StructuredModel.STRING)
instrument_field = StructuredModel.define_field("instrument", StructuredModel.STRING)
task_field = StructuredModel.define_field("task", StructuredModel.STRING)
microscopist_field = StructuredModel.define_field("microscopist", StructuredModel.STRING)
sample_field = StructuredModel.define_field("sample", StructuredModel.STRING)
sample_area_field = StructuredModel.define_field("sample_area", StructuredModel.STRING)
schema = StructuredModel.define_record("SessionMetadata", [site_field, instrument_field, task_field, microscopist_field, sample_field, sample_area_field])
self.__model = StructuredModel.build_model(schema, value=get_data().get("session_metadata", dict()))
def model_changed() -> None:
data = get_data()
data["session_metadata"] = self.__model.to_dict_value()
set_data(data)
self.__model_changed_listener = self.__model.model_changed_event.listen(model_changed)
@property
def model(self) -> StructuredModel.RecordModel:
return typing.cast(StructuredModel.RecordModel, self.__model)
__session_metadata = SessionMetadata()
def get_session_metadata_model() -> StructuredModel.RecordModel:
return __session_metadata.model
def get_session_metadata_dict() -> typing.Dict[str, typing.Any]:
return dict(typing.cast(typing.Mapping[str, typing.Any], __session_metadata.model.to_dict_value()))
|
OzFlux/PyFluxPro
|
scripts/constants.py
|
Python
|
gpl-3.0
| 4,163 | 0.017535 |
beta = 5 # "beta" value in adiabatic correction to wind profile
Cd = 840.0 # heat capacity of mineral component of soil, J/kg/K
Co = 1920.0 # heat capacity of organic component of soil, J/kg/K
Cp = 1004.67 # specific heat of dry air at constant pressure, J/kg-K
Cpd = 1004.67 # specific heat of dry air at constant pressure, J/kg-K
Cw = 4180.0 # heat capacity of water, J/kg/K
D0 = 10. # specific humidity deficit threshold for Lasslop et al (2010) NEE expression
E0_long = 100 # long term activation energy, default value
eps = 0.0000001 # a small number for comparing floats
g = 9.81 # gravitation constant
gamma = 28 # "gamma" value in adiabatic correction to wind profile
g2kg = 1E-3 # convert grams to kilograms
k = 0.4 # von Karmans constant
Lv = 2453600 # latent heat of vapourisation, J/kg
Mc = 0.0120107 # molecular weight of carbon, kg/mol
Mco2 = 0.04401 # molecular weight of carbon dioxide, kg/mol
Md = 0.02897 # molecular weight of dry air, kg/mol
missing_value = -9999 # missing data value
large_value = 1E35 # large value
small_value = -1E35 # small value
Mv = 0.01802 # molecular weight of water vapour, kg/mol
mu = Md/Mv # ratio of dry air molecular weight to water vapour molecular weight
rho_water = 1000.0 # density of water, kg/m^3
R = 8.314 # universal gas constant, J/mol.K
Rd = 287.04 # gas constant for dry air, J/kg/K
Rv = 461.5 # gas constant for water vapour, J/kg/K
Pi = 3.14159 # Pi
sb = 5.6704E-8 # Stefan-Boltzman constant, W/m^2/K^4
Tref = 15.0 # reference temperature in the Lloyd-Taylor respiration equation, degC
T0 = -46.02 # zero temperature in the Lloyd-Taylor respiration equation, degC
lwVert = 0.115 # vertical path length of CSAT3, m
lwHor = 0.058 # horizontal path length of CSAT3, m
lTv = 0.115 # path length of sonic virtual temperature, m
dIRGA = 0.0095 # path diameter of LI7500 IRGA, m
lIRGA = 0.127 # path length of LI7500 IRGA, m
Tb = 1800 # 30-min period, seconds
C2K = 273.15 # convert degrees celsius to kelvin
# dictionary of site names and time zones
tz_dict = {"adelaideriver":"Australia/Darwin",
"alicespringsmulga":"Australia/Darwin",
"arcturus":"Australia/Brisbane",
"calperum":"Australia/Adelaide",
"capetribulation":"Australia/Brisbane",
"cowbay":"Australia/Brisbane",
"cumberlandplains":"Australia/Sydney",
"cup_ec":"Australia/Sydney",
"daintree":"Australia/Brisbane",
"dalypasture":"Australia/Darwin",
"dalyregrowth":"Australia/Darwin",
"dalyuncleared":"Australia/Darwin",
"dargo":"Australia/Melbourne",
"dryriver":"Australia/Darwin",
"foggdam":"Australia/Darwin",
"gingin":"Australia/Perth",
"greatwestern":"Australia/Perth",
"gww":"Australia/Perth",
"howardsprings":"Australia/Darwin",
"litchfield":"Australia/Darwin",
"nimmo":"Australia/Sydney",
"reddirt":"Australia/Darwin",
"riggs":"Australia/Melbourne",
"robson":"Australia/Brisbane",
"samford":"Australia/Brisbane",
"sturtplains":"Australia/Darwin",
"titreeeast":"Australia/Darwin",
"tumbarumba":"Australia/Canberra",
"wallaby":"Australia/Melbourne",
"warra":"Australia/Hobart",
"whroo":"Australia/Melbourne",
"wombat":"Australia/Melbourne",
"yanco_jaxa":"Australia/Sydney"}
units_synonyms = {"Fsd":["W/m^2","W+1m-2"],
"Fsu":["W/m^2","W+1m-2"],
"Fld":["W/m^2","W+1m-2"],
"Flu":["W/m^2","W+1m-2"],
"Fn":["W/m^2","W+1m-2"],
"Fg":["W/m^2","W+1m-2"],
"Precip":["mm"],
"ps":["kPa"],
"RH":["%","percent"],
"Sws":["frac","m^3/m^3","m+3m-3"],
"Ta":["C","degC"],
"Ts":["C","degC"],
"Wd":["degT","deg","degrees"],
"Ws":["m/s","m+1s-1"]}
|
muffl0n/ansible-modules-extras
|
cloud/vmware/vca_fw.py
|
Python
|
gpl-3.0
| 14,207 | 0.011755 |
#!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vca_fw
short_description: add remove firewall rules in a gateway in a vca
description:
- Adds or removes firewall rules from a gateway in a vca environment
version_added: "2.0"
options:
username:
description:
- The vca username or email address, if not set the environment variable VCA_USER is checked for the username.
required: false
default: None
password:
description:
- The vca password, if not set the environment variable VCA_PASS is checked for the password
required: false
default: None
org:
description:
- The org to login to for creating vapp, mostly set when the service_type is vdc.
required: false
default: None
service_id:
description:
- The service id in a vchs environment to be used for creating the vapp
required: false
default: None
host:
description:
- The authentication host to be used when service type is vcd.
required: false
default: None
api_version:
description:
- The api version to be used with the vca
required: false
default: "5.7"
service_type:
description:
- The type of service we are authenticating against
required: false
default: vca
choices: [ "vca", "vchs", "vcd" ]
state:
description:
- if the object should be added or removed
required: false
default: present
choices: [ "present", "absent" ]
verify_certs:
description:
- If the certificates of the authentication is to be verified
required: false
default: True
vdc_name:
description:
- The name of the vdc where the gateway is located.
required: false
default: None
gateway_name:
description:
- The name of the gateway of the vdc where the rule should be added
required: false
default: gateway
fw_rules:
description:
- A list of firewall rules to be added to the gateway, Please see examples on valid entries
required: True
default: false
'''
EXAMPLES = '''
#Add a set of firewall rules
- hosts: localhost
connection: local
tasks:
- vca_fw:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'absent'
fw_rules:
- description: "ben testing"
source_ip: "Any"
dest_ip: 192.168.2.11
- description: "ben testing 2"
source_ip: 192.168.2.100
source_port: "Any"
dest_port: "22"
dest_ip: 192.168.2.13
is_enable: "true"
enable_logging: "false"
protocol: "Tcp"
policy: "allow"
'''
import os, time, json, xmltodict
HAS_PYVCLOUD = False
try:
from pyvcloud.vcloudair import VCA
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
HAS_PYVCLOUD = True
except ImportError:
pass
SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'}
LOGIN_HOST = {}
LOGIN_HOST['vca'] = 'vca.vmware.com'
LOGIN_HOST['vchs'] = 'vchs.vmware.com'
VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description', 'dest_ip', 'dest_port', 'source_ip', 'source_port', 'protocol']
def vca_login(module=None):
service_type = module.params.get('service_type')
username = module.params.get('username')
password = module.params.get('password')
instance = module.params.get('instance_id')
org = module.params.get('org')
service = module.params.get('service_id')
vdc_name = module.params.get('vdc_name')
version = module.params.get('api_version')
verify = module.params.get('verify_certs')
if not vdc_name:
if service_type == 'vchs':
vdc_name = module.params.get('service_id')
if not org:
if service_type == 'vchs':
if vdc_name:
org = vdc_name
else:
org = service
if service_type == 'vcd':
host = module.params.get('host')
else:
host = LOGIN_HOST[service_type]
if not username:
if 'VCA_USER' in os.environ:
username = os.environ['VCA_USER']
if not password:
if 'VCA_PASS' in os.environ:
password = os.environ['VCA_PASS']
if not username or not password:
module.fail_json(msg = "Either the username or password is not set, please check")
if service_type == 'vchs':
version = '5.6'
if service_type == 'vcd':
if not version:
            version = '5.6'
vca = VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify)
if service_type == 'vca':
if not vca.login(password=password):
module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content)
if not vca.login_to_instance(password=password, instance=instance, token=None, org_url=None):
s_json = serialize_instances(vca.instances)
module.fail_json(msg = "Login to Instance failed: Seems like instance_id provided is wrong ..
|
Please check",\
valid_instances=s_json)
if not vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token,
org_url=vca.vcloud_session.org_url):
module.fail_json(msg = "Error logging into org for the instance", error=vca.response.content)
return vca
if service_type == 'vchs':
if not vca.login(password=password):
module.fail_json(msg = "Lo
|
gin Failed: Please check username or password", error=vca.response.content)
if not vca.login(token=vca.token):
module.fail_json(msg = "Failed to get the token", error=vca.response.content)
if not vca.login_to_org(service, org):
module.fail_json(msg = "Failed to login to org, Please check the orgname", error=vca.response.content)
return vca
if service_type == 'vcd':
if not vca.login(password=password, org=org):
module.fail_json(msg = "Login Failed: Please check username or password or host parameters")
if not vca.login(password=password, org=org):
module.fail_json(msg = "Failed to get the token", error=vca.response.content)
if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url):
module.fail_json(msg = "Failed to login to org", error=vca.response.content)
return vca
def validate_fw_rules(module=None, fw_rules=None):
VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Any']
for rule in fw_rules:
if not isinstance(rule, dict):
module.fail_json(msg="Firewall rules must be a list of dictionaries, Please check", valid_keys=VALID_RULE_KEYS)
for k in rule.keys():
if k not in VALID_RULE_KEYS:
module.fail_json(msg="%s is not a valid key in fw rules, Please check above.." %k, valid_keys=VALID_RULE_KEYS)
rule['dest_port'] = rule.get('dest_port', 'Any')
rule['dest_ip'] = rule.get('dest_ip', 'Any')
rule['source_port'] = rule.get('source_port', 'Any')
rule['source_ip'] = rule.get('source_ip', 'Any')
rule['protocol
|
Verizon/hlx
|
tests/blackbox/examples/bb_test_basic.py
|
Python
|
apache-2.0
| 1,993 | 0.002509 |
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
from nose.tools import with_setup
import subprocess
import requests
import os
from .. import util
import time
import json
# ------------------------------------------------------------------------------
# Constants
# ------------------------------------------------------------------------------
G_TEST_HOST = 'http://127.0.0.1:12345/'
# ------------------------------------------------------------------------------
# globals
# ------------------------------------------------------------------------------
g_server_pid = -1
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def setup_func():
global g_server_pid
l_subproc = subprocess.Popen(["../../build/examples/basic"])
g_server_pid = l_subproc.pid
time.sleep(0.2)
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def teardown_func():
global g_server_pid
l_code, l_out, l_err = util.run_command('kill -9 %d'%(g_server_pid))
time.sleep(0.2)
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
@with_setup(setup_func, teardown_func)
def bb_test_basic_001():
# Unimplemented request
l_e = G_TEST_HOST + 'bleep/bloop/blop'
l_r = requests.get(l_e)
assert l_r.status_code == 501
l_r_json = json.loads(l_r.content)
assert l_r_json != None
assert len(l_r_json['errors']) > 0
assert l_r_json['errors'][0]['code'] == 501
# Valid request
l_e = G_TEST_HOST + 'bananas'
l_r = requests.get(l_e)
assert l_r.status_code == 200
assert 'Hello World' in l_r.content
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram2d/stream/_token.py
|
Python
|
mit
| 499 | 0.002004 |
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="to
|
ken", parent_name="histogram2d.stream", **kwargs):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs
)
|
brownjm/praisetex
|
song.py
|
Python
|
gpl-3.0
| 2,932 | 0.006139 |
"""New song class to work with a plain text song file format"""
import os
import re
chord_regex = re.compile("[A-G][1-9#bminajsugd]*[/]*[A-G]*[1-9#bminajsugd]*")
valid_chords = "ABCDEFGb#minajsugd123456789"
not_chords = "HJKLOPQRTVWXYZ\n"
class Chord(object):
"""Represents a single chord within a song file"""
def __init__(self, chord):
self.text = chord
def __repr__(self):
return "Chord({})".format(self.text)
class Chordline(object):
"""Represents multiple chords that are on a separate line"""
def __init__(self, chords):
self.text = chords
def __repr__(self):
return "Chordline({})".format(self.text)
class Text(object):
"""Represents plain text, such as lyrics, within a song file"""
def __init__(self, text):
self.text = text
def __repr__(self):
return "Text({})".format(self.text)
def combine(chord_line, lyrics):
"""Combines a line of chords with its associated lyrics"""
# make sure the lyrics line is long enough to hold chords
if(len(chord_line) > len(lyrics)):
lyrics = lyrics.ljust(len(chord_line))
# find valid chords
matches = chord_regex.finditer(chord_line)
# list of (location, chord)
chords = list(zip([match.start() for match in matches],
                      chord_line.split()))
# insert chords in verse order since insertion shifts positions of subsequent chords
combined = []
chords.reverse()
    for chord in chords:
loc, ch = chord
combined.append(Text(lyrics[loc:]))
combined.append(Chord(ch))
lyrics = lyrics[:loc]
if len(lyrics) > 0: # handle any leftover text before first chord
combined.append(Text(lyrics))
combined.reverse()
return combined
def is_chord_line(line):
"""Checks if the line contains chords"""
if contains_only(line, valid_chords) and not contains_any(line, not_chords):
return True
else:
return False
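# Example: is_chord_line("C  G/B  Am7") -> True, while is_chord_line("Hello")
# -> False ("H" is in not_chords and "o" is not a valid chord character).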
def find_commands(text):
"""Returns a list of line numbers which contain a colon, representing a command"""
line_numbers = []
num = 0
for line in text:
if ":" in line:
line_numbers.append(num)
num += 1
return line_numbers
def contains_any(line, letters):
"""Check if any of the letters are in the line"""
for letter in letters:
if letter in line:
return True
return False
def contains_only(line, letters):
"""Check if the line only contains these letters"""
for c in line:
if c.isalnum(): # if character is alphanumeric
if c in letters:
continue
else: # character not found in letters
return False
else: # ignore non-alphanumeric characters
continue
return True
if __name__ == '__main__':
s = Song('songs/10000Reasons.txt')
c = s.attributes['chorus 1']
|
fretscha/pfa
|
config/settings/common.py
|
Python
|
bsd-3-clause
| 8,513 | 0.000822 |
# -*- coding: utf-8 -*-
"""
Django settings for pfa project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('pfa')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'session_security', # session timeout management
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pfa.users', # custom users app
'pfa.postfix',
'pfa.api',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# Session inactivity management
'session_security.middleware.SessionSecurityMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'pfa.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Frederic Tschannen""", 'fts@umx.net'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres://localhost/pfa"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processors go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
str(APPS_DIR.path('templates')),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['req
|
mgaitan/waliki_flask
|
waliki/extensions/uploads.py
|
Python
|
bsd-3-clause
| 4,692 | 0.008316 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Martín Gaitán
# Copyright (c) 2012-2013, Alexander Jung-Loddenkemper
# This file is part of Waliki (http://waliki.nqnwebs.com/)
# License: BSD (https://github.com/mgaitan/waliki/blob/master/LICENSE)
#===============================================================================
# DOCS
#===============================================================================
"""Plugin for upload files to waliki webpages"""
#===============================================================================
# IMPORTS
#===============================================================================
import os.path
import imghdr
from flaskext.uploads import (
UploadSet, ALL, configure_uploads, patch_request_class
)
from flask import (render_template, flash, request, Blueprint, current_app,
abort, send_file, url_for, jsonify)
#===============================================================================
# CONSTANTS
#===============================================================================
CLOSE_WINDOW_HTML = """
<html>
<head>
<script type="text/javascript">
window.close();
</script>
</head>
<body>
</body>
</html>"""
#===============================================================================
# BLUEPRINT AND BASE SETUP
#===============================================================================
def default_dest(app):
return os.path.join(app.config.get('CONTENT_DIR'), 'uploads')
media = UploadSet('media', ALL, default_dest=default_dest)
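# Attachments end up on disk under CONTENT_DIR/uploads/<page.url>
# (see media.save(..., folder=page.url) in upload() below).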
uploads = Blueprint('uploads', __name__, template_folder='templates')
#===============================================================================
# SLOT
#===============================================================================
def extra_actions(page, **extra):
context = extra['extra_context']
actions = context.get('extra_actions', [])
actions.append(('Attachments', url_for('uploads.upload', url=extra.get('url'))))
context['extra_actions'] = actions
#===============================================================================
# INITIALIZER
#===============================================================================
REQUIREMENTS = ["Flask-Uploads"]
def init(app):
app.register_blueprint(uploads)
configure_uploads(app, media)
app.signals.signal('pre-edit').connect(extra_actions)
patch_request_class(app, 32 * 1024 * 1024) # limit 32mb
#===============================================================================
# ROUTES
#===============================================================================
@uploads.route('/<path:url>/_upload', methods=['GET', 'POST'])
def upload(url):
last_attached = None
page = current_app.wiki.get_or_404(url)
if request.method == 'POST' and 'attach' in request.files:
last_attached = request.files['attach']
media.save(last_attached, folder=page.url)
flash('"%s" was attached succesfully to /%s' % (last_attached.filename,
page.url))
try:
files = os.listdir(os.path.join(current_app.config.get('CONTENT_DIR'),
'uploads', page.url))
except OSError:
files = []
return render_template('upload.html', page=page, files=files,
markup=current_app.wiki.markup)
def _base_file(url, filename):
page = current_app.wiki.get_or_404(url)
directory = os.path.join(current_app.config.get('CONTENT_DIR'),
'uploads', url)
try:
files = os.listdir(directory)
except OSError:
files = []
    if filename not in files:
abort(404)
outfile = os.path.join(directory, filename)
return outfile
@uploads.route('/<path:url>/_attachment/<filename>')
def get_file(url, filename):
outfile = _base_file(url, filename)
# by default only images are embeddable.
as_attachment = ((not imghdr.what(outfile) and 'embed' not in request.args)
or 'as_attachment' in request.args)
return send_file(outfile, as_attachment=as_attachment)
@uploads.route('/<path:url>/_remove/<filename>', methods=['POST', 'DELETE'])
def remove_file(url, filename):
outfile = _base_file(url, filename)
    try:
        os.remove(outfile)
    except OSError:
        # Removal failed (e.g. the file disappeared); report nothing removed.
        return jsonify({'removed': None})
    return jsonify({'removed': filename})
#===============================================================================
# MAIN
#===============================================================================
if __name__ == "__main__":
print(__doc__)
|
prabodhprakash/problemsolving
|
spoj/EBOXES.py
|
Python
|
mit
| 148 | 0.033784 |
no_inputs = int(raw_input())
for i in range (0, no_inputs):
n, k, t, f = map(int, raw_input().split())
answer = n + k*((f-n)/(k-1))
    print answer
|
Kinggerm/GetOrganelle
|
Utilities/evaluate_assembly_using_mapping.py
|
Python
|
gpl-3.0
| 29,914 | 0.005517 |
#! /usr/bin/env python
# coding:utf8
from argparse import ArgumentParser
import os
import sys
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(0, os.path.join(PATH_OF_THIS_SCRIPT, ".."))
import GetOrganelleLib
from GetOrganelleLib.pipe_control_func import *
from GetOrganelleLib.seq_parser import *
from GetOrganelleLib.sam_parser import *
from GetOrganelleLib.statistical_func import *
from GetOrganelleLib.versions import get_versions
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
from sympy import Interval
import platform
SYSTEM_NAME = ""
if platform.system() == "Linux":
SYSTEM_NAME = "linux"
elif platform.system() == "Darwin":
SYSTEM_NAME = "macOS"
else:
sys.stdout.write("Error: currently GetOrganelle is not supported for " + platform.system() + "! ")
exit()
GO_LIB_PATH = os.path.split(GetOrganelleLib.__file__)[0]
GO_DEP_PATH = os.path.realpath(os.path.join(GO_LIB_PATH, "..", "GetOrganelleDep", SYSTEM_NAME))
try:
    # python2 UnicodeDecodeError workaround
reload(sys)
sys.setdefaultencoding('utf8')
except NameError:
pass
def get_options():
parser = ArgumentParser("evaluate_assembly_using_mapping.py -f fasta_file -1 RAW_1.fq -2 RAW_2.fq -o output")
parser.add_argument("-f", dest="fasta",
help="input assembly fasta file.")
parser.add_argument("-1", dest="original_fq_1")
parser.add_argument("-2", dest="original_fq_2")
parser.add_argument("-u", dest="unpaired_fq_files", default="",
help="Input file(s) with unpaired (single-end) reads to be added to the pool. "
"files could be comma-separated lists such as 'seq1,seq2'.")
parser.add_argument("-X", "--max-lib-len", dest="max_lib_len", type=int, default=1200,
help="Corresponding to '-X' option in Bowtie2. Default: %(default)s.")
parser.add_argument("-c", dest="is_circular", default="auto",
help="(yes/no/auto) input fasta is circular. "
"If auto was chosen, the input fasta would be treated as circular when the sequence name "
"ends with '(circular)'. "
"Default: auto")
parser.add_argument("-o", dest="output_base",
help="output folder.")
parser.add_argument("-t", dest="threads", type=int, default=2,
help="threads.")
parser.add_argument("--continue", dest="resume", default=False, action="store_true")
parser.add_argument("--seed", dest="random_seed", default=12345, type=int,
help="Seed for random number generator. Default: %(default)s")
parser.add_argument("--draw", dest="draw_plot", default=False, action="store_true",
help="Draw density plot using matplotlib, which should be installed.")
parser.add_argument("--plot-format", dest="plot_format", default="pdf,png",
help='Default: pdf,png')
parser.add_argument("--plot-title", dest="plot_title",
help="Default: `the file name of the input fasta`")
parser.add_argument("--plot-subtitle", dest="plot_subtitle", default="",
help="A 4-space indicates a line break. Default: None")
parser.add_argument("--plot-transparent", dest="plot_transparent", default=False, action="store_true",
help="Default: False")
parser.add_argument("--plot-x-density", dest="plot_x_density", default=12000., type=float,
help="Default: %(default)s")
# parser.add_argument("--plot-x-sliding-window", dest="sliding_window_size", default=1, type=int,
# help="Default: %(default)s")
parser.add_argument("--plot-x-gap-dots", dest="gap_len", default=3000, type=int,
help="Number of sites added in-between isolated contigs. Default: %(default)s")
parser.add_argument("--plot-figure-height", dest="figure_height", default=5., type=float,
help="Default: %(default)s")
parser.add_argument("--plot-y-lim", dest="y_lim", type=float,
help="Y axis value limit. ")
# parser.add_argument("--plot-figure-extra-width", dest="extra_width", default=3., type=float,
# help="Default: %(default)s")
parser.add_argument("--plot-font", dest="plot_font", default=None,
help="For plot of unicode characters in some environments. Use 'Times New Roman','Arial' etc. "
"Default: %(default)s.")
parser.add_argument("--disable-customized-error-rate", dest="customized_error_rate", default=True,
action="store_true")
parser.add_argument("--which-bowtie2", dest="which_bowtie2", default="",
help="Assign the path to Bowtie2 binary files if not added to the path. "
"Default: try GetOrganelleDep/" + SYSTEM_NAME + "/bowtie2 first, then $PATH")
parser.add_argument("--bowtie2-mode", dest="bowtie2_mode", default="--sensitive",
help="Default: %(default)s")
parser.add_argument("--bowtie2-options", dest="other_bowtie2_options", default="--no-discordant --dovetail",
help="Default: %(default)s")
parser.add_argument("--stat-mode", dest="stat_mode", default="best",
help="Statistical mode for counting multiple hits of a single read: best/all. "
"The all mode is meaningful only when '-k <INT>' was included in '--bowtie2-options'. "
"Default: %(default)s")
parser.add_argument("--debug", dest="debug_mode", default=False, action="store_true",
help="Turn on debug mode.")
parser.add_argument("-v", "--version", action="version",
version="GetOrganelle v{version}".format(version=get_versions()))
options = parser.parse_args()
if not (options.fasta and
((options.original_fq_1 and options.original_fq_2) or options.unpaired_fq_files)
and options.output_base):
sys.stderr.write("Insufficient arguments!\n")
sys.exit()
if not os.path.isdir(options.output_base):
os.mkdir(options.output_base)
if options.debug_mode:
log_level = "DEBUG"
else:
log_level = "INFO"
assert options.stat_mode in ("best", "all")
log_handler = simple_log(logging.getLogger(), options.output_base, "", log_level=log_level)
log_handler.info("")
log_handler.info("Python " + str(sys.version).replace("\n", " "))
log_handler.info("PLATFORM: " + " ".join(platform.uname()))
# log versions of python libs
lib_versions_info = []
if options.draw_plot:
try:
import matplotlib
except ImportError:
pass
else:
lib_versions_info.append("matplotlib " + matplotlib.__version__)
lib_versions_info.append("GetOrganelleLib " + GetOrganelleLib.__version__)
log_handler.info("PYTHON LIBS: " + "; ".join(lib_versions_info))
# log versions of dependencies
dep_versions_info = []
if not options.which_bowtie2:
try_this_bin = os.path.join(GO_DEP_PATH, "bowtie2", "bowtie2")
if os.path.isfile(try_this_bin) and executable(try_this_bin):
options.which_bowtie2 = os.path.split(try_this_bin)[0]
if not executable(os.path.join(options.which_bowtie2, "bowtie2")):
log_handler.error(os.path.join(options.which_bowtie2, "bowtie2") + " not accessible!")
exit()
else:
output, err = subprocess.Popen(
os.path.join(options.which_bowtie2, "bowtie2") + " --version", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True).communicate()
this_lines = output.decode("utf8").split("\n")[:3]
dep_versions_info.append("Bowtie2 " + this_lines[0].split()[-1].strip())
if not executable(os.path.join(options.which_bowtie2, "bowtie2-build") + " --large-index"):
log_handler.error(os.path.join(options.which_bowtie2, "bowtie2-build") + " not accessible!")
exit()
log_handler
|
okfde/froide-campaign
|
froide_campaign/migrations/0008_campaignpage.py
|
Python
|
mit
| 1,181 | 0.000847 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-18 15:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("froide_campaign", "0007_campaign_subject_template"),
]
operations = [
migrations.CreateModel(
name="CampaignPage",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=255)),
("slug", models.SlugField()),
("description", models.TextField(blank=True)),
("public", models.BooleanField(default=False)),
("campaigns", models.ManyToManyField(to="fro
|
ide_campaign.Campaign")),
],
options={
"verbose_name": "Campaign page",
"verbose_name_plural": "Campaign pages",
},
),
]
|
zamanashiq3/code-DNN
|
time_dis_cnn.py
|
Python
|
mit
| 2,449 | 0.02205 |
"""
Multiple stacked LSTM implementation on the lip movement data.
Akm Ashiquzzaman
13101002@uap-bd.edu
Fall 2016
"""
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # random seed fixing for reproducibility
#data load & preprocessing
X_train = np.load('../data/videopart43.npy').astype('float32')
Y_train = np.load('../data/audiopart43.npy').astype('float32')
#normalizing data
X_train = X_train/255
Y_train = Y_train/32767
X_train = X_train.reshape((826,13,1,53,53)).astype('float32')
Y_train = Y_train.reshape((826,13*4702)).astype('float32')
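# Shapes after reshaping: inputs are 826 samples x 13 timesteps x (1, 53, 53)
# frames; targets are the 13 * 4702 audio values per sequence, flattened.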
from keras.models import Sequential
from keras.layers import Dense,Activation,Dropout,TimeDistributed,LSTM,Bidirectional
from keras.layers import Convolution2D,Flatten,MaxPooling2D
import time
print("Building Model.....")
model_time = time.time()
model = Sequential()
model.add(TimeDistributed(Convolution2D(64, 3, 3,border_mode='valid'),batch_input_shape=(14,13,1,53,53),input_shape=(13,1,53,53)))
model.add(Activation('tanh'))
model.add(Dropout(0.25))
model.add(TimeDistributed(Convolution2D(32, 2, 2, border_mode='valid')))
model.add(Activation('tanh'))
model.add(TimeDistributed(Flatten()))
model.add(Bidirectional(LSTM(256,return_sequences=True,stateful=True)))
model.add(Dropout(0.20))
model.add(Bidirectional(LSTM(128,return_sequences=True,stateful=True)))
model.add(Dropout(0.20))
model.add((LSTM(64,stateful=True)))
model.add(Dropout(0.20))
model.add((Dense(512)))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add((Dense(13*4702)))
model.add(Activation('tanh'))
model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
#checkpoint import
from keras.callbacks import ModelCheckpoint
from os.path import isfile, join
#weight file name
weight_file = '../weights/time-dis-cnn_weight.h5'
#loading previous weight file for resuming training
if isfile(weight_file):
model.load_weights(weight_file)
#weight-checkmark
checkpoint = ModelCheckpoint(weight_file, monitor='acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
print("model compile time: "+str(time.time()-model_time)+'s')
# fit the model
model.fit(X_train,Y_train, nb_epoch=1, batch_size=14,callbacks=callbacks_list)
pred = model.predict(X_train,batch_size=14,verbose=1)
pred = pred*32767
pred = pred.reshape(826*13,4702)
print('pred shape',pred.shape)
print('pred dtype',pred.dtype)
np.save('../predictions/pred-time-cnn.npy',pred)
|
smartforceplus/SmartForceplus
|
openerp/addons/project_stage/__openerp__.py
|
Python
|
agpl-3.0
| 1,603 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'TAG Project Stage',
'version': '1.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'description': """
Project Stage
=====================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'project',
],
'data': [
'view/project_view.xml',
'view/menu_item.xml',
        'security/ir.model.access.csv',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
lbybee/NVLDA
|
code/build_dataset.py
|
Python
|
gpl-2.0
| 865 | 0.002312 |
from datetime import datetime
import csv
import pandas
import os
import sys
os.chdir(sys.argv[1])
ticker_f = open(sys.argv[2], "rb")
ticker_reader = csv.reader(ticker_f)
tickers = [r[0] for r in ticker_reader][1:]
ticker_f.close()
tln = len(tickers)
t_1 = datetime.now()
# build full data frame
res = None
for i, t in enumerate(tickers):
t_n = t.split("/")[1]
df = pandas.io.parsers.read_csv("%s.csv" % t_n)
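    # per-ticker one-day relative change: (previous close - close) / close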
df[t_n] = (df["Close"].shift(1) - df["Close"]) / df["Close"]
df = df[["Date", t_n]]
df.set_index("Date")
if res is None:
res = df
else:
        res = res.merge(df, on="Date", how="outer")
print i, i * 100. / tln, datetime.now() - t_1
res = res.dropna(axis=0, thresh=int(sys.argv[3]))  # drop many missing obs (thresh assumed: min non-NA values to keep)
res = res.dropna(axis=1, thresh=int(sys.argv[4]))  # drop many missing vars
res = res.dropna()
res.to_csv(sys.argv[5])
|
nikhilponnuru/codeCrumbs
|
code/code_display.py
|
Python
|
mit
| 3,605 | 0.035229 |
#!/usr/bin/env python
#want to display file contents
#testing display code
import pyperclip
import re
import subprocess
import os,sys,time
counter=1
already_checked=''
def get_extension(file_name):
if file_name.find('.')!=-1:
ext = file_name.split('.')
        return ext[-1] #use the last component so names with multiple dots work
else:
return 'txt'
def cut(str, len1):
return str[len1 + 1:] #to remove first line which is meant for reading from which file
#for displaying contents
def find(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
#ubuntu notification (message sending)
def sendmessage(message):
subprocess.Popen(['notify-send', message])
return
while(True):
    str = pyperclip.paste() #bring the clipboard content into the str variable
str_low= str.lower()
str_lower =str_low.split("\n")
if(str_lower[0]=="stop -safe"):
sendmessage("Stopped the background process for code snippet management...byebye")
        sys.exit()
if (str_lower[0].find("from")!=-1 and str_lower[0].find("code") !=-1 and
str_lower[0].find("snippet") !=-1 and str_lower[0].find("-safe") !=-1):
if (re.search(r'\w+\.[a-z,A-Z,0-9]',str_lower[0])!=None and str_lower[0].find("-deep")==-1): #filename is given
str1=str.split(' ') #split() returns a list [ ]
            file_str=str1[(len(str1)-2)] #in the first line take the second-to-last element of the list
            file_str=file_str.replace(".txt"," ") #if filename.txt is given, remove the txt and search; for all others we need the extension
if(file_str==already_checked):
continue
str2= find(file_str,"/home/nikhil/code_snippets") #finding the directory of the file from where code to be copied
try:
file1= open(str2,"r")
except:
print "ohhh mann"
sendmessage("file not found in codesnippets sorry")
already_checked=file_str
continue
pyperclip.copy(file1.read())
sendmessage("Fetched press ctrl+v")
elif (str_lower[0].find("-deep")!=-1 and re.search("\'[a-z,A-Z,0-9, ]+\'",str_lower[0])!=None):#text is given and need to grep it
        search_string = re.search("\'[a-z,A-Z,0-9, ]+\'",str_lower[0])
if search_string!=None:
entered_string = search_string.group()
final_search_string=entered_string[1:len(entered_string)-1]
try:
hosts = subprocess.check_output("grep '%s' /home/nikhil/code_snippets -r" % (final_search_string), shell=True) #http://stackoverflow.com/questions/12809467/how-to-get-output-of-grep-command-python
lists=re.findall(r"/[a-z,A-Z,0-9]+\.[a-z]+",hosts)
                #before using the line below, e.g. /ooh.py
                s=lists[0][1:]
                #after using the line above, e.g. ooh.py
extension=get_extension(s)
print extension
file_obj=open('/home/nikhil/code_snippets/'+extension.upper()+'/'+s,'r')
pyperclip.copy(file_obj.read())
sendmessage("Success..Fetched!")
except:
sendmessage("unable to find")
else:
sendmessage("You Entered Non Existing Search String..")
os.system('python /home/nikhil/Desktop/haha.py')
#todo file not found exception is being raised --unlimited times
#same comment in multiple files means it is showing only first file with that comment --handle this
|
skill-huddle/skill-huddle
|
dummy_data/populatedb.py
|
Python
|
mit
| 10,113 | 0.014734 |
import os,sys,django
sys.path.append(os.path.dirname(os.path.abspath('.')))
os.environ["DJANGO_SETTINGS_MODULE"] = 'skill_huddle.settings'
django.setup()
from sh_app.models import SH_User,League,Suggestion,Huddle
from django.contrib.auth.models import User
from django_countries import countries
from localflavor.us.us_states import STATE_CHOICES
from django.utils import timezone
import random
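# NOTE: the word lists and aristotle.txt are opened via relative paths, so
# run this script from the dummy_data directory (an assumption from the paths).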
def createUsers():
random.seed()
with open('adjectives.txt','r') as adjs,\
open('nouns.txt','r') as nouns:
list_adjs = adjs.readlines()
list_nouns = nouns.readlines()
for i in range(1,100):
#create and save user object
#random user name
            first_name = list_adjs[random.randint(0,len(list_adjs)-1)].replace('\n','')
            last_name = list_nouns[random.randint(0,len(list_nouns)-1)].replace('\n','')
usrname = (first_name + '_' + last_name)[:30]
usr = User(username = usrname,email = "gen123@gmail.com")
usr.set_password("zxcvbnm,")
usr.first_name = first_name
usr.last_name = last_name
sh = SH_User()
try:
usr.save()
#create and save sh user
sh.user = usr
sh.first_name = first_name
sh.last_name = last_name
sh.save()
except:
                message = "failed to create user: %s" % usrname
                print(message)
def createLeagues():
random.seed()
sh_users = SH_User.objects.all()
list_countries = list(map(lambda x: x[0],list(countries)))
list_states = list(map(lambda x: x[0],list(STATE_CHOICES)))
string_book = ''
with open('aristotle.txt','r') as fi:
string_book = fi.read()
for i in range(1,10):
new_league = League()
with open('adjectives.txt','r') as adjs,open('nouns.txt','r') as nouns:
list_adjs = adjs.readlines()
list_nouns = nouns.readlines()
name = "%s %s league" % (list_adjs[random.randint(0,len(list_adjs)-1)].replace('\n',''), list_nouns[random.randint(0,len(list_nouns))].replace('\n',''))
desc_start = random.randint(0,82824 - 300)
description = string_book[desc_start : desc_start + 160]
country = list_countries[random.randint(0,len(list_countries) -1)]
if country == 'US':
new_league.state = list_states[random.randint(0,len(list_states) -1)]
            city = list_nouns[random.randint(0,len(list_nouns)-1)].replace('\n','')
new_league.city = city
new_league.name = name.capitalize()
new_league.decription = description
new_league.country = country
new_league.head_official = sh_users[random.randint(0,len(sh_users) - 1)]
try:
new_league.save()
new_league.officials.add(new_league.head_official)
new_league.members.add(new_league.head_official)
except:
errormsg = 'Failed to create league: %s' % new_league.name
print(errormsg)
def addLeagueMembers():
random.seed()
#add sh_users to the list
for league in League.objects.all():
usrs = list(SH_User.objects.all())
usrs.remove(league.head_official)
for i in range(0,25):
new_member = usrs[random.randint(0,len(usrs) - 1)]
usrs.remove(new_member)
try:
league.members.add(new_member)
except:
errormsg = "Failed to add member: %s" % new_member
print(errormsg)
def addLeagueOfficials():
random.seed()
for league in League.objects.all():
list_members = list(league.members.all())
list_members.remove(league.head_official)
for i in range(0,3):
new_official = list_members[random.randint(0,len(list_members) -1)]
list_members.remove(new_official)
try:
league.officials.add(new_official)
except:
errormsg = "Feiled to add official: %s" % new_official
def createSuggestions():
random.seed()
with open('adjectives.txt','r') as adjs,\
open('nouns.txt','r') as nouns:
list_adjs = adjs.readlines()
list_nouns = nouns.readlines()
string_book = ''
with open('aristotle.txt','r') as fi:
string_book = fi.read()
for league in League.objects.all():
for i in range(0,10):
tot_members = league.members.count()
rand_user = league.members.all()[random.randint(0,tot_members -1)]
name = list_adjs[random.randint(0,len(list_adjs)-1)].strip('\n') +\
" " + list_nouns[random.randint(0,len(list_nouns)-1)].strip('\n') +\
" " + list_nouns[random.randint(0,len(list_nouns)-1)] + " suggestion"
desc_start = random.randint(0,82824 - 300)
description = string_book[desc_start: desc_start + 200]
new_suggestion = Suggestion()
new_suggestion.name = name.capitalize()
new_suggestion.suggested_by = rand_user
new_suggestion.description = description
new_suggestion.voting_starts = timezone.now() -\
timezone.timedelta(days=random.randint(0,10))
new_suggestion.voting_ends = new_suggestion.voting_starts +\
timezone.timedelta(days=random.randint(1,10))
try:
new_suggestion.league = league
new_suggestion.save()
if new_suggestion.voting_ends < timezone.now():
random_int = random.randint(0, 2)
if random_int == 0:
for sh_user in league.members.all():
new_suggestion.upvotes.add(sh_user)
new_suggestion.is_accepted = True
new_suggestion.save()
except:
errormsg = "Failed to add Suggestion: %s" % new_suggestion
print(errormsg)
def voteOnSuggestions():
random.seed()
for league in League.objects.all():
for suggestion in league.suggestions.all():
for member in league.members.all():
votetype = random.randint(0,2)
if votetype > 0:
if votetype == 1:
#upvote
try:
suggestion.upvotes.add(member)
except:
errormsg = "Failed to add upvoter %s" % member
print(errormsg)
else:
#downvote
try:
suggestion.downvotes.add(member)
except:
errormsg = "Failed to add downvoter %s" % member
print(errormsg)
def clearVotes():
for league in League.objects.all():
for suggestion in league.suggestions.all():
try:
suggestion.upvotes.clear()
except:
errormsg = "Failed to clear upvotes for %s" % suggestion
print(errormsg)
try:
suggestion.downvotes.clear()
except:
errormsg = "Failed to clear downvotes for %s" % suggestion
print(errormsg)
def createHuddles():
random.seed()
list_adjs = []
list_nouns = []
list_roadtype = ['Avenue','Road','Street','Drive']
string_book = ''
with open('adjectives.txt','r') as adjs,open('nouns.txt','r') as nouns,\
open('aristotle.txt','r') as fi:
list_adjs = adjs.readlines()
list_nouns = nouns.readlines()
string_book = fi.read()
for league in League.objects.all():
for i in range(0,10):
            name = list_adjs[random.randint(1,len(list_adjs))-1].strip('\n') + " " + list_nouns[random.randint(1,len(list_nouns))-1].strip('\n') + " huddle"
address = str(random.randint(1,1000
|
CN-UPB/OpenBarista
|
utils/decaf-utils-protocol-stack/decaf_utils_protocol_stack/rpc/json_rpc_application.py
|
Python
|
mpl-2.0
| 7,693 | 0.0039 |
##
# Copyright 2016 DECaF Project Group, University of Paderborn
# This file is part of the decaf orchestration framework
# All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
##
import uuid
from twisted.internet.defer import Deferred, inlineCallbacks
from multiprocessing.dummy import Pool
from decaf_utils_protocol_stack import JsonRpcMessageApplication
from decaf_utils_protocol_stack.json import JsonRPCCall, JsonRPCNotify, JsonRPCError, JsonRPCResult
from decaf_utils_protocol_stack.rpc.sync_result import sync
class RpcLayer(JsonRpcMessageApplication):
def __init__(self, host_url=u"amqp://127.0.0.1", ioloop=None, **params):
super(RpcLayer, self).__init__(host_url=host_url, ioloop=ioloop, **params)
self._pending = dict()
self._pool = Pool()
self.msg_handler = None
def send_result(self, receiver, result, id):
exchange = str(receiver).split(".")[0]
self.route(receiver, JsonRPCResult(result=result, id=id), exchange=exchange)
def send_error(self, receiver, code, id, *args, **kwargs):
self.route(receiver, JsonRPCError(code=code, id=id, args=args, kwargs=kwargs))
def call(self, method, *args, **kwargs):
corr_id = str(uuid.uuid4())
ret = Deferred()
self._pending[corr_id] = ret
self.route(method, JsonRPCCall(method=method, args=args, kwargs=kwargs, id=corr_id))
return ret
def publish(self, routing_key, msg, **params):
corr_id = str(uuid.uuid4())
ret = Deferred()
self._pending[corr_id] = ret
self.route(routing_key, JsonRPCNotify(method=routing_key, args=(msg,)), **params)
return ret
def callSync(self, timeout, rpc_name, *args, **kwargs):
@sync(timeout=timeout)
def wrap():
return self.call(rpc_name, *args, **kwargs)
return wrap()
def notify(self, method, *args, **kwargs):
self.publish(method, JsonRPCNotify(method=method, args=args, kwargs=kwargs))
def deregister(self, routing_key, **params):
params["method"] = "anycast"
super(RpcLayer, self).unsubscribe(routing_key, **params)
def unsubscribe(self, routing_key, **params):
super(RpcLayer, self).unsubscribe(routing_key, **params)
def subscribe(self, routing_key, function_pointer, frame=False, **params):
self.logger.debug("Subscribing to %s with params: %s" % (routing_key, params))
if function_pointer is None:
function_pointer = self.receive
else:
if not frame: function_pointer = self._make_handler(function_pointer)
function_pointer = self.apply_in_pool(function_pointer)
super(RpcLayer, self).subscribe(routing_key, function_pointer=function_pointer, **params)
def register_direct(self, routing_key, msg_handler):
pass
def register(self, routing_key, function_pointer=None, **params):
if function_pointer is None:
function_pointer = self.receive
else:
function_pointer = self._make_handler(function_pointer)
function_pointer = self.apply_in_pool(function_pointer)
params = params or dict()
params["method"] = "anycast"
        self._top_layer.subscribe(routing_key, function_pointer=function_pointer, **params)
def receive(self, *args, **kwargs):
        self._pool.apply_async(func=self._receive, args=args, kwds=kwargs)
def apply_in_pool(self, function):
def apply_f(*args, **kwargs):
self._pool.apply_async(func=function, args=args, kwds=kwargs)
apply_f.func_name = function.func_name
return apply_f
def _make_handler(self, function):
"""
This method creates a wrapper for the given "function".
This serves two purposes:
A) Send the result back to the caller.
B) Create an environment for asynchronous RPC within function.
:param function:
:return:
"""
# ----------------- INTERNAL FUNCTION ------------------------------------------------------------
@inlineCallbacks
def on_call(routing_key, message, sender=None, **params):
assert self.logger
if isinstance(message, JsonRPCCall):
try:
self.logger.info("-------------------CALL TO COMPONENT-----------------------")
self.logger.info("Executing function '%s' with argument(s) %s and %s", function.func_name,
message.get_args, message.get_kwargs)
res = yield function(*message.get_args, **message.get_kwargs)
# self._out_channel.basic_ack(delivery_tag=delivery_tag)
self.send_result(result=res, receiver=sender, id=message.get_id)
except BaseException as e:
self.logger.info("----------------CALL TO COMPONENT FAILED---------------------")
self.logger.exception("Message: \n %s \n caused an Error: \n %s" % (message, e))
self.send_error(code=1, message=e.message, receiver=sender, id=message.get_id, args=e.args)
except:
self.logger.info("-----------------CALL TO COMPONENT FAILED---------------------")
self.logger.exception("Message: \n %s \n caused an Error" % (message))
self.send_error(code=1, receiver=sender, id=message.get_id)
if isinstance(message, JsonRPCNotify):
try:
self.logger.info("--------------DELIVER EVENT TO COMPONENT---------------------------")
self.logger.info("Executing function '%s' with argument(s) %s and %s", function.func_name,
message.get_args, message.get_kwargs)
function(*message.get_args, **message.get_kwargs)
except BaseException as e:
self.logger.info("--------------DELIVER EVENT TO COMPONENT FAILED---------------------")
self.logger.exception("Message: \n %s \n caused an Error: \n %s" % (message, e))
# ----------------- INTERNAL FUNCTION ------------------------------------------------------------
return on_call
def _receive(self, routing_key, message, sender=None, **params):
if isinstance(message, JsonRPCResult):
self.logger.info("----------------RECEIVED A RESULT---------------------")
self.logger.info("Result received: \n %s" % (message))
corr_id = message.get_id
deferred = self._pending.get(corr_id, None)
if deferred:
deferred.callback(message.get_result)
del self._pending[corr_id]
if isinstance(message, JsonRPCError):
self.logger.info("----------------RECEIVED AN ERROR---------------------")
self.logger.exception("Error received: \n %s" % (message))
corr_id = message.get_id
deferred = self._pending.get(corr_id, None)
if deferred:
deferred.errback(message)
del self._pending[corr_id]
if self.msg_handler:
self.msg_handler(routing_key, message, sender, **params)
pass
def get_transport_layer(self):
return super(RpcLayer, self).get_transport_layer()
def set_msg_handler(self, msg_handler):
self.msg_handler = msg_handler
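# Minimal usage sketch (assumptions: a reachable AMQP broker at the default
# host_url; "math.add" is a hypothetical routing key):
#
#   rpc = RpcLayer()
#   rpc.register("math.add", lambda a, b: a + b)
#   d = rpc.call("math.add", 1, 2)  # twisted Deferred that fires with 3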
|
jvanbrug/scout
|
functional_tests.py
|
Python
|
mit
| 1,460 | 0 |
import pytest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
@pytest.fixture(scope='function')
def browser(request):
browser_ = webdriver.Firefox()
def fin():
browser_.quit()
request.addfinalizer(fin)
return browser_
def test_can_show_a_relevant_code_snippet(browser):
# Jan visits the site
browser.get('http://localhost:8000')
# He notices the title and header reference the site name
site_name = 'Scout'
assert site_name in browser.title
header_text = browser.find_element_by_tag_name('h1').text
assert site_name in header_text
# He is invited to search for code snippets
expected_search_prompt = 'Enter some code-related keywords'
search_box = browser.find_element_by_id('id_search_box')
    actual_search_prompt = search_box.get_attribute('placeholder')
    assert actual_search_prompt == expected_search_prompt
# He searches "python yield"
search_box.send_keys('python yield')
search_box.send_keys(Keys.ENTER)
# The page updates, and now the page shows a code snippet
# that uses the dummy variables "mylist" and "mygenerator"
# (the highest-voted python page on StackOverflow.com is
# /questions/231767/what-does-the-yield-keyword-do-in-python)
snippets = browser.find_elements_by_tag_name('code')
assert any(['mylist' in snippet.text and 'mygenerator' in snippet.text
for snippet in snippets])
|
tkaitchuck/nupic
|
examples/prediction/experiments/confidenceTest/2/description.py
|
Python
|
gpl-3.0
| 2,040 | 0.005392 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Tests the following set of sequences:
z-a-b-c: (1X)
a-b-c: (6X)
a-d-e: (2X)
a-f-g-a-h: (1X)
We want to ensure that when we see 'a', we predict 'b' with the highest
confidence, then 'd', then 'f' and 'h' with equally low confidence.
We expect the following prediction scores:
inputPredScore_at1 : 0.7
  inputPredScore_at2 : 1.0
inputPredScore_at3 : 1.0
inputPredScore_at4 : 1.0
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
config = dict(
sensorVerbosity=0,
spVerbosity=0,
tpVerbosity=0,
ppVerbosity=2,
filenameTrain = 'confidence/confidence2.csv',
filenameTest = 'confidence/confidence2.csv',
iterationCountTrain=None,
iterationCountTest=None,
trainTPRepeats = 5,
trainTP=True,
)
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
|
gaeun/open-event-orga-server
|
app/api/tracks.py
|
Python
|
gpl-3.0
| 3,167 | 0.000316 |
from flask.ext.restplus import Namespace
from app.models.track import Track as TrackModel
from .helpers import custom_fields as fields
from .helpers.helpers import (
can_create,
can_update,
can_delete,
requires_auth
)
from .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, ServiceDAO, \
PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, SERVICE_RESPONSES
from .helpers.utils import Resource, ETAG_HEADER_DEFN
api = Namespace('tracks', description='Tracks', path='/')
TRACK_SESSION = api.model('TrackSession', {
'id': fields.Integer(required=True),
'title': fields.String(),
})
TRACK = api.model('Track', {
'id': fields.Integer(required=True),
'name': fields.String(required=True),
'description': fields.String(),
'color': fields.Color(required=True),
'track_image_url': fields.Upload(),
'location': fields.String(),
'sessions': fields.List(fields.Nested(TRACK_SESSION)),
})
TRACK_PAGINATED = api.clone('TrackPaginated', PAGINATED_MODEL, {
'results': fields.List(fields.Nested(TRACK))
})
TRACK_POST = api.clone('TrackPost', TRACK)
del TRACK_POST['id']
del TRACK_POST['sessions']
# Create DAO
class TrackDAO(ServiceDAO):
version_key = 'tracks_ver'
DAO = TrackDAO(TrackModel, TRACK_POST)
@api.route('/events/<int:event_id>/tracks/<int:track_id>')
@api.doc(responses=SERVICE_RESPONSES)
class Track(Resource):
@api.doc('get_track')
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_with(TRACK)
def get(self, event_id, track_id):
"""Fetch a track given its id"""
return DAO.get(event_id, track_id)
@requires_auth
@can_delete(DAO)
@api.doc('delete_track')
@api.marshal_with(TRACK)
def delete(self, event_id, track_id):
"""Delete a track given its id"""
return DAO.delete(event_id, track_id)
@requires_auth
@can_update(DAO)
@api.doc('update_track', responses=PUT_RESPONSES)
@api.marshal_with(TRACK)
@api.expect(TRACK_POST)
def put(self, event_id, track_id):
"""Update a track given its id"""
return DAO.update(event_id, track_id, self.api.payload)
@api.route('/events/<int:event_id>/tracks')
class TrackList(Resource):
@api.doc('list_tracks')
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_list_with(TRACK)
def get(self, event_id):
"""List all tracks"""
return DAO.list(event_id)
@requires_auth
@can_create(DAO)
@api.doc('create_track', responses=POST_RESPONSES)
@api.marshal_with(TRACK)
@api.expect(TRACK_POST)
def post(self, event_id):
"""Create a track"""
        return DAO.create(
event_id,
self.api.payload,
self.api.url_for(self, event_id=event_id)
)
@api.route('/events/<int:event_id>/tracks/page')
class TrackListPaginated(Resource, PaginatedResourceBase):
@api.doc('list_tracks_paginated', params=PAGE_PARAMS)
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_with(TRACK_PAGINATED)
def get(self, event_id):
"""List tracks in a paginated manner"""
args = self.parser.parse_args()
return DAO.paginated_list(args=args, event_id=event_id)
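# Route summary (sketch; the event/track ids are illustrative):
#   GET            /events/1/tracks        -> list tracks for event 1
#   POST           /events/1/tracks        -> create a track (TRACK_POST payload)
#   GET            /events/1/tracks/page   -> paginated listing (PAGE_PARAMS)
#   GET/PUT/DELETE /events/1/tracks/2      -> fetch/update/delete track 2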
|
robwarm/gpaw-symm
|
doc/documentation/tddft/Be_8bands_lrtddft_dE.py
|
Python
|
gpl-3.0
| 218 | 0.004587 |
from gpaw import GPAW
from gpaw.lrtddft import LrTDDFT
c = GPAW('Be_gs_8bands.gpw')
dE = 10 # maximal Kohn-Sham transition energy to consider in eV
lr = LrTDDFT(c, xc='LDA', energy_range=dE)
lr.write('lr_dE.dat.gz')
|
EmreAtes/spack
|
lib/spack/spack/test/sbang.py
|
Python
|
lgpl-2.1
| 6,406 | 0.001405 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""\
Test that Spack's shebang filtering works correctly.
"""
import os
import stat
import pytest
import tempfile
import shutil
import filecmp
from llnl.util.filesystem import mkdirp
import spack
from spack.hooks.sbang import shebang_too_long, filter_shebangs_in_directory
from spack.util.executable import which
short_line = "#!/this/is/short/bin/bash\n"
long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
lua_line = "#!/this/" + ('x' * 200) + "/is/lua\n"
lua_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
lua_line_patched = "--!/this/" + ('x' * 200) + "/is/lua\n"
node_line = "#!/this/" + ('x' * 200) + "/is/node\n"
node_in_text = ("line\n") * 100 + "node\n" + ("line\n" * 100)
node_line_patched = "//!/this/" + ('x' * 200) + "/is/node\n"
sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
last_line = "last!\n"
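# What the test expects from sbang patching (inferred from the assertions
# below): a too-long "#!" line is kept but prefixed with sbang_line, and for
# lua/node scripts the original shebang's "#" is swapped for that language's
# comment leader ("--" for lua, "//" for node).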
class ScriptDirectory(object):
"""Directory full of test scripts to run sbang instrumentation on."""
def __init__(self):
self.tempdir = tempfile.mkdtemp()
self.directory = os.path.join(self.tempdir, 'dir')
mkdirp(self.directory)
# Script with short shebang
self.short_shebang = os.path.join(self.tempdir, 'short')
with open(self.short_shebang, 'w') as f:
f.write(short_line)
f.write(last_line)
# Script with long shebang
self.long_shebang = os.path.join(self.tempdir, 'long')
with open(self.long_shebang, 'w') as f:
f.write(long_line)
f.write(last_line)
# Lua script with long shebang
self.lua_shebang = os.path.join(self.tempdir, 'lua')
with open(self.lua_shebang, 'w') as f:
f.write(lua_line)
f.write(last_line)
# Lua script with long shebang
self.lua_textbang = os.path.join(self.tempdir, 'lua_in_text')
with open(self.lua_textbang, 'w') as f:
f.write(short_line)
f.write(lua_in_text)
f.write(last_line)
# Node script with long shebang
self.node_shebang = os.path.join(self.tempdir, 'node')
with open(self.node_shebang, 'w') as f:
f.write(node_line)
f.write(last_line)
# Node script with long shebang
self.node_textbang = os.path.join(self.tempdir, 'node_in_text')
with open(self.node_textbang, 'w') as f:
f.write(short_line)
f.write(node_in_text)
f.write(last_line)
# Script already using sbang.
self.has_sbang = os.path.join(self.tempdir, 'shebang')
with open(self.has_sbang, 'w') as f:
f.write(sbang_line)
f.write(long_line)
f.write(last_line)
# Fake binary file.
self.binary = os.path.join(self.tempdir, 'binary')
tar = which('tar', required=True)
tar('czf', self.binary, self.has_sbang)
def destroy(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
@pytest.fixture
def script_dir():
sdir = ScriptDirectory()
yield sdir
sdir.destroy()
def test_shebang_handling(script_dir):
assert shebang_too_long(script_dir.lua_shebang)
assert shebang_too_long(script_dir.long_shebang)
assert not shebang_too_long(script_dir.short_shebang)
assert not shebang_too_long(script_dir.has_sbang)
assert not shebang_too_long(script_dir.binary)
assert not shebang_too_long(script_dir.directory)
filter_shebangs_in_directory(script_dir.tempdir)
# Make sure this is untouched
with open(script_dir.short_shebang, 'r') as f:
assert f.readline() == short_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.long_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.lua_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == lua_line_patched
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.node_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == node_line_patched
assert f.readline() == last_line
assert filecmp.cmp(script_dir.lua_textbang,
os.path.join(script_dir.tempdir, 'lua_in_text'))
assert filecmp.cmp(script_dir.node_textbang,
os.path.join(script_dir.tempdir, 'node_in_text'))
# Make sure this is untouched
with open(script_dir.has_sbang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
def test_shebang_handles_non_writable_files(script_dir):
# make a file non-writable
st = os.stat(script_dir.long_shebang)
not_writable_mode = st.st_mode & ~stat.S_IWRITE
os.chmod(script_dir.long_shebang, not_writable_mode)
test_shebang_handling(script_dir)
st = os.stat(script_dir.long_shebang)
assert oct(not_writable_mode) == oct(st.st_mode)
|
pylover/khayyam
|
khayyam/tests/test_algorithms.py
|
Python
|
gpl-3.0
| 4,401 | 0.002954 |
# -*- coding: utf-8 -*-
import unittest
from khayyam import algorithms_c as alg_c
from khayyam import algorithms_pure as alg_p
__author__ = 'vahid'
# TODO: test with negative values
class TestCAlgorithms(unittest.TestCase):
def test_get_julian_day_from_gregorian(self):
self.assertRaises(ValueError, alg_p.get_julian_day_from_gregorian_date, 2016, 2, 30)
self.assertRaises(ValueError, alg_p.get_julian_day_from_gregorian_date, 2015, 2, 29)
        self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, 2016, 2, 30)
self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, 2015, 2, 29)
self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, -4713, 2, 30)
self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, -4713, 2, 29)
self.assertEqual(
alg_c.get_julian_day_from_gregorian_date(-4713, 11, 25),
alg_p.get_julian_day_from_gregorian_date(-4713, 11, 25)
)
for i in range(3000):
self.assertEqual(
alg_c.get_julian_day_from_gregorian_date(i, 1, 1),
alg_p.get_julian_day_from_gregorian_date(i, 1, 1)
)
def test_is_leap_year(self):
for i in range(3000):
self.assertEqual(
alg_c.is_jalali_leap_year(i),
alg_p.is_jalali_leap_year(i)
)
def test_days_in_year(self):
for i in range(3000):
self.assertEqual(
alg_c.get_days_in_jalali_year(i),
alg_p.get_days_in_jalali_year(i)
)
def test_days_in_month(self):
for i in range(3000):
for m in range(1, 13):
c = alg_c.get_days_in_jalali_month(i, m)
p = alg_p.get_days_in_jalali_month(i, m)
self.assertEqual(c, p, "year: %s, month: %s, results: {c: %s, py: %s}" % (i, m, c, p))
def test_julian_day_from_jalali_date(self):
for y in range(303):
for m in range(1, 13):
for d in range(1, alg_c.get_days_in_jalali_month(y, m)+1):
self.assertEqual(
alg_c.get_julian_day_from_jalali_date(y, m, d),
alg_p.get_julian_day_from_jalali_date(y, m, d),
"year: %s, month: %s, day: %s" % (y, m, d)
)
def test_jalali_date_from_julian_day(self):
jd = 0
while jd < 365 * 1000:
jd += 1
c = alg_c.get_jalali_date_from_julian_day(jd)
p = alg_p.get_jalali_date_from_julian_day(jd)
self.assertEqual(c, p, "Julian day: %s\t%s <> %s" % (jd, c, p))
def test_gregorian_date_from_julian_day(self):
jd = 0
self.assertRaises(ValueError, alg_c.get_gregorian_date_from_julian_day, jd)
self.assertRaises(ValueError, alg_p.get_gregorian_date_from_julian_day, jd)
while jd < 365 * 200:
jd += 1
self.assertEqual(
alg_c.get_gregorian_date_from_julian_day(jd),
alg_p.get_gregorian_date_from_julian_day(jd)
)
def test_jalali_date_from_gregorian_date(self):
jd = 0
while jd < 365 * 200:
jd += 1
cd = alg_c.get_gregorian_date_from_julian_day(jd)
pd = alg_p.get_gregorian_date_from_julian_day(jd)
c = alg_c.get_jalali_date_from_gregorian_date(*cd)
p = alg_p.get_jalali_date_from_gregorian_date(*pd)
self.assertEqual(c, p, 'jd: %s c: %s py: %s cdate: %s pydate: %s' % (jd, c, p, cd, pd))
def test_algorithm_import(self):
from khayyam import algorithms
self.assertTrue(hasattr(algorithms, 'is_jalali_leap_year'))
self.assertTrue(hasattr(algorithms, 'get_days_in_jalali_year'))
self.assertTrue(hasattr(algorithms, 'get_days_in_jalali_month'))
self.assertTrue(hasattr(algorithms, 'get_julian_day_from_gregorian_date'))
self.assertTrue(hasattr(algorithms, 'get_julian_day_from_jalali_date'))
self.assertTrue(hasattr(algorithms, 'get_jalali_date_from_julian_day'))
self.assertTrue(hasattr(algorithms, 'get_jalali_date_from_gregorian_date'))
self.assertTrue(hasattr(algorithms, 'get_gregorian_date_from_julian_day'))
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
idealabasu/code_pynamics
|
python/pynamics/integration.py
|
Python
|
mit
| 1,030 | 0.017476 |
import pynamics
import numpy
import logging
logger = logging.getLogger('pynamics.integration')
def integrate(*args,**kwargs):
if pynamics.integrator==0:
return integrate_odeint(*args,**kwargs)
elif pynamics.integrator==1:
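        # reorder odeint-style (func, y0, t) into RK45's (fun, t0, y0, t_bound)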
newargs = args[0],args[2][0],args[1],args[2][-1]
return integrate_rk(*newargs ,**kwargs)
def integrate_odeint(*arguments,**keyword_arguments):
import scipy.integrate
logger.info('beginning integration')
result = scipy.integrate.odeint(*arguments,**keyword_arguments)
    logger.info('finished integration')
return result
def integrate_rk(*arguments,**keyword_arguments):
import scipy.integrate
logger.info('beginning integration')
try:
result = scipy.integrate.RK45(*arguments,**keyword_arguments)
y = [result.y]
while True:
result.step()
y.append(result.y)
except RuntimeError:
pass
logger.info('finished integration')
return y
|
Hexadorsimal/pynes
|
nes/__init__.py
|
Python
|
mit
| 73 | 0 |
from .nes import Nes
from .bus.devices.cartridge import CartridgeFactory
|
gptech/ansible
|
lib/ansible/modules/packaging/os/apt.py
|
Python
|
gpl-3.0
| 35,672 | 0.003644 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Flowroute LLC
# Written by Matthew Williams <matthew@flowroute.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: apt
short_description: Manages apt-packages
description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
name:
description:
- A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards
like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let
typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for
the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding)
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
are installed.
required: false
default: present
choices: [ "latest", "absent", "present", "build-dep" ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
required: false
default: no
choices: [ "yes", "no" ]
cache_valid_time:
description:
      - Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
required: false
default: 0
purge:
description:
- Will force purging of configuration files if the module state is set to I(absent).
required: false
default: no
choices: [ "yes", "no" ]
default_release:
description:
- Corresponds to the C(-t) option for I(apt) and sets pin priorities
required: false
default: null
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install
recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
required: false
default: null
choices: [ "yes", "no" ]
force:
description:
- If C(yes), force installs/removes.
required: false
default: "no"
choices: [ "yes", "no" ]
allow_unauthenticated:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.1"
upgrade:
description:
- 'If yes or safe, performs an aptitude safe-upgrade.'
- 'If full, performs an aptitude full-upgrade.'
- 'If dist, performs an apt-get dist-upgrade.'
- 'Note: This does not upgrade a specific package, use state=latest for that.'
version_added: "1.1"
required: false
default: "no"
choices: [ "no", "yes", "safe", "full", "dist"]
dpkg_options:
description:
      - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
- Options should be supplied as comma separated list
required: false
default: 'force-confdef,force-confold'
deb:
description:
- Path to a .deb package on the remote machine.
- If :// in the path, ansible will attempt to download deb before installing. (Version added 2.1)
    required: false
version_added: "1.6"
autoremove:
description:
- If C(yes), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option.
required: false
default: no
choices: [ "yes", "no" ]
aliases: [ 'autoclean']
version_added: "2.1"
only_upgrade:
description:
- Only upgrade a package if it is already installed.
required: false
default: false
version_added: "2.1"
requirements:
- python-apt (python 2)
- python3-apt (python 3)
- aptitude
author: "Matthew Williams (@mgwilliams)"
notes:
- Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise
C(apt-get) suffices.
'''
EXAMPLES = '''
- name: Update repositories cache and install "foo" package
apt:
name: foo
update_cache: yes
- name: Remove "foo" package
apt:
name: foo
state: absent
- name: Install the package "foo"
apt:
name: foo
state: present
- name: Install the version '1.00' of package "foo"
apt:
name: foo=1.00
state: present
- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
apt:
name: nginx
state: latest
default_release: squeeze-backports
update_cache: yes
- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
apt:
name: openjdk-6-jdk
state: latest
install_recommends: no
- name: Update all packages to the latest version
apt:
upgrade: dist
- name: Run the equivalent of "apt-get update" as a separate step
apt:
update_cache: yes
- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
apt:
update_cache: yes
cache_valid_time: 3600
- name: Pass options to dpkg on run
apt:
upgrade: dist
update_cache: yes
dpkg_options: 'force-confold,force-confdef'
- name: Install a .deb package
apt:
deb: /tmp/mypackage.deb
- name: Install the build dependencies for package "foo"
apt:
pkg: foo
state: build-dep
- name: Install a .deb package from the internet.
apt:
deb: https://example.com/python-ppq_0.1-1_all.deb
'''
RETURN = '''
cache_updated:
description: if the cache was updated or not
returned: success, in some cases
type: boolean
sample: True
cache_update_time:
description: time of the last cache update (0 if unknown)
returned: success, in some cases
type: int
sample: 1425828348000
stdout:
description: output from apt
returned: success, when needed
type: string
sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..."
stderr:
description: error output from apt
returned: success, when needed
type: string
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
''' # NOQA
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import datetime
import fnmatch
import itertools
import os
import re
import sys
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.urls import fetch_url
# APT related constants
APT_ENV_VARS = dict(
DEBIAN_FRONTEND = 'noninteractive',
DEBIAN_PRIORIT