repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
---|---|---|---|---|---|---|---|---|
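Each row below pairs a source file's provenance (repository, path, language, license, size, score) with that file's text split into prefix, middle, and suffix cells. As a minimal, hedged sketch only: assuming the corpus is published on the Hugging Face Hub under a hypothetical name, and assuming StarCoder-style fill-in-the-middle sentinel tokens, one row could be assembled into a training prompt like this.

```python
# Sketch only: "example-org/python-fim-corpus" is a placeholder dataset name,
# and the <fim_*> sentinel tokens are an assumption (StarCoder-style FIM format).
from datasets import load_dataset

ds = load_dataset("example-org/python-fim-corpus", split="train")
row = ds[0]  # dict with repo_name, path, language, license, size, score, prefix, middle, suffix

# Assemble one fill-in-the-middle training string from the three text cells.
fim_prompt = (
    "<fim_prefix>" + row["prefix"]
    + "<fim_suffix>" + row["suffix"]
    + "<fim_middle>" + row["middle"]
)
print(row["repo_name"], row["path"], row["license"], row["score"])
print(fim_prompt[:120])
```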
beyoungwoo/C_glibc_Sample | _Algorithm/ProjectEuler_python/euler_17.py | Python | gpl-3.0 | 1,631 | 0.045984 |
#!/usr/bin/python -Wall
# -*- coding: utf-8 -*-
"""
<div id="content">
<div style="text-align:center;" class="print"><img src="images/print_page_logo.png" alt="projecteuler.net" style="border:none;" /></div>
<h2>Number letter counts</h2><div id="problem_info" class="info"><h3>Problem 17</h3><span>Published on Friday, 17th May 2002, 06:00 pm; Solved by 88413; Difficulty rating: 5%</span></div>
<div class="problem_content" role="problem">
<p>If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.</p>
<p>If all
|
the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used? </p>
<br />
<p class="note"><b>NOTE:</b> Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.</p>
</div><br />
<br /></div>
"""
s={0:"",1:"one",2:"two",3:"three",4:"four",5:"five",6:"six",7:"se
|
ven",8:"eight",9:"nine",10:"ten",11:"eleven",12:"twelve",13:"thirteen",14:"fourteen",15:"fifteen",16:"sixteen",17:"seventeen",18:"eighteen",19:"nineteen",20:"twenty",30:"thirty",40:"forty",50:"fifty",60:"sixty",70:"seventy",80:"eighty",90:"ninety"}
for i in range(1,1000):
if(not i in s.keys()):
if(i<100):
s[i]=s[i/10*10]+s[i%10]
else:
s[i]=s[i/100]+"hundred"
if(i%100):
s[i]+="and"+s[i%100]
s[1000]="onethousand"
total=0;
for i in s.values():
total+=len(i)
print total
|
lyuboshen/Pose-Estimation-on-Depth-Images-of-Clinical-Patients-V2.0 | src/load_data.py | Python | mit | 9,257 | 0.013179 |
import os
import numpy as np
import cv2
import json
start_index = 0
image_path = 'images/trial_3_autoencoder/'
test_path = 'test/trial_3_autoencoder/'
json_file = 'annotations/all_patient.json'
image_rows = 376
image_cols = 312
image_rows_map = 46
image_cols_map = 38
with open(json_file) as jf:
dict = json.load(jf)
def gaussian_kernel(h, w, sigma_h, sigma_w):
yx = np.mgrid[-h//2:h//2,-w//2:w//2]**2
return np.exp(-yx[0,:,:] / sigma_h**2 - yx[1,:,:] / sigma_w**2)
def max(a,b):
return a if a>=b else b
def min(a,b):
return a if a<=b else b
def gen_kernel(score_map,img_info,h, w, sigma_h, sigma_w):
kernal = gaussian_kernel(h, w, sigma_h, sigma_w)
y, x = np.unravel_index(np.argmax(score_map), [len(score_map), len(score_map[0])])
score_map[max(y-h//2,0):min(y+h//2,img_info["img_height"]), max(x-w//2,0):min(x+w//2,img_info["img_width"])] \
= kernal[max(h//2-y,0):,max(w//2-x,0):]
# cv2.imshow('after',score_map)
# cv2.waitKey()
score_map = cv2.resize(score_map, (image_cols, image_rows), interpolation=cv2.INTER_CUBIC)
# cv2.imshow('after',score_map)
# cv2.waitKey()
return score_map
def gen_center_kernel(center_map,img_info,h, w, sigma_h, sigma_w):
kernal = gaussian_kernel(h, w, sigma_h, sigma_w)
y, x = np.unravel_index(np.argmax(center_map), [len(center_map), len(center_map[0])])
center_map[max(y-h//2,0):min(y+h//2,img_info["img_height"]), max(x-w//2,0):min(x+w//2,img_info["img_width"])] \
= kernal[max(h//2-y,0):,max(w//2-x,0):]
center_map = cv2.resize(center_map, (image_cols, image_rows), interpolation=cv2.INTER_CUBIC)
return center_map
flip_map = [0, 1, 5, 6, 7, 2, 3, 4, 11, 12, 13, 8, 9, 10]
def create_train_data():
print('Creating training original images...')
print('-'*30)
i = 0
path_depth = os.path.join(image_path, 'depth_vis')
path_ir = os.path.join(image_path, 'ir')
train_depth = os.listdir(path_depth)
total_imgs = len(train_depth)*2
depth_imgs = np.ndarray((total_imgs, image_rows, image_cols, 3), dtype=np.uint8)
ir_imgs = np.ndarray((total_imgs, image_rows, image_cols, 3), dtype=np.uint8)
centers = np.ndarray((total_imgs, 2), dtype=np.int16)
annotations = np.ndarray((total_imgs, 14, 2), dtype=np.int16)
for img_info in dict:
if(img_info["patient"] != "7"):
depth_img = cv2.imread(os.path.join(path_depth, img_info["image_name"]),cv2.IMREAD_UNCHANGED)
ir_img = cv2.imread(os.path.join(path_ir, img_info["image_name"]))
depth_img_resized = cv2.resize(depth_img, (image_cols, image_rows), interpolation=cv2.INTER_NEAREST)
ir_img_resized = cv2.resize(ir_img, (image_cols, image_rows), interpolation=cv2.INTER_NEAREST)
# ir_img_resized_small = cv2.resize(ir_img, (image_cols_map, image_rows_map), interpolation=cv2.INTER_NEAREST)
depth_imgs[i,:,:,0] = depth_img_resized
depth_imgs[i,:,:,1] = depth_img_resized
depth_imgs[i,:,:,2] = depth_img_resized
depth_imgs[i+1,:,:,0] = cv2.flip(depth_img_resized,1)
depth_imgs[i+1,:,:,1] = cv2.flip(depth_img_resized,1)
depth_imgs[i+1,:,:,2] = cv2.flip(depth_img_resized,1)
ir_imgs[i] = ir_img_resized
ir_imgs[i+1] = cv2.flip(ir_img_resized, 1)
center_map = np.zeros((int(img_info["img_height"]), int(img_info["img_width"])))
center_map[img_info["objpos"][0]][img_info["objpos"][1]] = 1
center_map_resized = cv2.resize(center_map, (image_cols, image_rows), interpolation=cv2.INTER_CUBIC)
center_map_resized_fliped = cv2.flip(center_map_resized, 1)
centers[i] = np.unravel_index(np.argmax(center_map_resized), [center_map_resized.shape[0], center_map_resized.shape[1]])
centers[i+1] = np.unravel_index(np.argmax(center_map_resized_fliped), [center_map_resized_fliped.shape[0], center_map_resized_fliped.shape[1]])
for x in range(0,14):
score_map = np.zeros((int(img_info["img_height"]), int(img_info["img_width"])))
score_map[img_info["joints"][x][0]][img_info["joints"][x][1]] = 1
score_map_resized = cv2.resize(score_map, (image_cols, image_rows), interpolation=cv2.INTER_CUBIC)
score_map_resized_fliped = cv2.flip(score_map_resized, 1)
annotations[i][x] = np.unravel_index(np.argmax(score_map_resized), [score_map_resized.shape[0], score_map_resized.shape[1]])
annotations[i + 1][flip_map[x]] = np.unravel_index(np.argmax(score_map_resized_fliped), [score_map_resized_fliped.shape[0], score_map_resized_fliped.shape[1]])
# for x in range(0,14):
# score_map = np.zeros((image_rows, image_cols))
# score_map[annotations[i][x][0]][annotations[i][x][1]] = 1
# score_map1 = np.zeros((image_rows, image_cols))
# score_map1[annotations[i+1][x][0]][annotations[i+1][x][1]] = 1
# cv2.imshow('show',score_map)
# cv2.imshow('show2', score_map1)
# cv2.waitKey(1000)
if i % 100 == 0:
print('Done: {0}/{1} train original images'.format(i, total_imgs))
i += 2
print('Loading done.')
np.save('./dataset/train_autoencoder_depth.npy', depth_imgs)
np.save('./dataset/train_autoencoder_ir.npy', ir_imgs)
np.save('./dataset/train_autoencoder_center.npy', centers)
np.save('./dataset/train_autoencoder_annotation.npy', annotations)
print('Saving done.')
def create_test_data():
print('Creating test images...')
print('-' * 30)
i = 0
path_depth = os.path.join(test_path, 'depth_vis')
path_ir = os.path.join(test_path, 'ir')
test_depth = os.listdir(path_depth)
total_imgs = len(test_depth) * 2
depth_imgs = np.ndarray((total_imgs, image_rows, image_cols, 3), dtype=np.uint8)
ir_imgs = np.ndarray((total_imgs, image_rows, image_cols, 3), dtype=np.uint8)
centers = np.ndarray((total_imgs, 2), dtype=np.int16)
annotations = np.ndarray((total_imgs, 14, 2), dtype=np.int16)
for img_info in dict:
if (img_info["patient"] == "7"):
depth_img = cv2.imread(os.path.join(path_depth, img_info["image_name"]), cv2.IMREAD_UNCHANGED)
ir_img = cv2.imread(os.path.join(path_ir, img_info["image_name"]))
depth_img_resized = cv2.resize(depth_img, (image_cols, image_rows), interpolation=cv2.INTER_NEAREST)
|
ir_img_resized = cv2.resize(ir_img, (image_cols, image_rows), interpolation=cv2.INTER_NEAREST)
depth_img_resized = np.asarray(depth_img_resized)
depth_imgs[i,:,:,0] = depth_img_resized
depth_imgs[i,:,:,1] = depth_img_resized
depth_imgs[i,:,:,2] = depth_img_resized
depth_imgs[i+1,:,:,0] = cv2.flip(depth_img_resized, 1)
|
depth_imgs[i+1,:,:,1] = cv2.flip(depth_img_resized, 1)
depth_imgs[i+1,:,:,2] = cv2.flip(depth_img_resized, 1)
ir_imgs[i] = ir_img_resized
ir_imgs[i+1] = cv2.flip(ir_img_resized, 1)
center_map = np.zeros((int(img_info["img_height"]), int(img_info["img_width"])))
center_map[img_info["objpos"][0]][img_info["objpos"][1]] = 1
center_map_resized = cv2.resize(center_map, (image_cols, image_rows), interpolation=cv2.INTER_CUBIC)
center_map_resized_fliped = cv2.flip(center_map_resized, 1)
centers[i] = np.unravel_index(np.argmax(center_map_resized),
[center_map_resized.shape[0], center_map_resized.shape[1]])
centers[i + 1] = np.unravel_index(np.argmax(center_map_resized_fliped),
[center_map_resized_fliped.shape[0], center_map_resized_fliped.shape[1]])
for x in range(0, 14):
score_map = np.zeros((int(img_info["img_height"]), int(img_info["img_width"])))
score_map[img_info["joints"][x][0]][img_info["joints"][x][1]] = 1
score_map_resized = cv2.resize(score_map, (image_cols, image_rows), interpolation=
|
neulab/compare-mt | compare_mt/cache_utils.py | Python | bsd-3-clause | 642 | 0.018692 |
def extract_cache_dicts(cache_dicts, key_list, num_out):
if cache_dicts is not None:
if len(cache_dicts) != num_out:
raise ValueError(f'Length of cache_dicts should be equal to the number of output files!')
|
if len(key_list) == 1:
return [c[key_list[0]] for c in cache_dicts]
return zip(*[[c[k] for k in key_list] for c in cache_dicts])
|
return [None]*len(key_list)
def return_cache_dict(key_list, value_list):
for v in value_list:
if len(v) != 1:
raise ValueError(f'Only support caching for one system at a time!')
cache_dict = {k:v[0] for (k, v) in zip(key_list, value_list)}
return cache_dict
|
Sonictherocketman/metapipe | test/mocks.py | Python | mit | 673 | 0 |
""" A series of mocks for metapipe. """
from metapipe.models import Job
class MockJob(Job):
def __init__(self, alias, command, depends_on=[]):
super(MockJob, self).__init__(alias, command, depends_on)
|
self._submitted = False
self._done = False
self._step = 0
|
def __repr__(self):
return '<MockJob: {}>'.format(self.alias)
def submit(self):
self._step += 1
def is_running(self):
self._step += 1
return self._step > 1 and self._step < 10
def is_queued(self):
return False
def is_complete(self):
return self._step > 10
def is_fail(self):
return False
|
IsacEkberg/crag-finder | django_api/migrations/0019_auto_20160420_2323.py | Python | gpl-3.0 | 2,068 | 0.00436 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-20 21:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_api.models
class Migration(migrations.Migration):
dependencies = [
('django_api', '0018_auto_20160420_2316'),
]
operations = [
migrations.CreateModel(
name='AreaImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to=django_api.models._image_file_path, verbose_name='bild')),
('area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image', to='django_api.Area')),
],
options={
|
'verbose_name': 'områdes bild',
'verbose_name_plural': 'områdes bild',
},
),
migrations.CreateModel(
name='RockFaceImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to=django_api.models._image_file_path, verbose_name='bild')),
|
('name', models.CharField(max_length=255, null=True, verbose_name='namn')),
('description', models.TextField(blank=True, null=True, verbose_name='kort beskrivning av bilden')),
('rockface', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image', to='django_api.RockFace')),
],
options={
'verbose_name': 'bild på klippan',
'verbose_name_plural': 'bilder på klippan',
},
),
migrations.AddField(
model_name='route',
name='image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='django_api.RockFaceImage', verbose_name='bild'),
),
]
|
lanky/fabrik | fabrik/snippets/betterforms.py | Python | gpl-2.0 | 8,496 | 0.005532 |
"""
Copyright (c) 2008, Carl J Meyer
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The names of its contributors may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Time-stamp: <2008-11-21 01:54:45 carljm forms.py>
"""
from copy import deepcopy
from django import forms
from django.forms.util import flatatt
from django.utils.safestring import mark_safe
class Fieldset(object):
"""
An iterable Fieldset with a legend and a set of BoundFields.
"""
def __init__(self, form, name, boundfields, legend=None, description=''):
self.form = form
self.boundfields = boundfields
if legend is None: legend = name
self.legend = mark_safe(legend)
self.description = mark_safe(description)
|
|
self.name = name
def __iter__(self):
for bf in self.boundfields:
yield _mark_row_attrs(bf, self.form)
def __repr__(self):
return "%s('%s', %s, legend='%s', description='%s')" % (
self.__class__.__name__, self.name,
[f.name for f in self.boundfields], self.legend, self.description)
class FieldsetCollection(object):
def __init__(self, form, fieldsets):
self.form = form
self.fieldsets = fieldsets
def __len__(self):
return len(self.fieldsets) or 1
def __iter__(self):
if not self.fieldsets:
self.fieldsets = (('main', {'fields': self.form.fields.keys(),
'legend': ''}),)
for name, options in self.fieldsets:
try:
field_names = [n for n in options['fields']
if n in self.form.fields]
except KeyError:
raise ValueError("Fieldset definition must include 'fields' option." )
boundfields = [forms.forms.BoundField(self.form, self.form.fields[n], n)
for n in field_names]
yield Fieldset(self.form, name, boundfields,
options.get('legend', None),
options.get('description', ''))
def _get_meta_attr(attrs, attr, default):
try:
ret = getattr(attrs['Meta'], attr)
except (KeyError, AttributeError):
ret = default
return ret
def get_fieldsets(bases, attrs):
"""
Get the fieldsets definition from the inner Meta class, mapping it
on top of the fieldsets from any base classes.
"""
fieldsets = _get_meta_attr(attrs, 'fieldsets', ())
new_fieldsets = {}
order = []
for base in bases:
for fs in getattr(base, 'base_fieldsets', ()):
new_fieldsets[fs[0]] = fs
order.append(fs[0])
for fs in fieldsets:
new_fieldsets[fs[0]] = fs
if fs[0] not in order:
order.append(fs[0])
return [new_fieldsets[name] for name in order]
def get_row_attrs(bases, attrs):
"""
Get the row_attrs definition from the inner Meta class.
"""
return _get_meta_attr(attrs, 'row_attrs', {})
def _mark_row_attrs(bf, form):
row_attrs = deepcopy(form._row_attrs.get(bf.name, {}))
if bf.field.required:
req_class = 'required'
else:
req_class = 'optional'
if 'class' in row_attrs:
row_attrs['class'] = row_attrs['class'] + ' ' + req_class
else:
row_attrs['class'] = req_class
bf.row_attrs = mark_safe(flatatt(row_attrs))
return bf
class BetterFormBaseMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fieldsets'] = get_fieldsets(bases, attrs)
attrs['base_row_attrs'] = get_row_attrs(bases, attrs)
new_class = super(BetterFormBaseMetaclass,
cls).__new__(cls, name, bases, attrs)
return new_class
class BetterFormMetaclass(BetterFormBaseMetaclass,
forms.forms.DeclarativeFieldsMetaclass):
pass
class BetterModelFormMetaclass(BetterFormBaseMetaclass,
forms.models.ModelFormMetaclass):
pass
class BetterBaseForm(object):
"""
``BetterForm`` and ``BetterModelForm`` are subclasses of Form
and ModelForm that allow for declarative definition of fieldsets
and row_attrs in an inner Meta class.
The row_attrs declaration is a dictionary mapping field names to
dictionaries of attribute/value pairs. The attribute/value
dictionaries will be flattened into HTML-style attribute/values
(i.e. {'style': 'display: none'} will become ``style="display:
none"``), and will be available as the ``row_attrs`` attribute of
the ``BoundField``. Also, a CSS class of "required" or "optional"
will automatically be added to the row_attrs of each
``BoundField``, depending on whether the field is required.
The fieldsets declaration is a list of two-tuples very similar to
the ``fieldsets`` option on a ModelAdmin class in
``django.contrib.admin``.
The first item in each two-tuple is a name for the fieldset (must
be unique, so that overriding fieldsets of superclasses works),
and the second is a dictionary of fieldset options
Valid fieldset options in the dictionary include:
``fields`` (required): A tuple of field names to display in this
fieldset.
``classes``: A list of extra CSS classes to apply to the fieldset.
``legend``: This value, if present, will be the contents of a
``legend`` tag to open the fieldset. If not present the unique
name of the fieldset will be used (so a value of '' for legend
must be used if no legend is desired.)
``description``: A string of optional extra text to be displayed
under the ``legend`` of the fieldset.
When iterated over, the ``fieldsets`` attribute of a
``BetterForm`` (or ``BetterModelForm``) yields ``Fieldset``s.
Each ``Fieldset`` has a name attribute, a legend attribute, and a
description attribute, and when iterated over yields its
``BoundField``s.
For backwards compatibility, a ``BetterForm`` or
``BetterModelForm`` can still be iterated over directly to yield
all of its ``BoundField``s, regardless of fieldsets.
For more detailed examples, see the doctests in tests/__init__.py.
"""
def __init__(self, *args, **kwargs):
self._fieldsets = deepcopy(self.base_fieldsets)
self._row_attrs = deepcopy(self.base_row_attrs)
super(BetterBaseForm, self).__init__(*args, **kwargs)
@property
def fieldsets(self):
return FieldsetCollection(self, self._fieldsets)
def __iter__(self):
for bf in super(BetterBaseForm, self).__iter__():
|
Abhayakara/minder | smtpd/smtpd.py | Python | gpl-3.0 | 27,553 | 0.013283 |
#!/usr/bin/env python3
import dns.resolver
import dns.rdatatype
# This shouldn't be necessary, but for some reason __import__ when
# called from a coroutine, doesn't always work, and I haven't been
# able to figure out why. Possibly this is a 3.4.0 bug that's fixed
# later, but googling for it hasn't worked.
import dns.rdtypes.ANY.MX
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.NS
import smtp
import ssl as tls
import asyncio
import sys
import pdb
import os
import pwd
import socket
import base64
import hashlib
import time
import mailbox
import email
import email.parser
import email.utils
import email.header
import syslog
from concurrent.futures import FIRST_COMPLETED;
mindhome = "/etc/minder"
class coldb:
def parsefile(self, filename):
cf = open(filename, "r")
for line in cf:
line = line.rstrip()
fields = line.split(":")
self.process_fields(fields)
cf.close()
class tlsconf(coldb):
tlsctx = None
cert = None
key = None
name = None
def __init__(self, conffile=(mindhome + "/tls.conf")):
self.parsefile(conffile)
# TLS Context for incoming TLS connections:
# XXX this should be in a separate process!
# It may seem a bit contrary to practice that which ciphers and
# protocols are supported is hardcoded. The reason for this is
# that the end-user doesn't know from ciphers and protocols, and
# so we choose as secure a selection as we can.
#
# This is arguably problematic, because we might prefer crappy
# security to no security for TLS delivery, but we demand good
# security for maildrops, and have no way to distinguish whether
# this is a maildrop or a transfer until _after_ the TLS
# connection is established.
#
# Once STARTTLS support is implemented, we could allow
# maildrops only on the TLS port (465), and reject maildrops on
# the main port (25) and the STARTTLS port (587).
self.tlsctx = tls.SSLContext(tls.PROTOCOL_SSLv23)
self.tlsctx.options = (tls.OP_NO_COMPRESSION | tls.OP_SINGLE_DH_USE |
tls.OP_SINGLE_ECDH_USE |
tls.OP_NO_SSLv2 | tls.OP_NO_SSLv3)
self.tlsctx.verify_mode = tls.CERT_NONE # we don't want client certs
self.tlsctx.set_ciphers("ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" +
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:" +
"RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5")
self.tlsctx.load_cert_chain(self.cert, self.key)
def process_fields(self, fields):
if fields[0] == "name":
self.name = fields[1]
count = 2
elif fields[0] == "cert":
self.cert = fields[1]
count = 2
elif fields[0] == "key":
self.key = fields[1]
count = 2
else:
raise Exception("Unknown TLS setting: ", ":".join(fields))
if len(fields) > count:
raise Exception("Too many fields: ", ":".join(fields))
class userdb(coldb):
users = {}
domains = []
wildcard_domains = []
def __init__(self):
self.parsefile(mindhome + "/userdb")
def authenticate(self, username, password):
# Treat the username and password as if they are UTF-8.
# Encoding is not well specified here, so this could cause
# interop problems.
address = self.parse_address(str(username, encoding="utf-8"))
if address == None:
return None
if address[0] not in self.users:
return None
# Find the user entry for the given domain. If a user's
# domain is expressed as a wildcard, we prepend "*." to the
# domain we parsed out of the authentication data to find it,
# since it would be bogus to try to explain to the user why
# their username is jruser@*.example.com.
udmap = self.users[address[0]]
if address[1] not in udmap:
if "*." + address[1] in udmap:
udata = udmap["*." + address[1]]
else:
return None
else:
udata = udmap[address[1]]
hash = base64.standard_b64decode(udata["pass"])
salt = hash[32:]
sha = hashlib.sha256()
sha.update(password)
sha.update(salt)
chash = sha.digest()
# We return the mailbox so that we can use it to validate
# outgoing addresses later--any incoming address that winds
# up in the mailbox of the user who validated is a valid
# outgoing email address for that user.
if chash == hash[:32]:
return udata
return None
def process_fields(self, fields):
# user:mailbox:password:domains...
if len(fields) < 4:
raise Exception("invalid user database entry: %s" % line)
user = fields[0]
mbox = fields[1]
passw = fields[2]
udomains = fields[3:]
if user in self.users:
udmap = users[user]
else:
udmap = {}
for domain in udomains:
udmap[domain] = {'mbox': mbox, 'pass': passw}
if domain[0] == '*' and domain[1] == '.':
if domain not in self.wildcard_domains:
self.wildcard_domains.append(domain)
elif domain not in self.domains:
self.domains.append(domain)
self.users[user] = udmap
def parse_address(self, address):
# Python's parseaddr function doesn't actually do the right thing
# here, so for now this is going to be a very manual process,
# more's the pity.
# XXX does this work with unicode?
parts = address.lower().split("@")
if len(parts) != 2:
return None
user = parts[0]
domain = parts[1]
return [user, domain]
def find_wildcard(self, subdomain, domains):
splode = subdomain.split(".")
for i in range(0, len(splode)):
wildsub = "*." + ".".join(splode[i:])
if wildsub in domains:
return wildsub
return None
def validate_domain(self, address):
# assume address is output of parse_address
domain = address[1]
if domain not in self.domains:
wildcard = self.find_wildcard(domain, self.wildcard_domains)
if wildcard != None:
return True
return False
else:
return True
def find_slot(self, address):
user = address[0]
domain = address[1]
if user not in self.users:
return None
udomains = self.users[user]
for udomain in udomains:
if domain == udomain:
return udomains[udomain]
wildcard = self.find_wildcard(domain, udomains)
if wildcard != None:
return udomains[wildcard]
return None
def validate_address(self, address):
slot = self.find_slot(address)
if slot == None:
return False
return True
class msmtp(smtp.server):
|
userdb = None
mailbox = None
connections = {}
connection_list = []
message = None
# If we are authenticated, make sure the mail is from
# the authenticated user; if not, make sure that the
# sender passes basic anti-spam checks.
def validate_mailfrom(self, address):
if self.authenticated:
return self.validate_fromuser(address)
|
else:
return self.validate_sender(address)
def validate_sender(self, address):
# Add sender validation fu here:
return True
@asyncio.coroutine
def validate_fromuser(self, address):
addr = self.userdb.parse_address(address)
# First just check that it's a valid local address
if not self.validate_mailbox(addr):
print("not a local address: ", repr(addr))
return False
# Now check to see if the address delivers to the
# specified mailbox, which should be the mailbox
# of the authenticated user.
slot = self.userdb.find_slot(addr)
if (self.mailbox != None and self.authenticated and
slot["mbox"] == self.mailbox):
self.mail_from = address
self.from_domain = addr[1]
return True
self.push("550 Not authorized.")
return False
def validate_rcptto(self, address):
print("validate_rcptto:", address)
udbaddr = self.userdb.parse_address(address)
if udbaddr == None:
self.push("501 Syntax: RCPT TO: <address>")
syslog.syslog(syslog.LOG_INFO, "501 Syntax: RCPT TO: %s" % address)
return False
if self.authenticated:
print("validate_recipient")
return self.validat
|
luo-chengwei/utilitomics | utils/align.PE.py | Python | gpl-3.0 | 826 | 0.029056 |
import sys
import os
import re
from subprocess import PIPE, Popen, call
fq1, fq2, db, prefix = sys.argv[1:]
bowtie2_logfh = open(prefix+'.bowtie2.log','w')
bamfile = prefix+'.bam'
bowtie2_cmd = ['bowtie2', '-x', db, '-1', fq1, '-2', fq2]
samtools_view = ['samtools', 'view', '-bhS', '-']
samtools_sort = ['samtools', 'sort', '-', prefix]
samtools_index = ['samtools', 'index', bamfile]
p1 = Popen(bowtie2_cmd, stdout = PIPE, stderr = bowtie2_logfh)
|
p2 = Popen(samtools_view, stdin = p1.stdout, stdout = PIPE, stderr = bowtie2_logfh)
p3 = Popen(samtools_sort, stdin = p2.stdout, stdout = PIPE, stderr = bowtie2_logfh)
|
p1.stdout.close()
p2.stdout.close()
output, err = p3.communicate()
samtools_index = ['samtools', 'index', bamfile]
call(samtools_index, stderr = bowtie2_logfh, stdout = bowtie2_logfh)
bowtie2_logfh.close()
|
josiah-wolf-oberholtzer/consort | consort/tools/TimespanSpecifier.py | Python | mit | 1,111 | 0.0036 |
import abjad
from abjad.tools import abctools
class TimespanSpecifier(abctools.AbjadValueObject):
### CLASS VARIABLES ###
__slots__ = (
'_forbid_fusing',
'_forbid_splitting',
'_minimum_duration',
)
### INITIALIZER ###
def __init__(
self,
forbid_fusing=None,
forbid_splitting=None,
minimum_duration=None,
):
if forbid_fusing is not None:
forbid_fusing = bool(forbid_fusing)
self._forbid_fusing = forbid_fusing
|
if forbid_splitting is not None:
|
forbid_splitting = bool(forbid_splitting)
self._forbid_splitting = forbid_splitting
if minimum_duration is not None:
minimum_duration = abjad.Duration(minimum_duration)
self._minimum_duration = minimum_duration
### PUBLIC PROPERTIES ###
@property
def forbid_fusing(self):
return self._forbid_fusing
@property
def forbid_splitting(self):
return self._forbid_splitting
@property
def minimum_duration(self):
return self._minimum_duration
|
gamingrobot/SpockBot | spockbot/plugins/helpers/keepalive.py | Python | mit | 451 | 0 |
"""
KeepalivePlugin is a pretty cool guy. Eh reflects keep alive packets and doesnt
afraid of anything.
"""
from spockbot.plugins.base import PluginBase
class KeepalivePlugin(PluginBase):
requires = 'Net'
events = {
'PLAY<Keep Alive': 'handle_keep_alive',
}
# Keep Alive - Reflects data back to server
|
|
def handle_keep_alive(self, name, packet):
packet.new_ident('PLAY>Keep Alive')
self.net.push(packet)
|
ivecera/gentoo-overlay | dev-python/python-krbV/files/setup.py | Python | apache-2.0 | 714 | 0.068627 |
from distutils.core import setup, Extension
setup (name = 'krbV',
version = '1.0.90',
description = 'Kerberos V Bindings for Python',
long_description = """
python-krbV allows python programs to use Kerberos 5 authentication/security
|
""",
author = 'Test',
author_email = 'mikeb@redhat.com',
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License (LGPL)',
'Operating System :: POSIX :: Linux',
'Programming Language :: C',
'Topic :: System :: Systems Administration :: Authentication/Directory'
|
],
ext_modules = [Extension ('krbV',
[ 'krb5util.c', 'krb5module.c', 'krb5err.c' ],
libraries = ['krb5', 'com_err']
)
]
)
|
daviddrysdale/python-phonenumbers | python/tests/testdata/region_RU.py | Python | apache-2.0 | 630 | 0.009524 |
"""Auto-generated file, do not edit by hand. RU metadata"""
from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
|
PHONE_METADATA_RU = PhoneMetadata(id='RU', country_code=7, international_prefix='810',
general_desc=PhoneNumberDesc(national_number_pattern='[347-9]\\d{9}', possible_length=(10,)),
fixed_line=PhoneNumberDesc(national_number_pattern='[348]\\d{9}', example_number='3011234567', possible_length=(10,)),
mobile=PhoneNumberDesc(national_number_pattern='9\\d{9}', example_number='9123456789', possible_length=(10,)),
|
national_prefix='8',
national_prefix_for_parsing='8')
|
Eric89GXL/scikit-learn | sklearn/neural_network/tests/test_rbm.py | Python | bsd-3-clause | 4,240 | 0.000236 |
import sys
import re
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
from the same input
"""
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
the same input even when the input is sparse, and test against non-sparse
"""
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
|
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
|
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
""" just seek if we don't get NaNs sampling the full digits dataset """
rng = np.random.RandomState(42)
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=10,
n_iter=20, random_state=rng)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
def test_score_samples():
"""Check that the pseudo likelihood is computed without clipping.
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert((rbm1.score_samples(X) < -300).all())
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
"""
Make sure RBM works with sparse input when verbose=True
"""
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert(re.match(r"Iteration 0, pseudo-likelihood = -?(\d)+(\.\d+)?",
s))
finally:
sio = sys.stdout
sys.stdout = old_stdout
|
zhouqilin1993/IntelliDE | crawler/ForumSpider/ForumSpider/valide/picget.py | Python | gpl-3.0 | 3,264 | 0.034007 |
# This is a program for IP limit using picture recognition.
# URL: http://bbs.csdn.net/human_validations/new
# Input: human validations page
# Get the jpeg from the url.
# use picture recognition to get the string from the picture.
# Authentication pass!
#
# this is try to use selenuim to login
import re,os,sys
import time
import urllib2
import cookielib
import urllib
from cookielib import CookieJar
import pytesseract
from selenium import webdriver
from PIL import Image,ImageFilter,ImageEnhance
from selenium.webdriver.common import action_chains
from selenium.webdriver.common.keys import Keys
class PicGet:
def image_to_text(self, img):
text = pytesseract.image_to_string(img)
text = re.sub('[\W]', '', text)
return text
def imageToString(self,picname):
image = Image.open(picname)
ValidCode = self.image_to_text(image)
image.save('captcha.png')
return ValidCode
def validlogin(self,driver,cookie,validcode):
# use the validcode to authentication
PostUrl = "http://bbs.csdn.net/human_validations"
elem = driver.find_element_by_id("captcha")
elem.send_keys(validcode)
elem.send_keys(Keys.TAB)
time.sleep(3)
driver.find_element_by_xpath('//button[@type="submit"]').send_keys(Keys.ENTER)
#submit_button.send_keys(Keys.ENTER)
print "test"
cur_url = driver.current_url
# print (cur_url)
if cur_url == PostUrl:
return True
else:
return False
def validImageGet(self):
AuthUrl = "http://bbs.csdn.net/human_validations/new"
picname = 'captcha.png'
sel = webdriver.Chrome()
sel.get(AuthUrl)
cookie = sel.get_cookies()
auth_token = sel.find_element_by_xpath('//input[@name="authenticity_token"]')
captcha_key = sel.find_element_by_xpath('//input[@id="captcha_key"]')
# submit_button = sel.find_element_by_xpath('//button[@type="submit"]')
# submit_button.submit()
time.sleep(0.3)
picItem = sel.find_element_by_xpath('//img[@alt="captcha"]')
# submit_button = sel.find_element_by_xpath('//button[@type="submit"]')
sel.save_screenshot(picname)
left = int(picItem.location['x'])
top = int(picItem.location['y'])
right = int(picItem.location['x'] + picItem.size['width'])
|
bottom = int(picItem.location['y'] + picItem.size['height'])
im = Image.open(picname)
# print (left,top,right,bottom)
im = im.crop((left, top, right, bottom))
im.save(picname)
# validcode picture recognize
time.sleep(0.5)
validcode = self.imageToString(picname)
print (validcode)
validcode = "RCNCUB"
#validcode = input("please input:")
if re.match('[A-Z]{6}',validcode):
|
if self.validlogin(sel,cookie,validcode):
print ('Auth Success!')
else:
print ('Auth Fail!')
#picItem.send_keys(Keys.TAB)
#submit_button.send_keys(Keys.ENTER)
#submit_button.click()
# try:
# submit_button.click()
# except Exception,e:
# print (Exception,":",e)
# validcode = input("please input:")
# if True: # if (len(validcode) == 6) & validcode.isalnum():
# if self.validpost(cookie,auth_token,validcode,captcha_key):# if self.validlogin(sel,cookie,validcode):
# print ('Authentication Pass!')
# break
# else:
# submit_button.click()
time.sleep(5)
sel.quit()
if __name__ == '__main__':
ValidTest = PicGet()
ValidTest.validImageGet()
|
freieslabor/info-display | info_display/screens/event_schedule/apps.py | Python | mpl-2.0 | 158 | 0 |
from django.apps import AppConfig
|
class CalendarFeedConfig(AppConfig):
name = 'info_display.screens.event_schedule'
|
verbose_name = 'Event Schedule'
|
ChromiumWebApps/chromium | tools/telemetry/telemetry/page/cloud_storage.py | Python | bsd-3-clause | 6,562 | 0.012496 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrappers for gsutil, for basic interaction with Google Cloud Storage."""
import cStringIO
import hashlib
import logging
import os
import subprocess
import sys
import tarfile
import urllib2
from telemetry.core import util
PUBLIC_BUCKET = 'chromium-telemetry'
INTERNAL_BUCKET = 'chrome-telemetry'
_GSUTIL_URL = 'http://storage.googleapis.com/pub/gsutil.tar.gz'
_DOWNLOAD_PATH = os.path.join(util.GetTelemetryDir(), 'third_party', 'gsutil')
class CloudStorageError(Exception):
@staticmethod
def _GetConfigInstructions(gsutil_path):
if SupportsProdaccess(gsutil_path):
return 'Run prodaccess to authenticate.'
else:
return ('To configure your credentials:\n'
' 1. Run "%s config" and follow its instructions.\n'
' 2. If you have a @google.com account, use that account.\n'
' 3. For the project-id, just enter 0.' % gsutil_path)
class PermissionError(CloudStorageError):
def __init__(self, gsutil_path):
super(PermissionError, self).__init__(
'Attempted to access a file from Cloud Storage but you don\'t '
'have permission. ' + self._GetConfigInstructions(gsutil_path))
class CredentialsError(CloudStorageError):
def __init__(self, gsutil_path):
super(CredentialsError, self).__init__(
'Attempted to access a file from Cloud Storage but you have no '
'configured credentials. ' + self._GetConfigInstructions(gsutil_path))
class NotFoundError(CloudStorageError):
pass
# TODO(tonyg/dtu): Can this be replaced with distutils.spawn.find_executable()?
def _FindExecutableInPath(relative_executable_path, *extra_search_paths):
for path in list(extra_search_paths) + os.environ['PATH'].split(os.pathsep):
executable_path = os.path.join(path, relative_executable_path)
if os.path.isfile(executable_path) and os.access(executable_path, os.X_OK):
return executable_path
return None
def _DownloadGsutil():
logging.info('Downloading gsutil')
response = urllib2.urlopen(_GSUTIL_URL)
with tarfile.open(fileobj=cStringIO.StringIO(response.read())) as tar_file:
tar_file.extractall(os.path.dirname(_DOWNLOAD_PATH))
logging.info('Downloaded gsutil to %s' % _DOWNLOAD_PATH)
return os.path.join(_DOWNLOAD_PATH, 'gsutil')
def FindGsutil():
"""Return the gsutil executable path. If we can't find it, download it."""
# Look for a depot_tools installation.
|
gsutil_path = _FindExecutableInPath(
os.path.join('third_party', 'gsutil', 'gsutil'), _DOWNLOAD_PATH)
if gsutil_path:
return gsutil_path
# Look for a gsutil installation.
gsutil_path = _FindExecutableInPath('gsutil', _DOWNLOAD_PATH)
if gsutil_path:
return gsutil_path
# Failed to find it. Download it!
return _DownloadGsutil()
def SupportsProdaccess(gsutil_path):
|
def GsutilSupportsProdaccess():
with open(gsutil_path, 'r') as gsutil:
return 'prodaccess' in gsutil.read()
return _FindExecutableInPath('prodaccess') and GsutilSupportsProdaccess()
def _RunCommand(args):
gsutil_path = FindGsutil()
gsutil = subprocess.Popen([sys.executable, gsutil_path] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = gsutil.communicate()
if gsutil.returncode:
if stderr.startswith((
'You are attempting to access protected data with no configured',
'Failure: No handler was ready to authenticate.')):
raise CredentialsError(gsutil_path)
if 'status=403' in stderr or 'status 403' in stderr:
raise PermissionError(gsutil_path)
if stderr.startswith('InvalidUriError') or 'No such object' in stderr:
raise NotFoundError(stderr)
raise CloudStorageError(stderr)
return stdout
def List(bucket):
query = 'gs://%s/' % bucket
stdout = _RunCommand(['ls', query])
return [url[len(query):] for url in stdout.splitlines()]
def Exists(bucket, remote_path):
try:
_RunCommand(['ls', 'gs://%s/%s' % (bucket, remote_path)])
return True
except NotFoundError:
return False
def Move(bucket1, bucket2, remote_path):
url1 = 'gs://%s/%s' % (bucket1, remote_path)
url2 = 'gs://%s/%s' % (bucket2, remote_path)
logging.info('Moving %s to %s' % (url1, url2))
_RunCommand(['mv', url1, url2])
def Delete(bucket, remote_path):
url = 'gs://%s/%s' % (bucket, remote_path)
logging.info('Deleting %s' % url)
_RunCommand(['rm', url])
def Get(bucket, remote_path, local_path):
url = 'gs://%s/%s' % (bucket, remote_path)
logging.info('Downloading %s to %s' % (url, local_path))
_RunCommand(['cp', url, local_path])
def Insert(bucket, remote_path, local_path, publicly_readable=False):
url = 'gs://%s/%s' % (bucket, remote_path)
command_and_args = ['cp']
extra_info = ''
if publicly_readable:
command_and_args += ['-a', 'public-read']
extra_info = ' (publicly readable)'
command_and_args += [local_path, url]
logging.info('Uploading %s to %s%s' % (local_path, url, extra_info))
_RunCommand(command_and_args)
def GetIfChanged(file_path, bucket=None):
"""Gets the file at file_path if it has a hash file that doesn't match.
If the file is not in Cloud Storage, log a warning instead of raising an
exception. We assume that the user just hasn't uploaded the file yet.
Returns:
True if the binary was changed.
"""
hash_path = file_path + '.sha1'
if not os.path.exists(hash_path):
return False
with open(hash_path, 'rb') as f:
expected_hash = f.read(1024).rstrip()
if os.path.exists(file_path) and GetHash(file_path) == expected_hash:
return False
if bucket:
buckets = [bucket]
else:
buckets = [PUBLIC_BUCKET, INTERNAL_BUCKET]
found = False
for bucket in buckets:
try:
url = 'gs://%s/%s' % (bucket, expected_hash)
_RunCommand(['cp', url, file_path])
logging.info('Downloaded %s to %s' % (url, file_path))
found = True
except NotFoundError:
continue
if not found:
logging.warning('Unable to find file in Cloud Storage: %s', file_path)
return found
def GetHash(file_path):
"""Calculates and returns the hash of the file at file_path."""
sha1 = hashlib.sha1()
with open(file_path, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
|
jrmendozat/mtvm | Sede/models.py | Python | gpl-2.0 | 1,385 | 0.005776 |
from django.db import models
#from Cliente.models import Cliente_Direccion
# Create your models here.
class Tipo_sede(models.Model):
nombre = models.CharField(max_length=50, unique=True)
def __unicode__(self):
return self.nombre
class Meta:
verbose_name = "Tipo de sede"
verbose_name_plural = "Tipos de sede"
class Sede(models.Model):
tipo = models.ForeignKey(Tipo_sede)
sede = models.CharField(max_length=250)
piso = models.IntegerField(default=0)
piso_por_escalera = models.IntegerField(default=0)
numero_ambiente = models.IntegerField(default=1)
#direccion_cliente = models.OneToOneField(Cliente_Direccion)
|
def __unicode__(self):
return u'%s'%(self.tipo)
class Meta:
verbose_name = "Sede"
verbose_name_plural = "Sedes"
class Tipo_Ambiente(models.Model):
tipo_ambiente = models.CharField(max_length=50, unique=True)
def __unicode__(self):
return self.tipo_ambiente
class Meta:
verbose_name = "Tipo de ambiente"
verbose_name_plural = "Tipos de ambi
|
entes"
class Ambiente(models.Model):
ambiente = models.ForeignKey(Tipo_Ambiente)
sede = models.ForeignKey(Sede)
def __unicode__(self):
return u'%s - %s'%(self.ambiente, self.sede)
class Meta:
verbose_name = "Ambiente"
verbose_name_plural = "Ambientes"
|
apdjustino/DRCOG_Urbansim | src/opus_gui/data_manager/data_manager_functions.py | Python | agpl-3.0 | 1,946 | 0.004111 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import lxml
'''
A set of functions related to Data Manager and the <data_manager> node
in Opus Project Configuration files.
'''
def get_tool_nodes(project):
'''
Retrieve a list of all nodes that represent tools in the given project
@param project (OpusProject) project to fetch nodes from
@return a list of nodes representing the tools (list(Element))
'''
tool_nodes = []
tool_group_nodes = get_tool_library_node(project).findall("tool_group")
for tool_group in tool_group_nodes:
tool_nodes.extend(tool_group.findall("tool"))
return tool_nodes
def get_tool_node_by_name(project, tool_name):
'''
Fetch a node representing a tool based in it's name.
@param project (OpusProject) project to fetch node from
@param tool_name (str) name of the tool to fetch
@return the node (Element) or None if the node was not found
'''
for node in get_tool_nodes(project):
if node.get('name') == tool_name:
return node
return None
def get_tool_library_node(project):
'''
Get a reference to the tool library for the given project
@param project (OpusProject) project to operate on
@return the node representing the tool library (Element) or None if the
project does not contain a tool library.
'''
if type(project) == lxml.etree._Element and project.tag == "tool_library": return project
return project.find('data_manager/tool_library')
def get_path_to_tool_modules(project):
'''
Get the path to the tool modules
@param project (OpusProject) project to operate on
|
@return the text representing the path or
|
None if not found
'''
node = project.find('data_manager/path_to_tool_modules')
if node is not None: return node.text
return None
|
mrfesol/easyAI | easyAI/AI/TT.py | Python | mit | 2,413 | 0.007874 |
"""
This module implements transposition tables, which store positions
and moves to speed up the AI.
"""
import pickle
from easyAI.AI.DictTT import DictTT
class TT:
"""
A tranposition table made out of a Python dictionnary.
It can only be used on games which have a method
game.ttentry() -> string, or tuple
Usage:
>>> table = TT(DictTT(1024)) or table = TT() for default dictionary
>>> ai = Negamax(8, scoring, tt = table) # boosted Negamax !
>>> ai(some_game) # computes a move, fills the table
>>> table.to_file('saved_tt.data') # maybe save for later ?
|
>>> # later...
>>> table = TT.fromfile('saved_tt.data')
>>> ai = Negamax(8, scoring, tt = table) # boosted Negamax !
Transposition tables can also be used as an AI (``AI_player(tt)``)
|
but they must be exhaustive in this case: if they are asked for
a position that isn't stored in the table, it will lead to an error.
"""
def __init__(self, own_dict = None):
self.d = own_dict if own_dict != None else dict()
def lookup(self, game):
""" Requests the entry in the table. Returns None if the
entry has not been previously stored in the table. """
return self.d.get(game.ttentry(), None)
def __call__(self,game):
"""
This method enables the transposition table to be used
like an AI algorithm. However it will just break if it falls
on some game state that is not in the table. Therefore it is a
better option to use a mixed algorithm like
>>> # negamax boosted with a transposition table !
>>> Negamax(10, tt= my_dictTT)
"""
return self.d[game.ttentry()]['move']
def store(self, **data):
""" Stores an entry into the table """
entry = data.pop("game").ttentry()
self.d[entry] = data
def tofile(self, filename):
""" Saves the transposition table to a file. Warning: the file
can be big (~100Mo). """
with open(filename, 'w+') as f:
pickle.dump(self, f)
@staticmethod
def fromfile(self, filename):
""" Loads a transposition table previously saved with
``TT.tofile`` """
with open(filename, 'r') as f:
pickle.load(self, filename)
|
craigatron/freedoge | freedoge/wsgi.py | Python | mit | 426 | 0.004695 |
"""
WSGI config for freedoge project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freedoge.settings")
|
|
from dj_static import Cling
from django.core.wsgi import get_wsgi_application
application = Cling(get_wsgi_application())
|
danielsunzhongyuan/my_leetcode_in_python | search_a_2d_matrix_ii_240.py | Python | apache-2.0 | 895 | 0.001117 |
# @author: Zhongyuan Sun
# time: O(log(m*n)), space: O(1)
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
# Solution One: 122ms, beats 50.00%
# if not matrix or not matrix[0]:
# return False
# m = len(matrix)
# n = len(matrix[0])
# i, j = m - 1, 0
# while i >= 0 and j < n:
# if matrix[i][j] > target:
# i -= 1
# elif matrix[i][j] < target:
|
# j += 1
# else:
# return True
# return False
# Solution Two: 216ms, beats 21.36%
|
if not matrix or not matrix[0]:
return False
for line in matrix:
if target in line:
return True
return False
|
rooshilp/CMPUT410Lab6 | virt_env/virt1/lib/python2.7/site-packages/django/contrib/staticfiles/storage.py | Python | apache-2.0 | 14,802 | 0.000608 |
from __future__ import unicode_literals
from collections import OrderedDict
import hashlib
import os
import posixpath
import re
import json
from django.conf import settings
from django.core.cache import (caches, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
|
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.six.moves.urllib.parse import unquote, urlsplit, urlunsplit, urldefrag
|
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
# FileSystemStorage fallbacks to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class HashedFilesMixin(object):
default_template = """url("%s")"""
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super(HashedFilesMixin, self).__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Retuns a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
opened = False
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
hashed_name = self.stored_name(clean_name)
final_url = super(HashedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name, template=None):
"""
Returns the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return template % unquote(relative_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
matches = lambda path: matc
|
bnsantos/python-junk-code | tests/math/abacusTest.py | Python | gpl-2.0 | 5,476 | 0.000183 |
__author__ = 'bruno'
import unittest
import algorithms.math.abacus as Abacus
class TestAbacus(unittest.TestCase):
def setUp(self):
pass
def test_abacus1(self):
abacus = Abacus.generate_abacus(0)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |'], abacus)
def test_abacus2(self):
abacus = Abacus.generate_abacus(8)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00 000*****|'], abacus)
def test_abacus3(self):
abacus = Abacus.generate_abacus(32)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000** ***|',
'|00000*** **|'], abacus)
def test_abacus4(self):
abacus = Abacus.generate_abacus(147)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000**** *|',
'|00000* ****|',
'|000 00*****|'], abacus)
def test_abacus5(self):
abacus = Abacus.generate_abacus(986)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|0 0000*****|',
'|00 000*****|',
'|0000 0*****|'], abacus)
def test_abacus6(self):
abacus = Abacus.generate_abacus(5821)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000 *****|',
'|00 000*****|',
'|00000*** **|',
'|00000**** *|'], abacus)
def test_abacus7(self):
abacus = Abacus.generate_abacus(1234)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000**** *|',
'|00000*** **|',
'|00000** ***|',
'|00000* ****|'], abacus)
def test_abacus8(self):
abacus = Abacus.generate_abacus(999)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|0 0000*****|',
'|0 0000*****|',
'|0 0000*****|'], abacus)
    def test_abacus9(self):
abacus = Abacus.generate_abacus(13)
self.assertEqual(['|00000***** |',
'|00000***** |',
                          '|00000***** |',
                          '|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000**** *|',
'|00000** ***|'], abacus)
def test_abacus10(self):
abacus = Abacus.generate_abacus(49)
self.assertEqual(['|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000***** |',
'|00000* ****|',
'|0 0000*****|'], abacus)
|
JanHendrikDolling/configvalidator
|
configvalidator/tools/basics.py
|
Python
|
apache-2.0
| 6,745 | 0.00089 |
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by Jan-Hendrik Dolling.
:license: Apache 2.0, see LICENSE for more details.
"""
import abc
import six
import json
import logging
from six import string_types
from collections import namedtuple
from configvalidator.tools.exceptions import LoadException, ValidatorException
from configvalidator.tools.parser import ParseObj
logger = logging.getLogger(__name__)
DATA_SECTION_FEATURE = {}
DATA_OPTION_FEATURE = {}
DATA_VALIDATOR = {}
GLOBAL_DATA = {}
def add_data(key, value):
"""Add a value to the global data store
Validators and Features can access this data.
    If you create an object, a *data* attribute is automatically added to the instance.
    This data attribute holds all information that was present during initialization.
So it's possible to add additional meta data to Validators and Features.
Args:
key: The key under which that information is stored.
value: The information
"""
GLOBAL_DATA[key] = value
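# Example (illustrative sketch, not part of the original module): data added here
# becomes visible on every Validator/Feature instance via its ``data`` attribute.
#
#     add_data("db_connection", conn)      # "db_connection" and conn are placeholders
#
#     class MyValidator(Validator):
#         def validate(self, value):
#             conn = self.data["db_connection"]
#             return True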
def remove_data(key):
"""remove a value from the global data store
This removes the data only for new instances.
    The information remains available under the key for existing instances.
Args:
key: The key under which that information is stored.
"""
del GLOBAL_DATA[key]
def load_validator(validator_name):
"""loads a validator class
Args:
validator_name: the validator name
Returns:
        A validator class which can then be instantiated
Raises:
        LoadException: if the validator_name is unknown
"""
try:
        return DATA_VALIDATOR[validator_name]
except KeyError:
raise LoadException("no validator with the name {name}".format(name=validator_name))
def load_section_feature(feature_name):
try:
return DATA_SECTION_FEATURE[feature_name]
except KeyError:
raise LoadException(
"no Section feature with the name {name}"
|
.format(name=feature_name))
def load_option_feature(feature_name):
try:
return DATA_OPTION_FEATURE[feature_name]
except KeyError:
raise LoadException(
"no option feature with the name {name}".format(name=feature_name))
def load_validator_form_dict(option_dict):
validator_class_name = "default"
validator_class_dict = {}
if isinstance(option_dict, dict) and "validator" in option_dict and option_dict["validator"] is not None:
if isinstance(option_dict["validator"], string_types):
validator_class_name = option_dict["validator"]
else:
validator_class_dict = option_dict["validator"]
if "type" in validator_class_dict:
validator_class_name = validator_class_dict["type"]
del validator_class_dict["type"]
return load_validator(validator_class_name), validator_class_dict
def list_objects():
return dict(validators=[x for x in DATA_VALIDATOR],
option_features=[x for x in DATA_OPTION_FEATURE],
section_features=[x for x in DATA_SECTION_FEATURE])
def decorate_fn(func):
def with_check_input_is_string(self, value):
if not isinstance(value, string_types):
raise ValidatorException("input must be a string.")
return func(self, value)
return with_check_input_is_string
class CollectMetaclass(abc.ABCMeta):
"""Metaclass which safes the class, so that the loads methods can find them.
all classes with this metaclass are automatically collected
The then can be accessed with there name (which is the class attribute
name or the class name if the class has no attribute entry_name)
"""
def __init__(self, name, bases, dct):
"""
        called when a new class is created.
        the method sets the "name" attribute if not set.
        if the attribute inactive is not True, the class
        is sorted into the Singleton object
- Validator to _env.validators
- Feature to _env.features
"""
super(CollectMetaclass, self).__init__(name, bases, dct)
if object in bases:
# skip base classes
return
if "name" not in dct:
self.name = name
if "inactive" not in dct or dct["inactive"] is not True:
if issubclass(self, Validator):
# only string input for validator functions
self.validate = decorate_fn(self.validate)
DATA_VALIDATOR[self.name] = self
if issubclass(self, SectionFeature):
DATA_SECTION_FEATURE[self.name] = self
if issubclass(self, OptionFeature):
DATA_OPTION_FEATURE[self.name] = self
def __call__(self, *args, **kwargs):
pars_obj = None
if len(args) > 0 and isinstance(args[0], ParseObj):
pars_obj = args[0]
args = args[1:]
res = self.__new__(self, *args, **kwargs)
if isinstance(res, self):
res.data = dict(GLOBAL_DATA)
if pars_obj is not None:
res.data.update(pars_obj.context_data)
res.__init__(*args, **kwargs)
return res
@six.add_metaclass(CollectMetaclass)
class Validator(object):
"""Superclass for Validator's
If you want to write your own Validator use this Superclass.
For Attribute information see Entry class.
a instance lives in one section/option from ini_validator dict
"""
@abc.abstractmethod
def validate(self, value):
"""determine if one input satisfies this validator.
        IMPORTANT:
          The input is always a String
        Args:
          value (String): the value to check against this Validator
        Returns:
          True or False depending on whether the input satisfies the Validator.
"""
@six.add_metaclass(CollectMetaclass)
class SectionFeature(object):
def __init__(self, **kwargs):
"""
:param kwargs: parameter will be ignored
:return:
"""
@abc.abstractmethod
def parse_section(self, parse_obj, section_dict):
"""
:param parse_obj: parser object which stores the data
:param section_dict: the configuration dict for the current section
:return:
"""
@six.add_metaclass(CollectMetaclass)
class OptionFeature(object):
def __init__(self, **kwargs):
"""
:param kwargs: parameter will be ignored
:return:
"""
@abc.abstractmethod
def parse_option(self, parse_obj, option_dict):
"""
:param parse_obj: parser object which stores the data
:param option_dict: the configuration dict for the current option
:return:
"""
|
refeed/coala
|
tests/results/result_actions/ResultActionTest.py
|
Python
|
agpl-3.0
| 1,388 | 0 |
import unittest
from coalib.results.Result import Result
from coalib.results.result_actions.ResultAction import ResultAction
from coalib.settings.Section import Section
class ResultActionTest(unittest.TestCase):
def test_api(self):
uut = ResultAction()
result = Result('', '')
self.assertRaises(NotImplementedError, uut.apply, 5, {}, {})
self.assertRaises(NotImplementedError,
uut.apply_from_section,
                          '',
                          {},
{},
Section('name'))
self.assertRaises(TypeError, uut.apply_from_section, '', {}, {}, 5)
self.assertRaises(TypeError,
uut.apply_from_section,
                          '',
                          5,
{},
Section('name'))
self.assertRaises(TypeError,
uut.apply_from_section,
'',
{},
5,
Section('name'))
self.assertEqual(len(uut.get_metadata().non_optional_params), 0)
self.assertEqual(len(uut.get_metadata().optional_params), 0)
self.assertEqual(uut.get_metadata().name, 'ResultAction')
self.assertTrue(uut.is_applicable(result, None, None))
|
trelay/multi-executor
|
main/main_fio.py
|
Python
|
mit
| 1,282 | 0.013261 |
#!/usr/bin/python
import os, re, sys
from remote_exe import create_thread
from subprocess import Popen, PIPE
from main import base_fun
fio_cmd = "fio --name=global --ioengine=sync --bs=4k --rw=read --filename=/dev/{0} --runtime={1} --direct=1 -numjobs=1 -iodepth=4 --name=job"
stress_time = 60
class FIO_FUN(base_fun):
def __init__(self):
super(FIO_FUN, self).__init__()
def get_all_nvme(self):
self.nvme_list=[]
dev_list = os.listdir("/dev/")
        #dev_list =['kmsg','stdin','nvme0','nvme0n1', 'nvme1','nvme10','nvme10n1','nvme11','nvme11n1']
p= re.compile(r'nvme\d+n\d')
for dev in dev_list:
match = p.search(dev)
if match:
self.nvme_list.append(dev)
return self.nvme_list
def run(self):
#argv_list = [{'log_name': 'log_path', 'command_line':'fio_testcommnd'},]
print >>sys.stderr,"Start Running"
self.get_all_nvme()
argv_list=[]
for nvme in self.nvme_list:
argv= dict()
argv.update(log_name= nvme)
command = fio_cmd.format(nvme,stress_time)
argv.update(command_line = command)
argv_list.append(argv)
create_thread(argv_list)
return "command executed"
|
ChristianTremblay/BAC0
|
BAC0/core/functions/cov.py
|
Python
|
lgpl-3.0
| 5,711 | 0.001926 |
from bacpypes.apdu import SubscribeCOVRequest, SimpleAckPDU, RejectPDU, AbortPDU
from bacpypes.iocb import IOCB
from bacpypes.core import deferred
from bacpypes.pdu import Address
from bacpypes.object import get_object_class, get_datatype
from bacpypes.constructeddata import Array
from bacpypes.primitivedata import Tag, ObjectIdentifier, Unsigned
from BAC0.core.io.Read import cast_datatype_from_tag
"""
using cov, we build a "context" which is turned into a subscription being sent to
the destination.
Once the IOCB is over, the callback attached to it will execute (subscription_acknowledged)
and we'll get the answer
"""
class SubscriptionContext:
next_proc_id = 1
def __init__(self, address, objectID, confirmed=None, lifetime=None, callback=None):
self.address = address
self.subscriberProcessIdentifier = SubscriptionContext.next_proc_id
SubscriptionContext.next_proc_id += 1
self.monitoredObjectIdentifier = objectID
self.issueConfirmedNotifications = confirmed
self.lifetime = lifetime
self.callback = callback
def cov_notification(self, apdu):
# make a rash assumption that the property value is going to be
# a single application encoded tag
source = apdu.pduSource
object_changed = apdu.monitoredObjectIdentifier
elements = {
"source": source,
"object_changed": object_changed,
"properties": {},
}
for element in apdu.listOfValues:
prop_id = element.propertyIdentifier
datatype = get_datatype(object_changed[0], prop_id)
value = element.value
if not datatype:
value = cast_datatype_from_tag(
element.value, object_changed[0], prop_id
)
else:
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (
element.propertyArrayIndex is not None
):
if element.propertyArrayIndex == 0:
value = element.value.cast_out(Unsigned)
else:
value = element.value.cast_out(datatype.subtype)
else:
value = element.value.cast_out(datatype)
elements["properties"][prop_id] = value
return elements
class CoV:
"""
Mixin to support COV registration
"""
def send_cov_subscription(self, request):
self._log.debug("Request : {}".format(r
|
equest))
iocb = IOCB(request)
self._log.debug("IOCB : {}".format(iocb))
iocb.add_callback(self.subscription_acknowledged)
# pass to the BACnet stack
        deferred(self.this_application.request_io, iocb)
def subscription_acknowledged(self, iocb):
if iocb.ioResponse:
self._log.info("Subscription success")
if iocb.ioError:
self._log.error("Subscription failed. {}".format(iocb.ioError))
def cov(self, address, objectID, confirmed=True, lifetime=0, callback=None):
address = Address(address)
context = self._build_cov_context(
address, objectID, confirmed=confirmed, lifetime=lifetime, callback=callback
)
request = self._build_cov_request(context)
self.send_cov_subscription(request)
def cancel_cov(self, address, objectID, callback=None):
address = Address(address)
context = self._build_cov_context(
address, objectID, confirmed=None, lifetime=None, callback=callback
)
request = self._build_cov_request(context)
self.send_cov_subscription(request)
def _build_cov_context(
self, address, objectID, confirmed=True, lifetime=None, callback=None
):
context = SubscriptionContext(
address=address,
objectID=objectID,
confirmed=confirmed,
lifetime=lifetime,
callback=callback,
)
self.subscription_contexts[context.subscriberProcessIdentifier] = context
if "context_callback" not in self.subscription_contexts.keys():
self.subscription_contexts["context_callback"] = self.context_callback
return context
def _build_cov_request(self, context):
request = SubscribeCOVRequest(
subscriberProcessIdentifier=context.subscriberProcessIdentifier,
monitoredObjectIdentifier=context.monitoredObjectIdentifier,
)
request.pduDestination = context.address
# optional parameters
if context.issueConfirmedNotifications is not None:
request.issueConfirmedNotifications = context.issueConfirmedNotifications
if context.lifetime is not None:
request.lifetime = context.lifetime
return request
# def context_callback(self, elements, callback=None):
def context_callback(self, elements):
self._log.info("Received COV Notification for {}".format(elements))
# if callback:
# callback()
for device in self.registered_devices:
if str(device.properties.address) == str(elements["source"]):
device[elements["object_changed"]].cov_registered = True
for prop, value in elements["properties"].items():
if prop == "presentValue":
device[elements["object_changed"]]._trend(value)
else:
device[elements["object_changed"]].properties.bacnet_properties[
prop
] = value
break
|
stackforge/networking-bagpipe-l2
|
networking_bagpipe/objects/bgpvpn.py
|
Python
|
apache-2.0
| 15,705 | 0 |
# Copyright (c) 2017 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_versionedobjects import fields as obj_fields
from networking_bgpvpn.neutron.db import bgpvpn_db
from neutron.api.rpc.callbacks import resources
from neutron.objects import base
from neutron.objects.ports import IPAllocation
from neutron.objects.ports import Port
from neutron.objects.router import RouterPort
from neutron.objects.subnet import Subnet
from neutron_lib.api.definitions import bgpvpn as bgpvpn_api
from neutron_lib.api.definitions import bgpvpn_routes_control as bgpvpn_rc_api
from neutron_lib import constants
from neutron_lib.objects import common_types
from neutron_lib.utils import net as net_utils
LOG = logging.getLogger(__name__)
def _get_gateway_mac_by_subnet(obj_context, subnet):
if not subnet.gateway_ip:
LOG.error("no gateway IP defined for subnet %s", subnet)
return None
ip_allocation = IPAllocation.get_object(obj_context,
network_id=subnet.network_id,
subnet_id=subnet.id,
ip_address=subnet.gateway_ip)
# pylint: disable=no-member
if ip_allocation:
port = Port.get_object(obj_context, id=ip_allocation.port_id)
return str(port.mac_address)
else:
LOG.debug("no port allocated to gateway IP for subnet %s", subnet.id)
return None
def _get_subnets_info(obj_context, net_id):
subnets = Subnet.get_objects(obj_context, network_id=net_id)
return [
{'ip_version': subnet.ip_version,
'id': subnet.id,
'cidr': subnet.cidr,
'gateway_ip': subnet.gateway_ip,
'gateway_mac': _get_gateway_mac_by_subnet(obj_context, subnet)
}
for subnet in subnets
]
class BGPVPNTypeField(obj_fields.AutoTypedField):
AUTO_TYPE = obj_fields.Enum(valid_values=bgpvpn_api.BGPVPN_TYPES)
@base.NeutronObjectRegistry.register
class BGPVPN(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
new_facade = True
db_model = bgpvpn_db.BGPVPN
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
'type': BGPVPNTypeField(),
'name': obj_fields.StringField(nullable=True,
default=None),
'route_targets': obj_fields.ListOfStringsField(nullable=True,
default=[]),
'import_targets': obj_fields.ListOfStringsField(nullable=True,
default=[]),
'export_targets': obj_fields.ListOfStringsField(nullable=True,
default=[]),
'route_distinguishers': obj_fields.ListOfStringsField(nullable=True,
default=[]),
'local_pref': obj_fields.IntegerField(nullable=True),
'vni': obj_fields.IntegerField(nullable=True),
}
    fields_no_update = ['id',
                        'project_id',
'type',
'port_id']
foreign_keys = {'BGPVPNNetAssociation': {'id': 'bgpvpn_id'},
'BGPVPNRouterAssociation': {'id': 'bgpvpn_id'},
                    'BGPVPNPortAssociation': {'id': 'bgpvpn_id'},
                    'BGPVPNPortAssociationRoute': {'id': 'bgpvpn_id'},
}
@classmethod
def modify_fields_from_db(cls, db_obj):
result = super(BGPVPN, cls).modify_fields_from_db(db_obj)
for field in ['route_targets',
'import_targets',
'export_targets',
'route_distinguishers']:
if field in result:
result[field] = (result[field].split(',')
if result[field] else [])
return result
@classmethod
def modify_fields_to_db(cls, fields):
result = super(BGPVPN, cls).modify_fields_to_db(fields)
for field in ['route_targets',
'import_targets',
'export_targets',
'route_distinguishers']:
if field in result:
result[field] = ','.join(result.get(field, []))
return result
@base.NeutronObjectRegistry.register
class BGPVPNNetAssociation(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
new_facade = True
db_model = bgpvpn_db.BGPVPNNetAssociation
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
'bgpvpn_id': obj_fields.StringField(),
'bgpvpn': obj_fields.ObjectField('BGPVPN'),
'network_id': obj_fields.StringField(),
'subnets': common_types.ListOfDictOfMiscValuesField(nullable=True)
}
fields_no_update = ['id',
'project_id',
'bgpvpn_id',
'network_id']
synthetic_fields = ['bgpvpn',
'subnets']
def __init__(self, context=None, **kwargs):
super(BGPVPNNetAssociation, self).__init__(context, **kwargs)
def create(self):
with self.db_context_writer(self.obj_context):
super(BGPVPNNetAssociation, self).create()
self.obj_load_attr('subnets')
def obj_load_attr(self, attrname):
if attrname == 'subnets':
self._load_subnets()
else:
super(BGPVPNNetAssociation, self).obj_load_attr(attrname)
def _load_subnets(self, db_obj=None):
# pylint: disable=no-member
subnets_info = _get_subnets_info(self.obj_context, self.network_id)
setattr(self, 'subnets', subnets_info)
self.obj_reset_changes(['subnets'])
def from_db_object(self, obj):
super(BGPVPNNetAssociation, self).from_db_object(obj)
self._load_subnets(obj)
def all_subnets(self, network_id):
# pylint: disable=no-member
return self.subnets
@base.NeutronObjectRegistry.register
class BGPVPNRouterAssociation(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
new_facade = True
db_model = bgpvpn_db.BGPVPNRouterAssociation
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
'bgpvpn_id': obj_fields.StringField(),
'bgpvpn': obj_fields.ObjectField('BGPVPN'),
'router_id': obj_fields.StringField(),
'connected_networks':
common_types.ListOfDictOfMiscValuesField(nullable=True)
}
fields_no_update = ['id',
'project_id',
'bgpvpn_id',
'router_id']
synthetic_fields = ['bgpvpn',
'connected_networks']
def __init__(self, context=None, **kwargs):
super(BGPVPNRouterAssociation, self).__init__(context, **kwargs)
def create(self):
with self.db_context_writer(self.obj_context):
super(BGPVPNRouterAssociation, self).create()
self.obj_load_attr('connected_networks')
def update(self):
with self.db_context_writer(self.obj_context):
if 'connected_networks' in self.obj_what_changed():
self.obj_load_attr('connected_networks')
super(BGPVPNRouterAssociation, self).update()
def obj_load_attr(self, attrname):
if attrname == 'connected_networks':
return self._load_con
|
rtts/qqq
|
qqq/views.py
|
Python
|
gpl-3.0
| 3,044 | 0.012155 |
from django.http import HttpResponse, Http404
from django.template.loader import get_template
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage
from django.utils.translation import ugettext as _
from tagging.models import Tag
from messages.models import Message
from settings import LANGUAGE_CODE as lang
from qqq.models import Contribution
from qqq.questions.models import Question
from qqq.revisions.models import Revision
from qqq.collections.models import Collection
from qqq.posts.models import Post
import logging
# the number of results to paginate by
RESULTS_PER_PAGE = 25
def home(request):
"""
Serves the home page, which depends on whether the user is logged in or not.
"""
if request.user.is_authenticated():
return participate(request)
else:
c = RequestContext(request)
if lang == "nl":
c['frontpage'] = 'frontpage_nl.html'
else:
c['frontpage'] = 'frontpage_en.html'
t = get_template('home_public.html')
    c['tags_list'] = Tag.objects.cloud_for_model(Question, steps=9, min_count=None)
return HttpResponse(t.render(c))
###################################################################################
#################################### MEMBERS ONLY #################################
###################################################################################
def participate(request):
"""
Serves the home page for logged-in users
"""
t = get_template('home_members.html')
c = RequestContext(request)
filter = request.GET.get(_('filter'), False)
# behold some serious django-fu!
if filter == _('questions'):
c['filter'] = 'questions'
questions = Question.objects.all()
objects = Contribution.objects.filter(question__in=questions).select_related('user', 'question', 'revision', 'collection', 'post', 'tagaction')
elif filter == _('improvements'):
c['filter'] = 'improvements'
revisions = Revision.objects.all()
objects = Contribution.objects.filter(revision__in=revisions).select_related('user', 'question', 'revision', 'collection', 'post', 'tagaction')
elif filter == _('collections'):
c['filter'] = 'collections'
collections = Collection.objects.all()
objects = Contribution.objects.filter(collection__in=collections).select_related('user', 'question', 'revision', 'collection', 'post', 'tagaction')
elif filter == _('posts'):
c['filter'] = 'posts'
posts = Post.objects.all()
objects = Contribution.objects.filter(post__in=posts).select_related('user', 'question', 'revision', 'collection', 'post', 'tagaction')
else:
objects = Contribution.objects.all().select_related('user', 'question', 'revision', 'collection', 'post', 'tagaction')
p = Paginator(objects, RESULTS_PER_PAGE)
c['type'] = {'all': True}
c['paginator'] = p
try:
c['feed'] = p.page(request.GET.get(_('page'), '1'))
except EmptyPage:
raise Http404
c['message_list'] = Message.objects.inbox_for(request.user)
return HttpResponse(t.render(c))
|
opinkerfi/nago
|
nago/extensions/checkresults.py
|
Python
|
agpl-3.0
| 4,996 | 0.003203 |
# -*- coding: utf-8 -*-
""" Get and post nagios checkresults between nago instances
This extension allows getting status data from a local Nagios server.
It also allows pushing checkresults into a local Nagios server, thereby updating Nagios status.
"""
from pynag.Parsers import mk_livestatus, config
import time
import os
import os.path
import tempfile
from nago.core import nago_access
import nago.extensions.settings
@nago_access()
def get():
""" Get all nagios status information from a local nagios instance
"""
livestatus = mk_livestatus()
hosts = livestatus.get_hosts()
services = livestatus.get_services()
result = {}
result['hosts'] = hosts
result['services'] = services
return result
@nago_access()
def post(hosts=None, services=None, check_existance=True, create_services=True, create_hosts=False):
""" Puts a list of hosts into local instance of nagios checkresults
Arguments:
hosts -- list of dicts, like one obtained from get_checkresults
services -- list of dicts, like one obtained from get_checkresults
check_existance -- If True, check (and log) if objects already exist before posting
create_services -- If True, autocreate non-existing services (where the host already exists)
create_hosts -- If True, autocreate non-existing hosts
"""
nagios_config = config()
    nagios_config.parse_maincfg()
    check_result_path = nagios_config.get_cfg_value("check_result_path")
    fd, filename = tempfile.mkstemp(prefix='c', dir=check_result_path)
if not hosts:
hosts = []
if not services:
services = []
if check_existance:
checkresults_overhaul(hosts, services, create_services=create_services, create_hosts=create_hosts)
checkresults = '### Active Check Result File Made by Nago ###\n'
checkresults += 'file_time=%s' % (int(time.time()))
checkresults = ''
for host in hosts:
checkresults += _format_checkresult(**host)
for service in services:
checkresults += _format_checkresult(**service)
os.write(fd, checkresults)
# Cleanup and make sure our file is readable by nagios
os.close(fd)
os.chmod(filename, 0644)
# Create an ok file, so nagios knows it's ok to reap our changes
file('%s.ok' % filename, 'w')
@nago_access()
def send(remote_host=None):
""" Send local nagios data to a remote nago instance """
my_data = get()
if not remote_host:
remote_host = nago.extensions.settings.get('server')
remote_node = nago.core.get_node(remote_host)
remote_node.send_command('checkresults', 'post', **my_data)
return "checkresults sent to %s" % remote_host
def checkresults_overhaul(hosts, services, create_services, create_hosts):
""" Iterates through hosts and services, and filters out those who do not exist in our local monitoring core
If create_services or create_hosts are defined, then
"""
def _format_checkresult(**kwargs):
""" Returns a string in a nagios "checkresults" compatible format """
o = {}
o['check_type'] = '1'
o['check_options'] = '0'
o['scheduled_check'] = '1'
o['reschedule_check'] = '1'
o['latency'] = '0.0'
o['start_time'] = '%5f' % time.time()
o['finish_time'] = '%5f' % time.time()
o['early_timeout'] = '0'
o['exited_ok'] = '1'
o['long_plugin_output'] = ''
o['performance_data'] = ''
o.update(locals())
o.update(kwargs)
del o['kwargs']
del o['o']
template = _host_check_result
# Escape all linebreaks if we have them
for k, v in o.items():
if isinstance(v, basestring) and '\n' in v:
o[k] = v.replace('\n', '\\n')
# Livestatus returns slightly different output than status.dat
# Lets normalize everything to status.dat format
if 'name' in o and not 'host_name' in o:
o['host_name'] = o['name']
if 'state' in o and not 'return_code' in o:
o['return_code'] = o['state']
if 'description' in o and not 'service_description' in o:
o['service_description'] = o['description']
if not o['performance_data'] and 'perf_data' in o:
o['performance_data'] = o['perf_data']
    # If this is a service (as opposed to a host), let's add the service_description field to our output
if 'service_description' in o:
template += "service_description={service_description}\n"
if not o['performance_data'].endswith('\\n'):
o['performance_data'] += '\\n'
# Format the string and return
return template.format(**o) + '\n'
# This is an example of what checkresult file looks like to nagios. This is used by
# _format_checkresult()
_host_check_result = """
host_name={host_name}
check_type={check_type}
check_options=0
scheduled_check=1
reschedule_check=1
latency=0.0
start_time={start_time}
finish_time={finish_time}
early_timeout=0
exited_ok=1
return_code={return_code}
output={plugin_output}{long_plugin_output} | {performance_data}
"""
|
jeremiahyan/odoo
|
addons/l10n_ar/tests/__init__.py
|
Python
|
gpl-3.0
| 122 | 0 |
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import common
from . import test_manual
| |
cstewart90/kattis-python
|
easiest/easiest.py
|
Python
|
mit
| 428 | 0 |
"
|
""
https://open.kattis.com/problems/easiest
"""
import sys
def sum_digits(number):
sum_of_digits = 0
while number:
sum_of_digits, number = sum_of_digits + number % 10, number // 10
return sum_of_digits
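# Worked example: sum_digits(1234) peels off 4, 3, 2, 1 and returns 10.
# The loop below then searches for the smallest p >= 11 such that
# sum_digits(n) == sum_digits(n * p), as required by the Kattis problem.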
for line in sys.stdin:
n = int(line)
if n == 0:
break
p = 11
    while True:
if sum_digits(n) == sum_digits(n * p):
print(p)
break
p += 1
|
IZSVenezie/VetEpiGIS-Group
|
plugin/export.py
|
Python
|
gpl-3.0
| 17,939 | 0.001672 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
VetEpiGIS-Group
A QGIS plugin
Spatial functions for vet epidemiology
-------------------
begin : 2016-05-06
git sha : $Format:%H$
copyright : (C) 2016 by Norbert Solymosi
email : solymosi.norbert@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os, shutil
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt, QSettings, QCoreApplication, QFile, QFileInfo, QDate, QVariant, \
    pyqtSignal, QRegExp, QDateTime, QTranslator, QDir, QIODevice, QTextStream
from qgis.core import QgsDataSourceURI
from PyQt5.QtSql import *
import psycopg2
import psycopg2.extensions
# use unicode!
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
from export_dialog import Ui_Dialog
class Dialog(QDialog, Ui_Dialog):
def __init__(self):
"""Constructor for the dialog.
"""
QDialog.__init__(self)
self.setupUi(self)
self.plugin_dir = ''
self.settings = ''
# # self.comboBox.currentIndexChanged.connect(self.seltype)
# self.commandLinkButton.clicked.connect(self.createNewSLdb)
# self.toolButton.clicked.connect(self.dbSource)
#
# self.groupBox.clicked.connect(self.seltype)
# self.groupBox_2.clicked.connect(self.seltype)
#
# self.commandLinkButton_2.clicked.connect(self.createPGtables)
# self.lineEdit.setText('/home/sn/dev/QGISplugins/VetEpiGIS/groupdata/c.sqlite')
# def dbSource(self):
# dbpath = QFileDialog.getOpenFileName(self, 'Select file', QDir.currentPath(), 'SpatiaLite file (*.sqlite *.*)')
# if not os.path.isfile(dbpath):
# self.lineEdit.setText(dbpath)
#
#
# def seltype(self):
# if self.groupBox.isChecked():
# self.groupBox_2.setChecked(False)
# self.groupBox.setChecked(True)
#
# if self.groupBox_2.isChecked():
# self.groupBox.setChecked(False)
# self.groupBox_2.setChecked(True)
#
# # self.tabWidget.setCurrentIndex(self.comboBox.currentIndex())
# # if self.comboBox.currentText()=='SpatiaLite':
# # self.tabWidget.setCurrentIndex(0)
# # else:
# # self.tabWidget.setCurrentIndex(1)
#
#
# def createNewSLdb(self):
# fileName = QFileDialog.getSaveFileName(self, caption='Create new SpatiaLite database')
# try:
# QApplication.setOverrideCursor(Qt.WaitCursor)
# file = QFile(fileName + '.sqlite')
# dbpath = QFileInfo(file).absoluteFilePath()
# dbfold = os.path.join(self.plugin_dir, 'db')
# if not os.path.isfile(dbpath):
# shutil.copy(os.path.join(dbfold, 'base.sqlite'), dbpath)
# self.lineEdit.setText(dbpath)
#
# db = QSqlDatabase.addDatabase('QSPATIALITE')
# db.setDatabaseName(dbpath)
# db.open()
# query = db.exec_(
# """
# CREATE TABLE outbreaks_point (
# gid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# localid text,
# code text,
# largescale text,
# disease text,
# animalno numeric,
# species text,
# production text,
# year numeric,
# status text,
# suspect text,
# confirmation text,
# expiration text,
# notes text,
# hrid text,
# timestamp text,
# grouping text
# );
# """
# )
# query = db.exec_("SELECT AddGeometryColumn('outbreaks_point', 'geom', 4326, 'POINT', 'XY');")
# query = db.exec_(
# """
# CREATE TABLE outbreaks_area (
# gid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# localid text,
# code text,
# largescale text,
# disease text,
# animalno numeric,
# species text,
# production text,
# year numeric,
# status text,
# suspect text,
# confirmation text,
# expiration text,
# notes text,
# hrid text,
# timestamp text,
# grouping text
# );
# """
# )
# query = db.exec_("SELECT AddGeometryColumn('outbreaks_area', 'geom', 4326, 'MULTIPOLYGON', 'XY');")
# query = db.exec_(
# """
# CREATE TABLE pois (
# gid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# localid text,
# code text,
# activity text,
# hrid text
# );
# """)
# query = db.exec_("SELECT AddGeometryColumn('pois', 'geom', 4326, 'POINT', 'XY');")
# query = db.exec_(
# """
# CREATE TABLE buffers (
# gid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# localid text,
# code text,
# largescale text,
# disease text,
# animalno numeric,
# species text,
# production text,
# year numeric,
# status text,
# suspect text,
# confirmation text,
# expiration text,
# notes text,
# hrid text,
# timestamp text
# );
# """)
# query = db.exec_("SELECT AddGeometryColumn('buffers', 'geom', 4326, 'MULTIPOLYGON', 'XY');")
# query = db.exec_(
# """
# CREATE TABLE zones (
# gid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
# localid text,
# code text,
# disease text,
# zonetype text,
# subpopulation text,
        #     validity_start text,
        #     validity_end text,
# legal_framework text,
# competent_authority text,
        #     biosecurity_measures text,
# control_of_vectors text,
# control_of_wildlife_reservoir text,
# modified_stamping_out text,
# movement_restriction text,
# stamping_out text,
# surveillance text,
# vaccination text,
# other_measure text,
# related text,
# hrid text,
# timestamp text
# );
# """)
#
|
siddhika1889/Pydev-Editor
|
tests/pysrc/extendable/recursion_on_non_existent/__init__.py
|
Python
|
epl-1.0
| 31 | 0.032258 |
from unexistent_import import *
|
kabooom/plugin.video.xstream
|
resources/lib/download.py
|
Python
|
gpl-3.0
| 4,301 | 0.006278 |
# -- coding: utf-8 --
from resources.lib.gui.gui import cGui
from resources.lib.config import cConfig
from resources.lib import common
import urllib2
import xbmc
import xbmcgui
import string
import logger
import time
import os
import sys
class cDownload:
def __createProcessDialog(self):
oDialog = xbmcgui.DialogProgress()
oDialog.create('Download')
self.__oDialog = oDialog
def __createDownloadFilename(self, sTitle):
#valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
#filename = ''.join(c for c in sTitle if c in valid_chars)
filename = sTitle
filename = filename.replace(' ','_')
return filename
def download(self, url, sTitle, showDialog = True):
sTitle = u'%s' % sTitle.decode('utf-8')
self.__processIsCanceled = False
# extract header
try: header = dict([item.split('=') for item in (url.split('|')[1]).split('&')])
except: header = {}
logger.info('Header for download: %s' % (header))
url = url.split('|')[0]
sTitle = self.__createTitle(url, sTitle)
self.__sTitle = self.__createDownloadFilename(sTitle)
if showDialog:
oGui = cGui()
self.__sTitle = oGui.showKeyBoard(self.__sTitle)
if (self.__sTitle != False and len(self.__sTitle) > 0):
sPath = cConfig().getSetting('download-folder')
if sPath == '':
dialog = xbmcgui.Dialog()
sPath = dialog.browse(3, 'Downloadfolder', 'files', '')
if (sPath != ''):
sDownloadPath = xbmc.translatePath(sPath + '%s' % (self.__sTitle, ))
self.__prepareDownload(url, header, sDownloadPath)
elif self.__sTitle != False:
temp_dir = os.path.join(common.addonPath, "TEMP")
            if not os.path.isdir(temp_dir):
os.makedirs(os.path.join(temp_dir))
self.__prepareDownload(url, header, os.path.join(temp_dir, sTitle))
def __prepareDownload(self, url, header, sDownloadPath):
try:
logger.info('download file: ' + str(url) + ' to ' + str(sDownloadPath))
self.__createProcessDialog()
request = urllib2.Request(url, headers=header)
            self.__download(urllib2.urlopen(request), sDownloadPath)
        except Exception as e:
logger.error(e)
self.__oDialog.close()
def __download(self, oUrlHandler, fpath):
headers = oUrlHandler.info()
iTotalSize = -1
if "content-length" in headers:
iTotalSize = int(headers["Content-Length"])
chunk = 4096
if sys.platform.startswith('win'):
f = open(r'%s' % fpath.decode('utf-8'), "wb")
else:
f = open(r'%s' % fpath, "wb")
iCount = 0
self._startTime = time.time()
while 1:
iCount = iCount +1
data = oUrlHandler.read(chunk)
if not data or self.__processIsCanceled == True:
break
f.write(data)
self.__stateCallBackFunction(iCount, chunk, iTotalSize)
def __createTitle(self, sUrl, sTitle):
aTitle = sTitle.rsplit('.')
if (len(aTitle) > 1):
return sTitle
aUrl = sUrl.rsplit('.')
if (len(aUrl) > 1):
sSuffix = aUrl[-1]
sTitle = sTitle + '.' + sSuffix
return sTitle
def __stateCallBackFunction(self, iCount, iBlocksize, iTotalSize):
timedif = time.time() - self._startTime
currentLoaded = float(iCount * iBlocksize)
if timedif > 0.0:
avgSpd = int(currentLoaded/timedif/1024.0)
else:
avgSpd = 5
iPercent = int( currentLoaded*100/ iTotalSize)
self.__oDialog.update(iPercent, self.__sTitle, '%s/%s@%dKB/s' %(self.__formatFileSize(currentLoaded),self.__formatFileSize(iTotalSize),avgSpd))
if (self.__oDialog.iscanceled()):
self.__processIsCanceled = True
self.__oDialog.close()
def __formatFileSize(self, iBytes):
iBytes = int(iBytes)
if (iBytes == 0):
return '%.*f %s' % (2, 0, 'MB')
return '%.*f %s' % (2, iBytes/(1024*1024.0) , 'MB')
|
dbcli/pgcli
|
pgcli/magic.py
|
Python
|
bsd-3-clause
| 2,270 | 0.000441 |
from .main import PGCli
import sql.parse
import sql.connection
import logging
_logger = logging.getLogger(__name__)
def load_ipython_extension(ipython):
"""This is called via the ipython command '%load_ext pgcli.magic'"""
# first, load the sql magic if it isn't already loaded
if not ipython.find_line_magic("sql"):
ipython.run_line_magic("load_ext", "sql")
# register our own magic
ipython.register_magic_function(pgcli_line_magic, "line", "pgcli")
def pgcli_line_magic(line):
_logger.debug("pgcli magic called: %r", line)
parsed = sql.parse.parse(line, {})
# "get" was renamed to "set" in ipython-sql:
# https://github.com/catherinedevlin/ipython-sql/commit/f4283c65aaf68f961e84019e8b939e4a3c501d43
if hasattr(sql.connection.Connection, "get"):
conn = sql.connection.Connection.get(parsed["connection"])
else:
try:
conn = sql.connection.Connection.set(parsed["connection"])
# a new positional argument was added to Connection.set in version 0.4.0 of ipython-sql
except TypeError:
conn = sql.connection.Connection.set(parsed["connection"], False)
try:
# A corresponding pgcli object already exists
pgcli = conn._pgcli
_logger.debug("Reusing existing pgcli")
except AttributeError:
        # I can't figure out how to get the underlying psycopg2 connection
# from the sqlalchemy connection, so just grab the url and make a
# new connection
pgcli = PGCli()
u = conn.session.engine.url
_logger.debug("New pgcli: %r", str(u))
pgcli.connect(u.database, u.host, u.username, u.port, u.password)
conn._pgcli = pgcli
# For convenience, print the connection alias
print(f"Connected: {conn.name}")
try:
pgcli.run_cli()
except SystemExit:
pass
if n
|
ot pgcli.query_history:
return
q = pgcli.query_hi
|
story[-1]
if not q.successful:
_logger.debug("Unsuccessful query - ignoring")
return
if q.meta_changed or q.db_changed or q.path_changed:
_logger.debug("Dangerous query detected -- ignoring")
return
ipython = get_ipython()
return ipython.run_cell_magic("sql", line, q.query)
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/virt/ovirt/dmcrypt.py
|
Python
|
gpl-2.0
| 2,226 | 0 |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova.virt.libvirt import utils
_dmcrypt_suffix = '-dmcrypt'
def volume_name(base):
"""Returns the suffixed dmcrypt volume name.
This is to avoid collisions with similarly named device mapper names for
LVM volumes
"""
return base + _dmcrypt_suffix
def is_encrypted(path):
"""Returns true if the path corresponds to an encrypted disk."""
    if path.startswith('/dev/mapper'):
return path.rpartition('/')[2].endswith(_dmcrypt_suffix)
else:
return False
def create_volume(target, device, cipher, key_size, key):
"""Sets up a dmcrypt mapping
:param target: device mapper logical device name
:param device: underlying block device
:param cipher: encryption cipher string digestible by cryptsetup
:param key_size: encryption key size
:param key: encryption key as an array of unsigned bytes
"""
|
cmd = ('cryptsetup',
'create',
target,
device,
'--cipher=' + cipher,
'--key-size=' + str(key_size),
'--key-file=-')
key = ''.join(map(lambda byte: "%02x" % byte, key))
utils.execute(*cmd, process_input=key, run_as_root=True)
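# Example (illustrative sketch, values are placeholders): a caller might set up a
# mapping roughly like
#
#     create_volume('volume-0001-dmcrypt', '/dev/sdb1',
#                   'aes-xts-plain64', 512, key=bytearray(64))
#
# create_volume() hex-encodes the key before piping it to cryptsetup on stdin.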
def delete_volume(target):
"""Deletes a dmcrypt mapping
:param target: name of the mapped logical device
"""
utils.execute('cryptsetup', 'remove', target, run_as_root=True)
def list_volumes():
"""Function enumerates encrypted volumes."""
return [dmdev for dmdev in os.listdir('/dev/mapper')
if dmdev.endswith('-dmcrypt')]
|
endlessm/chromium-browser
|
third_party/chromite/cros_bisect/autotest_evaluator_unittest.py
|
Python
|
bsd-3-clause
| 24,070 | 0.0027 |
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test autotest_evaluator module."""
from __future__ import print_function
import os
from chromite.cros_bisect import autotest_evaluator
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import remote_access
from chromite.lib import remote_access_unittest
from chromite.lib import repo_util_unittest
class RemoteShScpMock(remote_access_unittest.RemoteShMock):
"""In addition to RemoteSh, it mocks ScpToLocal."""
ATTRS = ('RemoteSh', 'ScpToLocal')
def ScpToLocal(self, _, remote, local, **kwargs):
return self._results['ScpToLocal'].LookupResult(
([remote, local],), kwargs=kwargs)
class TestAutotestEvaluator(cros_test_lib.MockTempDirTestCase):
"""Tests AutotestEvaluator class."""
BOARD = 'samus'
TEST_NAME = 'graphics_WebGLAquarium'
METRIC = 'avg_fps_1000_fishes/summary/value'
REPORT_FILE = 'reports.json'
REMOTE_REPORT_FILE = '%s/results/default/%s/results/results-chart.json' % (
autotest_evaluator.AutotestEvaluator.AUTOTEST_BASE, TEST_NAME)
DUT_IP = '192.168.1.1'
DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_IP)
TEST_TARGET = '%s/tests/%s/control' % (
autotest_evaluator.AutotestEvaluator.AUTOTEST_BASE, TEST_NAME)
AQUARIUM_REPORT_TEMPLATE = """
{"avg_fps_1000_fishes": {
"summary": {
"units": "fps",
"type": "scalar",
"value": %s,
"improvement_direction": "up"
}
}
}"""
BUILD_LABEL = 'base'
AUTOTEST_CLIENT = autotest_evaluator.AutotestEvaluator.AUTOTEST_CLIENT
TEST_THAT_COMMAND = ['test_that', '-b', BOARD, '--fast', '--args',
'local=True', DUT_IP, TEST_NAME]
def setUp(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=False)
# Sets up default options and evaluator object.
self.options = cros_test_lib.EasyAttr(
base_dir=self.tempdir, board=self.BOARD, test_name=self.TEST_NAME,
metric=self.METRIC, metric_take_average=False, reuse_eval=True,
chromium_dir=None, cros_dir=None, eval_passing_only=False)
self.evaluator = autotest_evaluator.AutotestEvaluator(self.options)
def PrepareWebglAquariumReports(self, scores):
"""Prepares graphics_WebGLAquarium reports.
    It is a simplified version. All the test cares about is
    "avg_fps_1000_fishes/summary/value". It can produce multiple reports if
    more than one score is given.
Args:
scores: List of scores.
Returns:
A list of file names storing in report directory.
"""
result = []
num_reports = len(scores)
for ith, score in enumerate(scores, start=1):
report_file = os.path.join(
self.tempdir, 'reports',
'results-chart.%s.%d-%d.json' % (self.BUILD_LABEL, ith, num_reports))
osutils.WriteFile(report_file, self.AQUARIUM_REPORT_TEMPLATE % score)
result.append(report_file)
return result
def UpdateOptionsAndEvaluator(self, options_to_update):
"""Updates se
|
lf.options and self.evaluator.
Based on updated self.options, it creates a new AutotestEvaluator instance
and assigns to self.evaluator.
Args:
options_to_update: a dict to update self.options.
"
|
""
self.options.update(options_to_update)
self.evaluator = autotest_evaluator.AutotestEvaluator(self.options)
def testInit(self):
"""Tests that AutotestEvaluator() works as expected."""
base_dir = self.tempdir
self.assertEqual(base_dir, self.evaluator.base_dir)
self.assertEqual(os.path.join(base_dir, 'reports'),
self.evaluator.report_base_dir)
self.assertTrue(os.path.isdir(self.evaluator.report_base_dir))
self.assertEqual(self.BOARD, self.evaluator.board)
self.assertEqual(self.TEST_NAME, self.evaluator.test_name)
self.assertEqual(self.METRIC, self.evaluator.metric)
self.assertFalse(self.evaluator.metric_take_average)
self.assertTrue(self.evaluator.reuse_eval)
self.assertEqual(os.path.join(base_dir, 'chromium'),
self.evaluator.chromium_dir)
# With chromium_dir specified and flip booleans.
self.UpdateOptionsAndEvaluator(
dict(chromium_dir='/tmp/chromium', reuse_eval=False))
self.assertFalse(self.evaluator.metric_take_average)
self.assertFalse(self.evaluator.reuse_eval)
self.assertEqual('/tmp/chromium', self.evaluator.chromium_dir)
def testInitMissingRequiredArgs(self):
"""Tests that AE() raises exception when required options are missing."""
options = cros_test_lib.EasyAttr()
with self.assertRaises(Exception) as cm:
autotest_evaluator.AutotestEvaluator(options)
exception_message = str(cm.exception)
self.assertIn('Missing command line', exception_message)
self.assertIn('AutotestEvaluator', exception_message)
for arg in autotest_evaluator.AutotestEvaluator.REQUIRED_ARGS:
self.assertIn(arg, exception_message)
def testRunTestFromDut(self):
"""Tests that RunTestFromDut() invokes expected commands."""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=0,
mock_attr='ScpToLocal')
self.assertTrue(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutSanityCheckFail(self):
"""Tests RunTestFromDut() when autotest control file is missing."""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutLsSshError(self):
"""Tests RunTestFromDut() when autotest control file is missing."""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET],
returncode=remote_access.SSH_ERROR_CODE)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutAutotestSshErrorWithEvalPassingOnly(self):
"""Tests RunTestFromDut() with failed autotest and --eval-passing-only."""
self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET],
returncode=remote_access.SSH_ERROR_CODE)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutAutotestFailWithEvalPassingOnly(self):
"""Tests RunTestFromDut() with failed autotest and --eval-passing-only."""
self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutAutotestFailWithFailsafe(self):
"""Tests RunTestFromDut() with failed autotest.
Even if the autotest fails to run, RunTestFromDut() tries to retrieve report
|
rh-marketingops/dwm
|
dwm/test/test_records.py
|
Python
|
gpl-3.0
| 11,832 | 0.008874 |
record_lookupAll_genericLookup_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
record_lookupAll_genericLookup_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
record_lookupAll_genericLookup_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
record_lookupAll_fieldSpecificLookup_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
record_lookupAll_fieldSpecificLookup_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
record_lookupAll_fieldSpecificLookup_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
record_lookupAll_normLookup_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
record_lookupAll_normLookup_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
record_lookupAll_normLookup_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
record_regexAll_genericRegex_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
record_regexAll_genericRegex_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
record_regexAll_genericRegex_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
record_regexAll_fieldSpecificRegex_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
record_regexAll_fieldSpecificRegex_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
record_regexAll_fieldSpecificRegex_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
record_regexAll_normRegex_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
record_regexAll_normRegex_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
record_regexAll_normRegex_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
record_normIncludes_included_caught = [{"emailAddress": "test@test.com", "field1": "findgoodinvaluejunk"}]
record_normIncludes_included_uncaught = [{"emailAddress": "test@test.com", "field1": "nothere"}]
record_normIncludes_excluded_caught = [{"emailAddress": "test@test.com", "field1": "findgoodinvaluejunk butstuffnobad"}]
record_normIncludes_excluded_uncaught = [{"emailAddress": "test@test.com", "field1": "findgoodinvaluejunk uncaught"}]
record_normIncludes_begins_caught = [{"emailAddress": "test@test.com", "field1": "abcdefg"}]
record_normIncludes_begins_uncaught = [{"emailAddress": "test@test.com", "field1": "hijklmnop"}]
record_normIncludes_ends_caught = [{"emailAddress": "test@test.com", "field1": "qrstuvwxyz"}]
record_normIncludes_ends_uncaught = [{"emailAddress": "test@test.com", "field1": "notalpha"}]
record_normIncludes_notChecked = [{"emailAddress": "test@test.com", "field2": "doesnotmatter"}]
record_deriveAll_deriveValue_caught = [{"emailAddress": "test@test.com", "field1": "", "field2": "findthis"}]
record_derive_sort = [{"emailAddress": "test@test.com", "field1": "", "field3": "findthis", "field4": "nofindthis"}]
record_deriveAll_deriveValue_overwriteFalse = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "findthis"}]
record_deriveAll_deriveValue_blankIfNoMatch = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "youwillnotfindthis"}]
record_deriveAll_deriveValue_uncaught = [{"emailAddress": "test@test.com", "field1": "", "field2": "dontfindthis"}]
record_deriveAll_deriveValue_notChecked = [{"emailAddress": "test@test.com", "field3": "", "field2": "findthis"}]
record_deriveAll_copyValue = [{"emailAddress": "test@test.com", "field1": "", "field2": "newvalue"}]
record_deriveAll_deriveRegex_caught = [{"emailAddress": "test@test.com", "field1": "", "field2": "findthis"}]
record_deriveAll_deriveRegex_uncaught = [{"emailAddress": "test@test.com", "field1": "", "field2": "dontfindthis"}]
record_deriveAll_deriveRegex_notChecked = [{"emailAddress": "test@test.com", "field3": "", "field2": "findthis"}]
record_deriveAll_deriveRegex_overwriteFalse = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "findthis"}]
record_deriveAll_deriveRegex_blankIfNoMatch = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "youwillnotfindthis"}]
record_deriveAll_deriveIncludes_included_caught = [{"emailAddress": "test@test.com", "field1": "", "field2": "findgoodinvaluejunk"}]
record_deriveAll_deriveIncludes_included_uncaught = [{"emailAddress": "test@test.com", "field1": "", "field2": "nothere"}]
record_deriveAll_deriveIncludes_excluded_caught = [{"emailAddress": "test@test.com", "field1": "", "field2": "findgoodinvaluejunk butstuffnobad"}]
record_deriveAll_deriveIncludes_excluded_uncaught = [{"emailAddress": "test@test.com", "field1": "", "field2": "findgoodinvaluejunk uncaught"}]
record_deriveAll_deriveIncludes_begins_caught = [{"emailAddress": "test@test.com", "field1": "", "field2": "abcdefg"}]
record_deriveAll_deriveIncludes_begins_uncaught = [{"emailAddress": "test@test.com", "field1": "", "field2": "hijklmnop"}]
record_deriveAll_deriveIncludes_ends_caught = [{"emailAddress": "test@test.com", "field1": "", "field2": "qrstuvwxyz"}]
record_deriveAll_deriveIncludes_ends_uncaught = [{"emailAddress": "test@test.com", "field1": "", "field2": "notalpha"}]
record_deriveAll_deriveIncludes_notChecked = [{"emailAddress": "test@test.com", "field2": "", "field3": "doesnotmatter"}]
record_deriveAll_deriveIncludes_overwriteFalse = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "findgoodinvaluejunk"}]
record_deriveAll_deriveIncludes_blankIfNoMatch = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "nothere"}]
#
history_genericLookup_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
history_genericLookup_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
history_genericLookup_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
history_fieldSpecificLookup_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
history_fieldSpecificLookup_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
history_fieldSpecificLookup_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
history_normLookup_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
history_normLookup_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
history_normLookup_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
history_genericRegex_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
history_genericRegex_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
history_genericRegex_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
history_fieldSpecificRegex_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
history_fieldSpecificRegex_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
history_fieldSpecificRegex_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
history_normRegex_caught = [{"emailAddress": "test@test.com", "field1": "badvalue"}]
history_normRegex_uncaught = [{"emailAddress": "test@test.com", "field1": "badvalue-uncaught"}]
history_normRegex_notChecked = [{"emailAddress": "test@test.com", "field2": "badvalue"}]
history_normIncludes_included_caught = [{"emailAddress": "test@test.com", "field1": "findgoodinvaluejunk"}]
history_normIncludes_included_uncaught = [{"emailAddress": "test@test.com", "field1": "nothere"}]
history_normIncludes_notChecked = [{"emailAddress": "test@test.com", "field2": "doesnotmatter"}]
history_deriveValue_caught = [{"emailAddress": "test@test.com", "field1": "", "field2": "findthis"}]
history_deriveValue_overwriteFalse = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "findthis"}]
history_deriveValue_blankIfNoMatch = [{"emailAddress": "test@test.com", "field1": "oldvalue", "field2": "youwillnotfindthis"}]
history_deriveValue_uncaught = [{"emailAddress": "test@test.com", "field1": "", "field2": "dontfindthis"}]
history_deriveValue_notChecked = [{
|
ncbray/pystream
|
bin/util/application/async.py
|
Python
|
apache-2.0
| 1,436 | 0.021588 |
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['async', 'async_limited']
import threading
import functools
enabled = True
def async(func):
	@functools.wraps(func)
	def async_wrapper(*args, **kargs):
		t = threading.Thread(target=func, args=args, kwargs=kargs)
t.start()
return t
if enabled:
return async_wrapper
else:
return func
def async_limited(count):
def limited_func(func):
semaphore = threading.BoundedSemaphore(count)
# closure with func and semaphore
def thread_wrap(*args, **kargs):
result = func(*args, **kargs)
semaphore.release()
return result
# closure with thread_wrap and semaphore
@functools.wraps(func)
def limited_wrap(*args, **kargs):
semaphore.acquire()
t = threading.Thread(target=thread_wrap, args=args, kwargs=kargs)
t.start()
return t
if enabled:
return limited_wrap
else:
return func
return limited_func
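# Illustrative usage (an assumption added for clarity; not part of the original module):
#
#     @async_limited(4)
#     def fetch(url):
#         ...                               # body runs on a worker thread
#
#     t = fetch('http://example.com')       # returns the started threading.Thread
#     t.join()                              # at most 4 fetch() calls run concurrently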
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/webdriver/tests/get_title/iframe.py
|
Python
|
bsd-3-clause
| 2,183 | 0.000916 |
import pytest
from tests.support.asserts import assert_success
"""
Tests that WebDriver can transcend site origins.
Many modern browsers impose strict cross-origin checks,
and WebDriver should be able to transcend these.
Although an implementation detail, certain browsers
also enforce process isolation based on site origin.
This is known to sometimes cause problems for WebDriver implementations.
"""
@pytest.fixture
def frame_doc(inline):
return inline("<title>cheese</title><p>frame")
@pytest.fixture
def one_frame_doc(inline, frame_doc):
return inline("<title>bar</title><iframe src='%s'></iframe>" % frame_doc)
@pytest.fixture
def nested_frames_doc(inline, one_frame_doc):
    return inline("<title>foo</title><iframe src='%s'></iframe>" % one_frame_doc)
def get_title(session):
return session.transport.send(
"GET", "session/{session_id}/title".format(**vars(session)))
def test_no_iframe(session, inline):
session.url = inline("<title>Foobar</title><h2>Hello</h2>")
result = get_title(session)
assert_success(result, "Foobar")
def test_iframe(session, one_frame_doc):
session.url = one_frame_doc
frame = session.find.css("iframe", all=False)
session.switch_frame(frame)
session.find.css("p", all=False)
response = get_title(session)
assert_success(response, "bar")
def test_nested_iframe(session, nested_frames_doc):
session.url = nested_frames_doc
outer_frame = session.find.css("iframe", all=False)
session.switch_frame(outer_frame)
inner_frame = session.find.css("iframe", all=False)
session.switch_frame(inner_frame)
session.find.css("p", all=False)
response = get_title(session)
assert_success(response, "foo")
@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
def test_origin(session, inline, iframe, domain):
session.url = inline("<title>foo</title>{}".format(
iframe("<title>bar</title><p>frame", domain=domain)))
frame = session.find.css("iframe", all=False)
session.switch_frame(frame)
session.find.css("p", all=False)
response = get_title(session)
assert_success(response, "foo")
|
lukecwik/incubator-beam
|
sdks/python/apache_beam/testing/test_stream_test.py
|
Python
|
apache-2.0
| 38,080 | 0.002784 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the test_stream module."""
# pytype: skip-file
import unittest
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileHeader
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileRecord
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import ElementEvent
from apache_beam.testing.test_stream import OutputFormat
from apache_beam.testing.test_stream import ProcessingTimeEvent
from apache_beam.testing.test_stream import ReverseTestStream
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.test_stream import WatermarkEvent
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.testing.test_stream_service import TestStreamServiceController
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.testing.util import equal_to_per_window
from apache_beam.transforms import trigger
from apache_beam.transforms import window
from apache_beam.transforms.window import FixedWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils import timestamp
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import PaneInfo
from apache_beam.utils.windowed_value import PaneInfoTiming
from apache_beam.utils.windowed_value import WindowedValue
class TestStreamTest(unittest.TestCase):
def test_basic_test_stream(self):
test_stream = (TestStream()
.advance_watermark_to(0)
.add_elements([
'a',
WindowedValue('b', 3, []),
TimestampedValue('c', 6)])
.advance_processing_time(10)
.advance_watermark_to(8)
.add_elements(['d'])
.advance_watermark_to_infinity()) # yapf: disable
self.assertEqual(
test_stream._events,
[
WatermarkEvent(0),
ElementEvent([
TimestampedValue('a', 0),
TimestampedValue('b', 3),
TimestampedValue('c', 6),
]),
ProcessingTimeEvent(10),
WatermarkEvent(8),
ElementEvent([
TimestampedValue('d', 8),
]),
WatermarkEvent(timestamp.MAX_TIMESTAMP),
])
def test_test_stream_errors(self):
with self.assertRaises(
AssertionError, msg=('Watermark must strictly-monotonically advance.')):
_ = (TestStream().advance_watermark_to(5).advance_watermark_to(4))
with self.assertRaises(
AssertionError,
msg=('Must advance processing time by positive amount.')):
_ = (TestStream().advance_processing_time(-1))
with self.assertRaises(
AssertionError,
msg=('Element timestamp must be before timestamp.MAX_TIMESTAMP.')):
_ = (
TestStream().add_elements(
[TimestampedValue('a', timestamp.MAX_TIMESTAMP)]))
def test_basic_execution(self):
test_stream = (TestStream()
.advance_watermark_to(10)
.add_elements(['a', 'b', 'c'])
.advance_watermark_to(20)
.add_elements(['d'])
.add_elements(['e'])
.advance_processing_time(10)
.advance_watermark_to(300)
.add_elements([TimestampedValue('late', 12)])
.add_elements([TimestampedValue('last', 310)])
.advance_watermark_to_infinity()) # yapf: disable
class RecordFn(beam.DoFn):
def process(
self,
element=beam.DoFn.ElementParam,
timestamp=beam.DoFn.TimestampParam):
yield (element, timestamp)
options = PipelineOptions()
options.view_as(StandardOptions).streaming = True
with TestPipeline(options=options) as p:
my_record_fn = RecordFn()
records = p | test_stream | beam.ParDo(my_record_fn)
assert_that(
records,
equal_to([
('a', timestamp.Timestamp(10)),
('b', timestamp.Timestamp(10)),
('c', timestamp.Timestamp(10)),
('d', timestamp.Timestamp(20)),
('e', timestamp.Timestamp(20)),
('late', timestamp.Timestamp(12)),
('last', timestamp.Timestamp(310)),
]))
def test_multiple_outputs(self):
"""Tests that the TestStream supports emitting to multiple PCollections."""
letters_elements = [
TimestampedValue('a', 6),
TimestampedValue('b', 7),
TimestampedValue('c', 8),
]
numbers_elements = [
TimestampedValue('1', 11),
TimestampedValue('2', 12),
TimestampedValue('3', 13),
]
test_stream = (TestStream()
.advance_watermark_to(5, tag='letters')
.add_elements(letters_elements, tag='letters')
.advance_watermark_to(10, tag='numbers')
.add_elements(numbers_elements, tag='numbers')) # yapf: disable
class RecordFn(beam.DoFn):
def process(
self,
element=beam.DoFn.ElementParam,
timestamp=beam.DoFn.TimestampParam):
yield (element, timestamp)
options = StandardOptions(streaming=True)
p = TestPipeline(options=options)
main = p | test_stream
letters = main['letters'] | 'record letters' >> beam.ParDo(RecordFn())
numbers = main['numbers'] | 'record numbers' >> beam.ParDo(RecordFn())
assert_that(
letters,
equal_to([('a', Timestamp(6)), ('b', Timestamp(7)),
('c', Timestamp(8))]),
label='assert letters')
assert_that(
numbers,
equal_to([('1', Timestamp(11)), ('2', Timestamp(12)),
('3', Timestamp(13))]),
label='assert numbers')
p.run()
def test_multiple_outputs_with_watermark_advancement(self):
"""Tests that the TestStream can independently control output watermarks."""
# Purposely set the watermark of numbers to 20 then letters to 5 to test
# that the watermark advancement is per PCollection.
#
# This creates two PCollections, (a, b, c) and (1, 2, 3). These will be
# emitted at different times so that they will have different windows. The
# watermark advancement is checked by checking their windows. If the
# watermark does not advance, then the windows will be [-inf, -inf). If the
    # windows do not advance separately, then the PCollections will both be
    # windowed in [15, 30).
letters_elements = [
TimestampedValue('a', 6),
TimestampedValue('b', 7),
TimestampedValue('c', 8),
]
numbers_elements = [
TimestampedValue('1', 21),
TimestampedValue('2', 22),
TimestampedValue('3', 23),
]
test_stream = (TestStream()
.advance_watermark_to(0, tag='letters')
.advance_watermark_to(0, tag='numbers')
.advance_wa
|
umyuu/Sample
|
src/Python3/Q113190/exsample.py
|
Python
|
mit
| 512 | 0.007813 |
# -*- coding: utf8 -*-
import subprocess
import os
from pathlib import Path
|
cwd = os.getcwd()
try:
print(os.getcwd())
subprocess.call(['make'])
    # res = subprocess.check_output('uname -a', shell=True)
res = subprocess.check_output(
r"./darknet detector test cfg/coco.data cfg/yolo.cfg yolo.weights /home/zaki/NoooDemo/0001.jpg", shell=True)
except Exception as ex:
print(ex)
finally:
os.chdir(cwd)
print(res)
def main() -> None:
pass
if __name__ == '__main__':
main()
|
OverTheWireOrg/OverTheWire-website
|
patreon/patreon.py
|
Python
|
mit
| 745 | 0.004027 |
#!/usr/bin/env python
import sys, json, csv, pprint
patrons = []
with open(sys.argv[1]) as csvfile:
csvrows = csv.DictReader(csvfile)
for row in csvrows:
name = row["Name"]
pledge = float(row["Pledge $"].replace("$",""))
lifetime = float(row["Lifetime $"].replace("$",""))
status = row["Patron Status"]
        details = row["Additional Details"]
since = row["Patronage Since Date"]
if details != "":
name = details
if status == "Active patron":
if lifetime > 0 and pledge >= 5:
patrons += [(name, lifetime, since)]
patreons = sorted(patrons, key=lambda x: x[2])
for (name, lifetime, since) in patreons:
print "* {}".format(name)
|
monsta-hd/ml-mnist
|
ml_mnist/nn/rbm.py
|
Python
|
mit
| 8,597 | 0.002326 |
import numpy as np
import env
from base import BaseEstimator
from utils import RNG, print_inline, width_format, Stopwatch
from layers import FullyConnected
from activations import sigmoid
class RBM(BaseEstimator):
"""
Examples
--------
>>> X = RNG(seed=1337).rand(32, 256)
>>> rbm = RBM(n_hidden=100,
... k=4,
... batch_size=2,
... n_epochs=50,
... learning_rate='0.05->0.005',
... momentum='0.5->0.9',
... verbose=True,
... early_stopping=5,
... random_seed=1337)
>>> rbm
RBM(W=None, batch_size=2, best_W=None, best_epoch=None, best_hb=None,
best_recon=inf, best_vb=None, early_stopping=5, epoch=0, hb=None, k=4,
learning_rate='0.05->0.005', momentum='0.5->0.9', n_epochs=50,
n_hidden=100, persistent=True, random_seed=1337, vb=None, verbose=True)
"""
def __init__(self, n_hidden=256, persistent=True, k=1,
batch_size=10, n_epochs=10, learning_rate=0.1, momentum=0.9,
early_stopping=None, verbose=False, random_seed=None):
self.n_hidden = n_hidden
self.persistent = persistent
self.k = k # k in CD-k / PCD-k
self.batch_size = batch_size
self.n_epochs = n_epochs
self.learning_rate = learning_rate
self._learning_rate = None
self.momentum = momentum
self._momentum = None
self.early_stopping = early_stopping
self._early_stopping = self.early_stopping
self.verbose = verbose
self.random_seed = random_seed
self.W = None
self.vb = None # visible units bias
self.hb = None # hidden units bias
self.epoch = 0
self.best_W = None
self.best_vb = None
self.best_hb = None
self.best_epoch = None
self.best_recon = np.inf
self._dW = None
self._dvb = None
self._dhb = None
self._rng = None
self._persistent = None
self._initialized = False
super(RBM, self).__init__(_y_required=False)
def propup(self, v):
"""Propagate visible units activation upwards to the hidden units."""
        z = np.dot(v, self.W) + self.hb
        return sigmoid(z)
def sample_h_given_v(self, v0_sample):
"""Infer state of hidden units given visible units."""
h1_mean = self.propup(v0_sample)
h1_sample = self._rng.binomial(size=h1_mean.shape, n=1, p=h1_mean)
return h1_mean, h1_sample
def propdown(self, h):
"""Propagate hidden units activation downwards to the visible units."""
z = np.dot(h, self.W.T) + self.vb
return sigmoid(z)
def sample_v_given_h(self, h0_sample):
"""Infer state of visible units given hidden units."""
v1_mean = self.propdown(h0_sample)
v1_sample = self._rng.binomial(size=v1_mean.shape, n=1, p=v1_mean)
return v1_mean, v1_sample
def gibbs_hvh(self, h0_sample):
"""Performs a step of Gibbs sampling starting from the hidden units."""
v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return v1_mean, v1_sample, h1_mean, h1_sample
def gibbs_vhv(self, v0_sample):
"""Performs a step of Gibbs sampling starting from the visible units."""
raise NotImplementedError()
def free_energy(self, v_sample):
"""Function to compute the free energy."""
raise NotImplementedError()
def update(self, X_batch):
# compute positive phase
ph_mean, ph_sample = self.sample_h_given_v(X_batch)
# decide how to initialize chain
if self._persistent is not None:
chain_start = self._persistent
else:
chain_start = ph_sample
# gibbs sampling
for step in xrange(self.k):
nv_means, nv_samples, \
nh_means, nh_samples = self.gibbs_hvh(chain_start if step == 0 else nh_samples)
# update weights
self._dW = self._momentum * self._dW + \
np.dot(X_batch.T, ph_mean) - np.dot(nv_samples.T, nh_means)
self._dvb = self._momentum * self._dvb +\
np.mean(X_batch - nv_samples, axis=0)
self._dhb = self._momentum * self._dhb +\
np.mean(ph_mean - nh_means, axis=0)
self.W += self._learning_rate * self._dW
self.vb += self._learning_rate * self._dvb
self.hb += self._learning_rate * self._dhb
# remember state if needed
if self.persistent:
self._persistent = nh_samples
return np.mean(np.square(X_batch - nv_means))
def batch_iter(self, X):
n_batches = len(X) / self.batch_size
for i in xrange(n_batches):
start = i * self.batch_size
end = start + self.batch_size
X_batch = X[start:end]
yield X_batch
if n_batches * self.batch_size < len(X):
yield X[end:]
def train_epoch(self, X):
mean_recons = []
for i, X_batch in enumerate(self.batch_iter(X)):
mean_recons.append(self.update(X_batch))
if self.verbose and i % (len(X)/(self.batch_size * 16)) == 0:
print_inline('.')
if self.verbose: print_inline(' ')
return np.mean(mean_recons)
def _fit(self, X):
if not self._initialized:
layer = FullyConnected(self.n_hidden,
bias=0.,
random_seed=self.random_seed)
layer.setup_weights(X.shape)
self.W = layer.W
self.vb = np.zeros(X.shape[1])
self.hb = layer.b
self._dW = np.zeros_like(self.W)
self._dvb = np.zeros_like(self.vb)
self._dhb = np.zeros_like(self.hb)
self._rng = RNG(self.random_seed)
self._rng.reseed()
timer = Stopwatch(verbose=False).start()
for _ in xrange(self.n_epochs):
self.epoch += 1
if self.verbose:
print_inline('Epoch {0:>{1}}/{2} '.format(self.epoch, len(str(self.n_epochs)), self.n_epochs))
if isinstance(self.learning_rate, str):
S, F = map(float, self.learning_rate.split('->'))
self._learning_rate = S + (F - S) * (1. - np.exp(-(self.epoch - 1.)/8.)) / (
1. - np.exp(-(self.n_epochs - 1.)/8.))
else:
self._learning_rate = self.learning_rate
if isinstance(self.momentum, str):
S, F = map(float, self.momentum.split('->'))
self._momentum = S + (F - S) * (1. - np.exp(-(self.epoch - 1)/4.)) / (
1. - np.exp(-(self.n_epochs - 1)/4.))
else:
self._momentum = self.momentum
mean_recon = self.train_epoch(X)
if mean_recon < self.best_recon:
self.best_recon = mean_recon
self.best_epoch = self.epoch
self.best_W = self.W.copy()
self.best_vb = self.vb.copy()
self.best_hb = self.hb.copy()
self._early_stopping = self.early_stopping
msg = 'elapsed: {0} sec'.format(width_format(timer.elapsed(), default_width=5, max_precision=2))
msg += ' - recon. mse: {0}'.format(width_format(mean_recon, default_width=6, max_precision=4))
msg += ' - best r-mse: {0}'.format(width_format(self.best_recon, default_width=6, max_precision=4))
if self.early_stopping:
msg += ' {0}*'.format(self._early_stopping)
if self.verbose:
print msg
if self._early_stopping == 0:
return
if self.early_stopping:
self._early_stopping -= 1
def _serialize(self, params):
for attr in ('W', 'best_W',
'vb', 'best_vb',
'hb', 'best_hb'):
if attr in params and params[attr] is not None:
params[attr] = params[attr].tolist()
return p
|
cfelton/rhea
|
rhea/cores/video/vga/vga_intf.py
|
Python
|
mit
| 1,435 | 0 |
from myhdl import *
class VGA:
def __init__(self, color_depth=(10, 10, 10,)):
"""
        color_depth: the number of bits per RGB channel
"""
self.N = color_depth
# the sync signals
self.hsync = Signal(bool(1))
self.vsync = Signal(bool(1))
# the RGB signals to the video
cd = color_depth
self.red = Signal(intbv(0)[cd[0]:])
self.green = Signal(intbv(0)[cd[1]:])
        self.blue = Signal(intbv(0)[cd[2]:])
# logic VGA timing signals, used internally only
self.pxlen = Signal(bool(0))
self.active = Signal(bool(0))
        # @todo: move this to the `vga_sync`; this is specific to the
        #        VGA driver and not the interfaces ???
# these are used for verification and debug only.
self.states = enum('NONE', 'ACTIVE',
                           'HOR_FRONT_PORCH', 'HSYNC', 'HOR_BACK_PORCH',
                           'VER_FRONT_PORCH', 'VSYNC', 'VER_BACK_PORCH')
self.state = Signal(self.states.ACTIVE)
def assign(self, hsync, vsync, red, green, blue, pxlen=None, active=None):
""" in some cases discrete signals are connected """
self.hsync = hsync
self.vsync = vsync
self.red = red
self.green = green
self.blue = blue
if pxlen is not None:
self.pxlen = pxlen
        if active is not None:
self.active = active
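# Illustrative usage (an assumption added for clarity; not part of the original module):
#
#     vga = VGA(color_depth=(8, 8, 8))      # 24-bit colour interface
#     # vga.hsync, vga.vsync and vga.red/green/blue are MyHDL Signals that a
#     # sync driver can be wired to, or connected via vga.assign(...)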
|
pwicks86/adventofcode2016
|
day03/p1.py
|
Python
|
mit
| 190 | 0.010526 |
f = open('input.txt')
triangles = [map(int,l.split()) for l in f.readlines()]
possible = 0
for t in triangles:
t.sort()
if t[0] + t[1] > t[2]:
possible += 1
print(possible)
|
wangqiang8511/troposphere
|
examples/EC2Conditions.py
|
Python
|
bsd-2-clause
| 2,279 | 0 |
from __future__ import print_function
from troposphere import (
Template, Parameter, Ref, Condition, Equals, And, Or, Not, If
)
from troposphere import ec2
parameters = {
"One": Parameter(
"One",
Type="String",
),
"Two": Parameter(
"Two",
Type="String",
),
"Three": Parameter(
"Three",
Type="String",
),
"Four": Parameter(
"Four",
Type="String",
),
"SshKeyName": Parameter(
"SshKeyName",
Type="String",
)
}
conditions = {
"OneEqualsFoo": Equals(
Ref("One"),
"Foo"
),
"NotOneEqualsFoo": Not(
Condition("OneEqualsFoo")
),
"BarEqualsTwo": Equals(
"Bar",
Ref("Two")
),
"ThreeEqualsFour": Equals(
Ref("Three"),
Ref("Four")
),
"OneEqualsFooOrBarEqualsTwo": Or(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo")
),
"OneEqualsFooAndNotBarEqualsTwo": And(
Condition("OneEqualsFoo"),
Not(Condition("BarEqualsTwo"))
),
"OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo"),
        Equals(Ref("Three"), "Pft")
),
"OneIsQuzAndThreeEqualsFour": And(
Equals(Ref("One"), "Quz"),
Condition("ThreeEqualsFour")
),
"LaunchInstance": And(
Condition("OneEqualsFoo"),
Condition("NotOneEqualsFoo"),
Condition("BarEqualsTwo"),
Condition("OneEqualsFooAndNotBarEqualsTwo"),
        Condition("OneIsQuzAndThreeEqualsFour")
),
"LaunchWithGusto": And(
Condition("LaunchInstance"),
Equals(Ref("One"), "Gusto")
)
}
resources = {
"Ec2Instance": ec2.Instance(
"Ec2Instance",
Condition="LaunchInstance",
        ImageId=If("OneEqualsFoo", "ami-12345678", "ami-87654321"),
InstanceType="t1.micro",
KeyName=Ref("SshKeyName"),
SecurityGroups=["default"],
)
}
def template():
t = Template()
for p in parameters.values():
t.add_parameter(p)
for k in conditions:
t.add_condition(k, conditions[k])
for r in resources.values():
t.add_resource(r)
return t
print(template().to_json())
|
knyghty/bord
|
bord/urls.py
|
Python
|
mit
| 413 | 0 |
"""Root
|
URL definitions."""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^accounts/', include('accounts.urls', namespace='accounts')),
url(r'^admin/', admin.site.urls),
url(r'^', include('core.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
hitaian/dogSpider
|
runSpider.py
|
Python
|
bsd-2-clause
| 1,090 | 0.013889 |
from Spider import *
from config import *
class run():
def __init__(self):
pass
    # Fetch all category lists
def get_list(self):
list = cfg.getType()
for l in list.keys():
print(l)
return list
    # Get the category chosen by the user
def input_type(self):
start = spider(url)
list = self.get_list()
        #type = input('Enter the category to download (just copy it from the list above):\n')
type = '亚洲无码'
if type in cfg.getType() and type == '亚洲无码':
            print('found')
newDir = start.newDir(type)
            listLink = list[type]
            # Fetch the links from the listing page
oneList = start.openLink(listLink,type)
            # Start parsing the content pages
#oneContent = start.getContent(oneList,type)
        elif type in cfg.getType() and type == '成人小说':
            pass
else :
            print('Category not found or downloads of this type are not supported yet')
if __name__ == '__main__':
cfg = config()
url = cfg.url
a = run()
a.input_type()
|
pipermerriam/perjury
|
setup.py
|
Python
|
bsd-2-clause
| 565 | 0.021239 |
#!/usr/bin/env python
from setuptools import setup, find_packages
__doc__ = """
Falsify data
"""
version = '0.0.1'
setup(name='perjury',
version=version,
description=__doc__,
author='Aaron Merriam',
author_email='aaronmerriam@gmail.com',
keywords='content',
long_description=__doc__,
url='https://github.com/aaronmerriam/foundry',
packages=find_packages(),
platforms="any",
license='BSD',
test_suite='tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Natural Language :: English',
],
)
|
rgtjf/Semantic-Texual-Similarity-Toolkits
|
stst/features/features_nn.py
|
Python
|
mit
| 4,605 | 0.001533 |
# coding: utf8
"""
1. Train a separate Doc2Vec model for each document set
2. Train a single Doc2Vec model on all documents together
"""
import json
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
from stst.modules.features import Feature
from stst import utils
from stst import config
from stst.data import dict_utils
from stst.libs.kernel import vector_kernel as vk
class Doc2VecFeature(Feature):
def extract_instances(self, train_instances):
        sentences = []
for idx, train_instance in enumerate(train_instances):
sa, sb = train_instance.get_word(type='lemma', lower=True)
sentences.append(TaggedDocument(words=sa, tags=['sa_%d' % idx]))
            sentences.append(TaggedDocument(words=sb, tags=['sb_%d' % idx]))
model = Doc2Vec(sentences, size=25, window=3, min_count=0, workers=10, iter=1000)
features = []
infos = []
for idx in range(len(train_instances)):
vec_a = model.docvecs['sa_%d' % idx]
vec_b = model.docvecs['sb_%d' % idx]
feature, info = vk.get_all_kernel(vec_a, vec_b)
features.append(feature)
infos.append([])
# infos.append([vec_a, vec_b])
return features, infos
# def load_instances(self, train_instances):
# """
# extract cosine distance from already trained feature file
# without modify the feature_file
# this function's priority is higher that the above extract_instances
# """
#
# _features, _n_dim, _n_instance = Feature.load_feature_from_file(self.feature_file)
# features = []
# infos = []
# ''' get features from train instances'''
# for _feature in _features:
# feature = Feature._feat_string_to_list(_feature, _n_dim)
# features.append([feature[1]])
# infos.append(['cosine'])
#
# features = [ Feature._feat_list_to_string(feature) for feature in features ]
#
# return features, 1, _n_instance
class Doc2VecGlobalFeature(Feature):
def __init__(self, **kwargs):
super(Doc2VecGlobalFeature, self).__init__(**kwargs)
def extract_instances(self, train_instances):
model = dict_utils.DictLoader().load_doc2vec()
file_name = self.train_file.split('/')[-1]
features = []
infos = []
for idx in range(len(train_instances)):
vec_a = model.docvecs['%s_%d_sa' % (file_name, idx)]
vec_b = model.docvecs['%s_%d_sb' % (file_name, idx)]
# train_instance = train_instances[idx]
# sa, sb = train_instance.get_word(type='lemma', stopwords=True, lower=True)
# vec_a = model.infer_vector(sa)
# vec_b = model.infer_vector(sb)
feature, info = vk.get_all_kernel(vec_a, vec_b)
features.append(feature)
infos.append(info)
return features, infos
class ICLRScoreFeature(Feature):
def __init__(self, nntype, **kwargs):
super(ICLRScoreFeature, self).__init__(**kwargs)
self.nntype = nntype
self.feature_name = self.feature_name + '-%s' % (nntype)
def extract_instances(self, train_instances):
features = []
infos = []
input_file = self.feature_file.split('/')[-2] + '.txt'
f_in = utils.create_read_file(config.NN_FEATURE_PATH + '/' + self.nntype + '/' + input_file)
for line in f_in:
line = line.strip()
obj = json.loads(line)
sc = obj[0] / 5.0
features.append([sc])
infos.append([])
print(len(features), features[0])
return features, infos
class ICLRVectorFeature(Feature):
def __init__(self, nntype, **kwargs):
super(ICLRVectorFeature, self).__init__(**kwargs)
self.nntype = nntype
self.feature_name = self.feature_name + '-%s' % (nntype)
def extract_instances(self, train_instances):
features = []
infos = []
input_file = self.feature_file.split('/')[-2] + '.txt'
f_in = utils.create_read_file(config.NN_FEATURE_PATH + '/' + self.nntype + '/' + input_file)
for line in f_in:
line = line.strip()
obj = json.loads(line)
emb1 = obj[1]
emb2 = obj[2]
emb1 = vk.normalize(emb1)
emb2 = vk.normalize(emb2)
feats, info = vk.get_all_kernel(emb1, emb2)
features.append(feats)
infos.append(info)
print(len(features), features[0], infos[0])
return features, infos
|
bowen0701/algorithms_data_structures
|
lc0796_rotate_string.py
|
Python
|
bsd-2-clause
| 1,408 | 0.00071 |
"""Leetcode 796. Rotate String
Easy
URL: https://leetcode.com/problems/rotate-string/
We are given two strings, A and B.
A shift on A consists of taking string A and moving the leftmost character to
the rightmost position. For example, if A = 'abcde', then it will be 'bcdea'
after one shift on A. Return True if and only if A can become B after some
number of shifts on A.
Example 1:
Input: A = 'abcde', B = 'cdeab'
Output: true
Example 2:
Input: A = 'abcde', B = 'abced'
Output: false
Note:
A and B will have length at most 100.
"""
class SolutionStringConcatSubstring(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
Time complexity: O(2n+2n*n)=O(n^2).
Space complexity:O(n).
"""
# Check if lengths are not equal.
if len(A) != len(B):
return False
        # If B is a rotation of A, then B is a substring of the concatenated string A + A.
AA = A + A
if B in AA:
return True
else:
return False
def main():
# Input: A = 'abcde', B = 'cdeab'
# Output: true
A = 'abcde'
B = 'cdeab'
print SolutionStringConcatSubstring().rotateString(A, B)
# Input: A = 'abcde', B = 'abced'
|
# Output: false
A = 'abcde'
B = 'abced'
print SolutionStringConcatSubstring().rotateString(A, B)
if __name__ == '__main__':
main()
|
cloudnull/bookofnova
|
bookofnova/logger.py
|
Python
|
apache-2.0
| 3,372 | 0.000593 |
# ==============================================================================
# Copyright [2013] [Kevin Carter]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
from bookofnova.info import __appname__ as appname
class NoLogLevelSet(Exception):
pass
class Logging(object):
def __init__(self, log_level, log_file=None):
self.log_level = log_level
self.log_file = log_file
def logger_setup(self):
"""
Setup logging for your application
"""
logger = logging.getLogger("%s" % (appname.upper()))
avail_level = {'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'CRITICAL': logging.CRITICAL,
'WARN': logging.WARN,
'ERROR': logging.ERROR}
_log_level = self.log_level.upper()
if _log_level in avail_level:
lvl = avail_level[_log_level]
logger.setLevel(lvl)
formatter = logging.Formatter("%(asctime)s - %(name)s:%(levelname)s ==>"
" %(message)s")
else:
raise NoLogLevelSet('I died because you did not set a known log level')
        # Building Handler
if self.log_file:
handler = logging.FileHandler(self.log_file)
else:
handler = logging.StreamHandler()
handler.setLevel(lvl)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def return_logfile(filename):
"""
Return a path for logging file.
IF "/var/log/" does not exist, or you dont have write permissions to
"/var/log/" the log file will be in your working directory
    Check for ROOT user; if not root, log to the working directory
"""
if os.path.isfile(filename):
return filename
else:
user = os.getuid()
logname = ('%s' % filename)
if not user == 0:
logfile = logname
else:
if os.path.isdir('/var/log'):
log_loc = '/var/log'
else:
try:
os.mkdir('%s' % log_loc)
                    logfile = '%s/%s' % (log_loc, logname)
except Exception:
logfile = '%s' % logname
return logfile
def load_in(log_file=None, log_level='info', output=None):
"""
    Load in the log handler. If output is None, the system will build and use the
    default log facility.
"""
if not output:
if log_file:
_log_file = return_logfile(filename=log_file)
log = Logging(log_level=log_level, log_file=_log_file)
output = log.logger_setup()
else:
output = Logging(log_level=log_level).logger_setup()
return output
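# Illustrative usage (an assumption added for clarity; not part of the original module):
#
#     log = load_in(log_file='/var/log/bookofnova.log', log_level='debug')
#     log.info('logger ready')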
|
scottbarstow/iris-python
|
iris_sdk/models/maps/cities.py
|
Python
|
mit
| 142 | 0.014085 |
#!/usr/bin/env python
from iris_sdk.models.maps.base_map import BaseMap
class CitiesMap(BaseMap):
    result_count = None
cities = None
|
braincorp/robustus
|
robustus/tests/test_robustus.py
|
Python
|
mit
| 8,887 | 0.006414 |
# =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import doctest
import logging
import os
import pytest
import robustus
from robustus.detail import check_module_available
from robustus.detail.utility import run_shell, check_run_shell
import shutil
import subprocess
import tempfile
def test_doc_tests():
doctest.testmod(robustus, raise_on_error=True)
doctest.testmod(robustus.detail.utility, raise_on_error=True)
def test_run_shell():
def check(command, expected_ret_code, expected_output, verbose):
tf = tempfile.TemporaryFile('w+')
assert run_shell(command, shell=True, stdout=tf, verbose=verbose) == expected_ret_code
tf.seek(0)
assert tf.read() == expected_output
try:
exception_occured = False
check_run_shell(command, shell=True, verbose=verbose)
except subprocess.CalledProcessError:
exception_occured = True
assert exception_occured == (exception_occured != 0)
check('echo robustus', 0, 'robustus\n', verbose=True)
check('echo robustus', 0, 'robustus\n', verbose=False)
check('echo robustus && exit 1', 1, 'robustus\n', verbose=True)
check('echo robustus && exit 1', 1, 'robustus\n', verbose=False)
def test_robustus(tmpdir):
tmpdir.chdir()
test_env = 'test_env'
# create env and install some packages
logging.info('creating ' + test_env)
robustus.execute(['env', test_env])
assert os.path.isdir(test_env)
assert os.path.isfile(os.path.join(test_env, '.robustus'))
robustus_executable = os.path.join(test_env, 'bin/robustus')
assert os.path.isfile(robustus_executable)
# install some packages
logging.info('installing requirements into ' + test_env)
run_shell([robustus_executable, 'install', 'pyserial'])
test_requirements1 = 'test_requirements1.txt'
with open(test_requirements1, 'w') as file:
file.write('pep8==1.3.3\n')
file.write('pytest==2.3.5\n')
run_shell([robustus_executable, 'install', '-r', test_requirements1])
# check packages are installed
packages_to_check = ['pyserial', 'pep8==1.3.3', 'pytest==2.3.5']
with open('freezed_requirements.txt', 'w') as req_file:
run_shell([robustus_executable, 'freeze'], stdout=req_file)
with open('freezed_requirements.txt') as req_file:
installed_packages = [line.strip() for line in req_file]
for package in packages_to_check:
assert package in installed_packages
assert check_module_available(test_env, 'serial')
assert check_module_available(test_env, 'pep8')
assert check_module_available(test_env, 'pytest')
shutil.rmtree(test_env)
def create_editable_environment(tmpdir):
"""Create an environment with an editable (shared between some tests) and
chdir into it."""
base_dir = str(tmpdir.mkdir('test_perrepo_env'))
test_env = os.path.join(base_dir, 'env')
working_dir = os.path.join(base_dir, 'working_dir')
# create env and install some packages
logging.info('creating ' + test_env)
os.mkdir(working_dir)
os.chdir(working_dir)
os.system('git init .')
robustus.execute(['env', test_env])
os.chdir(working_dir)
robustus_executable = os.path.join(test_env, 'bin/robustus')
test_requirements = os.path.join(working_dir, 'requirements.txt')
with open(test_requirements, 'w') as file:
file.write('-e git+https://github.com/braincorp/robustus-test-repo.git@master#egg=ardrone\n')
run_shell([robustus_executable, 'install', '-r', test_requirements])
return working_dir, test_env, robustus_executable
def test_pereditable(tmpdir):
"""Create a package with some editable requirements and check
that perrepo runs as expected."""
working_dir, test_env, robustus_executable = create_editable_environment(tmpdir)
# Now check that robustus behaves as expected
run_shell([robustus_executable, 'perrepo', 'touch', 'foo'])
assert os.path.exists(os.path.join(working_dir, 'foo'))
assert os.path.exists(os.path.join(test_env, 'src', 'ardrone', 'foo'))
def test_reset(tmpdir):
"""Try reset the environment"""
working_dir, test_env, robustus_executable = create_editable_environment(tmpdir)
# Change a file in the repo and check it is reset
changed_filepath = os.path.join(test_env, 'src', 'ardrone', 'README')
    original_content = open(changed_filepath, 'r').read()
f = open(changed_filepath, 'w')
f.write('junk')
f.close()
run_shell([robustus_executable, 'reset', '-f'])
assert original_content == open(changed_filepath, 'r').read()
def test_install_with_tag(tmpdir):
"""Crea
|
te a package with some editable requirements and install using a tag."""
base_dir = str(tmpdir.mkdir('test_perrepo_env'))
test_env = os.path.join(base_dir, 'env')
working_dir = os.path.join(base_dir, 'working_dir')
# create env and install some packages
logging.info('creating ' + test_env)
os.mkdir(working_dir)
os.chdir(working_dir)
os.system('git init .')
robustus.execute(['env', test_env])
os.chdir(working_dir)
robustus_executable = os.path.join(test_env, 'bin/robustus')
test_requirements = os.path.join(working_dir, 'requirements.txt')
with open(test_requirements, 'w') as file:
file.write('-e git+https://github.com/braincorp/robustus-test-repo.git@master#egg=robustus-test-repo\n')
run_shell([robustus_executable, 'install', '--tag', 'test-tag', '-r', test_requirements])
# Now check that robustus behaves as expected
assert os.path.exists(os.path.join(test_env, 'src', 'robustus-test-repo', 'test-tag'))
def test_install_with_branch_testing(tmpdir):
"""Create a package with some editable requirements and install using a branch
and check that one repo with the branch gets checked out using the branch
and the other ends up on master (this is how testing is often done)."""
base_dir = str(tmpdir.mkdir('test_perrepo_env'))
test_env = os.path.join(base_dir, 'env')
working_dir = os.path.join(base_dir, 'working_dir')
# create env and install some packages
logging.info('creating ' + test_env)
os.mkdir(working_dir)
os.chdir(working_dir)
    # create a new local repo
os.system('git init .')
setup_file_content =\
'''
from setuptools import setup, find_packages
setup(
name='test_perrepo_env',
author='Brain Corporation',
author_email='sinyavskiy@braincorporation.com',
url='https://github.com/braincorp/test_perrepo_env',
long_description='',
version='dev',
packages=find_packages(),
include_package_data=True,
install_requires=[])
'''
setup_file = os.path.join(working_dir, 'setup.py')
with open(setup_file, 'w') as file:
file.write(setup_file_content)
test_requirements = os.path.join(working_dir, 'requirements.txt')
with open(test_requirements, 'w') as file:
file.write('-e git+https://github.com/braincorp/robustus-test-repo.git@master#egg=robustus-test-repo\nmock==0.8.0\n-e git+https://github.com/braincorp/filecacher.git@master#egg=filecacher\n')
os.system('git add setup.py')
os.system('git add requirements.txt')
os.system('git commit -am "setup and reqs"')
# create test branch
os.system('git checkout -b test-branch')
test_file_on_test_branch = os.path.join(working_dir, 'root_test_branch.file')
with open(test_file_on_test_branch, 'w') as file:
file.write('root test')
os.system('git add root_test_branch.file')
os.system('git commit -am "root_test_branch.file"')
os.system('git checkout master')
robustus.execute(['env', test_env])
os.chdir(working_dir)
robustus_executable = os.path.join(test_env, 'bin/robustus')
run_shell([robustus_executable, 'install', '-e', '.', '--tag', 'test-branch', '--ignore-missing-refs'], verbose = True)
# Now check that robustus behaves as ex
|
dhoffman34/django
|
django/db/backends/mysql/base.py
|
Python
|
bsd-3-clause
| 23,595 | 0.00267 |
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper)
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str, force_text
from django.db.backends.mysql.schema import DatabaseSchemaEditor
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
if settings.DEBUG:
warnings.filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value, conv):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("MySQL received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: backend_utils.typecast_time,
FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior.
self.close()
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
# XXX MySQL DB-API drivers currently fail on binary data on Python 3.
supports_binary_field = six.PY2
supports_microsecond_precision = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
can_introspect_binary_field = False
can_introspect_boolean_field = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_auto_pk_0 = False
uses_savepoints = True
atomic_transactions = False
supports_column_check_constraints = False
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
# This command is MySQL specific; the second column
# will tell you the default table type of the created
# table. Since all Django's test tables will have the same
# table type, that's enough to evaluat
|
gjost/foxtabs
|
tests/test_foxtabs.py
|
Python
|
bsd-3-clause
| 382 | 0.005236 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_foxtabs
----------------------------------
Tests for `foxtabs` module.
"""
import unittest
from foxtabs import foxtabs
class TestFoxtabs(unittest.TestCase):
def setUp(self):
        pass
    def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
naitoh/py2rb
|
tests/basic/while.py
|
Python
|
mit
| 271 | 0.01107 |
""" while case """
x = 1
while x<10:
print(x)
x = x + 1
""" while and else case """
x = 1
while x<10:
print(x)
x = x + 1
else:
print("ok")
""" while and else break ca
|
se ""
|
"
x = 1
while x<10:
print(x)
x = x + 1
break
else:
print("ok")
|
mitdbg/modeldb
|
client/verta/verta/_swagger/_public/modeldb/model/ModeldbUpdateDatasetDescriptionResponse.py
|
Python
|
mit
| 638 | 0.010972 |
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbUpdateDatasetDescriptionResponse(BaseType):
def __init__(self, dataset=None):
required = {
"dataset": False,
}
self.dataset = dataset
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
  def from_json(d):
from .ModeldbDataset import ModeldbDataset
tmp = d.get('dataset', None)
if tmp is not None:
      d['dataset'] = ModeldbDataset.from_json(tmp)
return ModeldbUpdateDatasetDescriptionResponse(**d)
|
openhatch/oh-mainline
|
vendor/packages/mechanize/mechanize/_msiecookiejar.py
|
Python
|
agpl-3.0
| 14,694 | 0.002178 |
"""Microsoft Internet Explorer cookie loading on Windows.
Copyright 2002-2003 Johnny Lee <typo_pl@hotmail.com> (MSIE Perl code)
Copyright 2002-2006 John J Lee <jjl@pobox.com> (The Python port)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX names and comments are not great here
import os, re, time, struct, logging
if os.name == "nt":
import _winreg
from _clientcookie import FileCookieJar, CookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("mechanize").debug
def regload(path, leaf):
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
_winreg.KEY_ALL_ACCESS)
try:
value = _winreg.QueryValueEx(key, leaf)[0]
except WindowsError:
value = None
return value
WIN32_EPOCH = 0x019db1ded53e8000L # 1970 Jan 01 00:00:00 in Win32 FILETIME
def epoch_time_offset_from_win32_filetime(filetime):
"""Convert from win32 filetime to seconds-since-epoch value.
MSIE stores create and expire times as Win32 FILETIME, which is 64
bits of 100 nanosecond intervals since Jan 01 1601.
    mechanize expects time as a 32-bit value expressed in seconds since the
epoch (Jan 01 1970).
"""
if filetime < WIN32_EPOCH:
raise ValueError("filetime (%d) is before epoch (%d)" %
(filetime, WIN32_EPOCH))
return divmod((filetime - WIN32_EPOCH), 10000000L)[0]
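# Worked example (added for illustration; not in the original source): one second
# after the Unix epoch is WIN32_EPOCH + 10000000L in FILETIME units (10,000,000
# intervals of 100 ns), so:
#
#     >>> epoch_time_offset_from_win32_filetime(WIN32_EPOCH + 10000000L)
#     1L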
def binary_to_char(c): return "%02X" % ord(c)
def binary_to_str(d): return "".join(map(binary_to_char, list(d)))
class MSIEBase:
magic_re = re.compile(r"Client UrlCache MMF Ver \d\.\d.*")
padding = "\x0d\xf0\xad\x0b"
msie_domain_re = re.compile(r"^([^/]+)(/.*)$")
cookie_re = re.compile("Cookie\:.+\@([\x21-\xFF]+).*?"
"(.+\@[\x21-\xFF]+\.txt)")
# path under HKEY_CURRENT_USER from which to get location of index.dat
reg_path = r"software\microsoft\windows" \
r"\currentversion\explorer\shell folders"
reg_key = "Cookies"
def __init__(self):
self._delayload_domains = {}
def _delayload_domain(self, domain):
# if necessary, lazily load cookies for this domain
delayload_info = self._delayload_domains.get(domain)
if delayload_info is not None:
cookie_file, ignore_discard, ignore_expires = delayload_info
try:
self.load_cookie_data(cookie_file,
ignore_discard, ignore_expires)
except (LoadError, IOError):
debug("error reading cookie file, skipping: %s", cookie_file)
else:
del self._delayload_domains[domain]
def _load_cookies_from_file(self, filename):
debug("Loading MSIE cookies file: %s", filename)
cookies = []
cookies_fh = open(filename)
try:
while 1:
key = cookies_fh.readline()
if key == "": break
rl = cookies_fh.readline
def getlong(rl=rl): return long(rl().rstrip())
def getstr(rl=rl): return rl().rstrip()
key = key.rstrip()
value = getstr()
domain_path = getstr()
flags = getlong() # 0x2000 bit is for secure I think
lo_expire = getlong()
hi_expire = getlong()
lo_create = getlong()
hi_create = getlong()
sep = getstr()
if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
hi_create, lo_create, sep) or (sep != "*"):
break
m = self.msie_domain_re.search(domain_path)
if m:
domain = m.group(1)
path = m.group(2)
cookies.append({"KEY": key, "VALUE": value,
"DOMAIN": domain, "PATH": path,
"FLAGS": flags, "HIXP": hi_expire,
"LOXP": lo_expire, "HICREATE": hi_create,
"LOCREATE": lo_create})
finally:
cookies_fh.close()
return cookies
def load_cookie_data(self, filename,
ignore_discard=False, ignore_expires=False):
"""Load cookies from file containing actual cookie data.
Old cookies are kept unless overwritten by newly loaded ones.
You should not call this method if the delayload attribute is set.
I think each of these files contain all cookies for one user, domain,
and path.
filename: file containing cookies -- usually found in a file like
C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt
"""
now = int(time.time())
cookie_data = self._load_cookies_from_file(filename)
for cookie in cookie_data:
flags = cookie["FLAGS"]
secure = ((flags & 0x2000) != 0)
filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
            expires = epoch_time_offset_from_win32_filetime(filetime)
if expires < now:
discard = True
else:
                discard = False
domain = cookie["DOMAIN"]
initial_dot = domain.startswith(".")
if initial_dot:
domain_specified = True
else:
# MSIE 5 does not record whether the domain cookie-attribute
# was specified.
# Assuming it wasn't is conservative, because with strict
# domain matching this will match less frequently; with regular
# Netscape tail-matching, this will match at exactly the same
# times that domain_specified = True would. It also means we
# don't have to prepend a dot to achieve consistency with our
# own & Mozilla's domain-munging scheme.
domain_specified = False
# assume path_specified is false
# XXX is there other stuff in here? -- e.g. comment, commentURL?
c = Cookie(0,
cookie["KEY"], cookie["VALUE"],
None, False,
domain, domain_specified, initial_dot,
cookie["PATH"], False,
secure,
expires,
discard,
None,
None,
{"flags": flags})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
CookieJar.set_cookie(self, c)
def load_from_registry(self, ignore_discard=False, ignore_expires=False,
username=None):
"""
username: only required on win9x
"""
cookies_dir = regload(self.reg_path, self.reg_key)
filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
self.load(filename, ignore_discard, ignore_expires, username)
def _really_load(self, index, filename, ignore_discard, ignore_expires,
username):
now = int(time.time())
if username is None:
username = os.environ['USERNAME'].lower()
cookie_dir = os.path.dirname(filename)
data = index.read(256)
if len(data) != 256:
raise LoadError("%s file is too short" % filename)
# Cookies' index.dat file starts with 32 bytes of signature
# followed by an offset to the first record, stored as a little-
# endian DWORD.
sig, size, data = data[:32], data[32:36], data[36:]
size = struct.unpack("<L", size)[0]
# check that sig is valid
if not self.magic_re.match(sig) or size != 0x4000:
raise LoadError("%s ['%s' %s] does not seem to contain cookies" %
(str(filename), sig, size))
# s
|
freeworldxbmc/pluging.video.Jurassic.World.Media
|
resources/lib/resolvers/clicknupload.py
|
Python
|
gpl-3.0
| 2,319 | 0.012937 |
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,time
from resources.lib.libraries import client
from resources.lib.libraries import captcha
def resolve(url):
try:
result = client.request(url)
if '>File Not Found<' in result: raise Exception()
post = {}
f = client.parseDOM(result, 'Form', attrs = {'action': ''})
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
|
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = client.request(url, post=post, close=False)
post = {}
f = client.parseDOM(result, 'Form', attrs = {'action': '' })
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post.update({'method_free': 'Free Download'})
try: post.update(captcha.request(result))
except: pass
post = urllib.urlencode(post)
for i in range(0, 10):
try:
result = client.request(url, post=post, close=False)
if not '>File Download Link Generated<' in result: raise Exception()
except:
time.sleep(1)
url = client.parseDOM(result, 'a', ret='onClick')
url = [i for i in url if i.startswith('window.open')][0]
url = re.compile('[\'|\"](.+?)[\'|\"]').findall(url)[0]
return url
except:
return
|
aruizramon/alec_erpnext
|
erpnext/selling/doctype/used_sales_order/used_sales_order.py
|
Python
|
agpl-3.0
| 276 | 0.01087 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class UsedSalesOrder(Document):
pass
|
jadjay/GestionBoissonDj
|
gestionboisson/consommateurs/models.py
|
Python
|
gpl-3.0
| 1,011 | 0.023739 |
from __future__ import unicode_literals
from django.db import models
from django.forms import ModelForm
from django.contrib.auth.models import User
from boissons.models import boissons
# Create your models here.
class consommateurs(models.Model):
def __str__(self):
return "%s" % (self.user.username)
user = models.OneToOneField(User, on_delete=models.CASCADE)
activation_key = models.CharField(max_length=60)
|
key_expires = models.DateTimeField()
class consommation(models.Model):
def __str__(self):
manuel = "MANUEL" if self.manuel else ""
return "%s %s %s %s" % (self.date.strftime("%F"),self.consommateur.user.username,self.boisson.name,manuel)
date = models.DateField(auto_now_add=True)
consommateur = models.ForeignKey('consommateurs',on_delete=models.PROTECT)
boisson = models.ForeignKey('boissons.boissons',on_delete=models.PROTECT)
    manuel = models.BooleanField(default=True)
class ConsommateursForm(ModelForm):
class Meta:
model = User
fields = ('username', 'email', 'password')
|
sonusz/PhasorToolBox
|
phasortoolbox/parser/common.py
|
Python
|
mit
| 9,531 | 0.002833 |
# This is modified from a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import array
import struct
import zlib
from enum import Enum
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
if parse_version(ks_version) < parse_version('0.7'):
raise Exception(
"Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
from .cfg_2 import Cfg2
from .header import Header
from .data import Data
from .cfg_3 import Cfg3
from .command import Command
def _kaitai_repr(self):
_repr_list = []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str, bytes, bool):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def _enum_repr(self):
_repr_list = []
for item in ("name", "value"):
_r = getattr(self, item)
_repr_list.append("=".join((item, _r.__repr__())))
return "<" + self.__class__.__name__[:-4] + " |" + ", ".join(_repr_list) + ">"
def _kaitai_show(self, parent_path=' '):
if type(self) in (int, float, str, bytes, bool):
print(" == ".join((parent_path, self.__repr__())))
elif type(self) == list:
for i, item in enumerate(self):
try:
item.show('{}[{}]'.format(parent_path,i))
except:
_kaitai_show(item,'{}[{}]'.format(parent_path,i))
else:
for item in sorted(vars(self)):
if not item.startswith('_'):
_r = getattr(self, item)
try:
_r.show(parent_path+'.'+item)
except:
_kaitai_show(_r,parent_path+'.'+item)
def _enum_show(self, parent_path=' '):
for item in ("name", "value"):
_r = getattr(self, item)
print(parent_path+'.'+item+' == '+_r.__repr__())
KaitaiStruct.__repr__ = _kaitai_repr
Enum.__repr__ = _enum_repr
KaitaiStruct.show = _kaitai_show
Enum.show = _enum_show
#msg.show()
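# For example, given a parsed PhasorMessage instance `msg` (hypothetical name),
# msg.show() recursively pretty-prints every public field using the helpers
# patched onto KaitaiStruct and Enum above.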
class PhasorMessage(KaitaiStruct):
def __repr__(self):
_repr_list = [
"time=" + str(self.time)] if self.fracsec.fraction_of_second else []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str, bytes):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def show(self, parent_path=' '):
if self.fracsec.fraction_of_second:
print(parent_path+'.time == '+str(self.time))
_kaitai_show(self, parent_path)
def __init__(self, _io, _parent=None, _root=None, _mini_cfgs=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._pkt_pos = self._io.pos()
self.sync = self._root.SyncWord(self._io, self, self._root)
self.framesize = self._io.read_u2be()
self.idcode = self._io.read_u2be()
self._mini_cfg = _mini_cfgs.mini_cfg[self.idcode]
self.soc = self._io.read_u4be()
self.fracsec = self._root.Fracsec(self._io, self, self._root,
self._mini_cfg.time_base.time_base if self._mini_cfg else None)
_on = self.sync.frame_type.value
if _on == 0:
if self._mini_cfg:
self.data = Data(self._io, _mini_cfg=self._mini_cfg)
else:
self.data = self._io.read_bytes((self.framesize - 16))
elif _on == 3:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg2(io)
_mini_cfgs.add_cfg(self.idcode, self.data)
elif _on == 4:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Command(io)
elif _on == 5:
_mini_cfgs.add_cfg(self.raw_pkt)
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg3(io)
elif _on == 2:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg2(io)
elif _on == 1:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Header(io)
self.chk = self._io.read_u2be()
class SyncWord(KaitaiStruct):
|
class FrameTypeEnum(Enum):
data = 0
header = 1
cfg1 = 2
cfg2 = 3
cmd = 4
cfg3 = 5
|
class VersionNumberEnum(Enum):
c_37_118_2005 = 1
c_37_118_2_2011 = 2
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.ensure_fixed_contents(struct.pack('1b', -86))
self.reserved = self._io.read_bits_int(1) != 0
self.frame_type = self._root.SyncWord.FrameTypeEnum(
self._io.read_bits_int(3))
self.version_number = self._root.SyncWord.VersionNumberEnum(
self._io.read_bits_int(4))
class Fracsec(KaitaiStruct):
def __repr__(self):
_repr_list = ["fraction_of_second=" +
str(self.fraction_of_second)] if self.fraction_of_second else []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def show(self, parent_path):
if self.fraction_of_second:
print(parent_path+'.fraction_of_second == ' + str(self.fraction_of_second))
_kaitai_show(self, parent_path)
class LeapSecondDirectionEnum(Enum):
add = 0
delete = 1
class MsgTqEnum(Enum):
normal_operation_clock_locked_to_utc_traceable_source = 0
time_within_10_to_9_s_of_utc = 1
time_within_10_to_8_s_of_utc = 2
time_within_10_to_7_s_of_utc = 3
time_within_10_to_6_s_of_utc = 4
time_within_10_to_5_s_of_utc = 5
time_within_10_to_4_s_of_utc = 6
time_within_10_to_3_s_of_utc = 7
time_within_10_to_2_s_of_utc = 8
time_within_10_to_1_s_of_utc = 9
time_within_1_s_of_utc = 10
time_within_10_s_of_utc = 11
fault_clock_failure_time_not_reliable = 15
def __init__(self, _io, _parent=None, _root=None, _time_base=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._time_base = _time_base
self.reserved = self._io.read_bits_int(1) != 0
self.leap_second_direction = self._root.Fracsec.LeapSecondDirectionEnum(
self._io.read_bits_int(1))
self.leap_second_occurred = self._io.read_bits_int(1) != 0
self.leap_second_pending = self._io.read_bits_int(1) != 0
self.time_quality = self._root.Fracsec.MsgTqEnum(
self._io.read_bits_int(4))
self.raw_fraction_of_second = self._io.read_bits_int(24)
@property
def fraction_of_second(self):
if hasattr(self, '_m_fraction_of_seco
|
saisankargochhayat/algo_quest
|
leetcode/155. Min Stack/soln.py
|
Python
|
apache-2.0
| 664 | 0 |
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
from collections import deque
self.stack = deque()
self.stack.append((None, float('inf')))
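        # Each stack entry is a (value, running_min) pair, so getMin() can read
        # the current minimum in O(1); the sentinel above covers the first push.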
|
def push(self, x: int) -> None:
self.stack.append((x, min(x, self.stack[-1][1])))
def pop(self) -> None:
return self.stack.pop()[0]
def top(self) -> int:
return self.stack[-1][0]
def getMin(self) -> int:
return self.stack[-1][1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
|
meidli/yabgp
|
yabgp/message/attribute/mpunreachnlri.py
|
Python
|
apache-2.0
| 9,951 | 0.002311 |
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BGP Attribute MP_UNREACH_NLRI
"""
import struct
from yabgp.message.attribute import Attribute
from yabgp.message.attribute import AttributeFlag
from yabgp.message.attribute import AttributeID
from yabgp.message.attribute.nlri.ipv4_mpls_vpn import IPv4MPLSVPN
from yabgp.message.attribute.nlri.ipv6_mpls_vpn import IPv6MPLSVPN
from yabgp.message.attribute.nlri.ipv4_flowspec import IPv4FlowSpec
from yabgp.message.attribute.nlri.ipv6_unicast import IPv6Unicast
from yabgp.message.attribute.nlri.labeled_unicast.ipv4 import IPv4LabeledUnicast
from yabgp.message.attribute.nlri.evpn import EVPN
from yabgp.message.attribute.nlri.linkstate import BGPLS
from yabgp.message.attribute.nlri.ipv4_srte import IPv4SRTE
from yabgp.common import afn
from yabgp.common import safn
from yabgp.common import exception as excep
from yabgp.common import constants as bgp_cons
class MpUnReachNLRI(Attribute):
"""
This is an optional non-transitive attribute that can be used for the
purpose of withdrawing multiple unfeasible routes from service.
An UPDATE message that contains the MP_UNREACH_NLRI is not required
to carry any other path attributes.
MP_UNREACH_NLRI coding information
+---------------------------------------------------------+
| Address Family Identifier (2 octets) |
+---------------------------------------------------------+
| Subsequent Address Family Identifier (1 octet) |
+---------------------------------------------------------+
| Withdrawn Routes (variable) |
+---------------------------------------------------------+
"""
ID = AttributeID.MP_UNREACH_NLRI
FLAG = AttributeFlag.OPTIONAL + AttributeFlag.EXTENDED_LENGTH
@classmethod
def parse(cls, value):
try:
afi, safi = struct.unpack('!HB', value[0:3])
except Exception:
raise excep.UpdateMessageError(sub_error=bgp_cons.ERR_MSG_UPDATE_ATTR_LEN,
data='')
nlri_bin = value[3:]
# for IPv4
if afi == afn.AFNUM_INET:
# VPNv4
if safi == safn.SAFNUM_LAB_VPNUNICAST:
nlri = IPv4MPLSVPN.parse(nlri_bin, iswithdraw=True)
return dict(afi_safi=(afi, safi), withdraw=nlri)
# BGP flow spec
elif safi == safn.SAFNUM_FSPEC_RULE:
# if nlri length is greater than 240 bytes, it is encoded over 2 bytes
withdraw_list = []
while nlri_bin:
length = ord(nlri_bin[0:1])
if length >> 4 == 0xf and len(nlri_bin) > 2:
length = struct.unpack('!H', nlri_bin[:2])[0]
nlri_tmp = nlri_bin[2: length + 2]
nlri_bin = nlri_bin[length + 2:]
else:
nlri_tmp = nlri_bin[1: length + 1]
nlri_bin = nlri_bin[length + 1:]
nlri = IPv4FlowSpec.parse(nlri_tmp)
if nlri:
withdraw_list.append(nlri)
return dict(afi_safi=(afi, safi), withdraw=withdraw_list)
else:
return dict(afi_safi=(afn.AFNUM_INET, safi), withdraw=repr(nlri_bin))
# for ipv6
elif afi == afn.AFNUM_INET6:
# for ipv6 unicast
if safi == safn.SAFNUM_UNICAST:
return dict(afi_safi=(afi, safi), withdraw=IPv6Unicast.parse(nlri_data=nlri_bin))
elif safi == safn.SAFNUM_LAB_VPNUNICAST:
return dict(afi_safi=(afi, safi), withdraw=IPv6MPLSVPN.parse(value=nlri_bin, iswithdraw=True))
else:
return dict(afi_safi=(afi, safi), withdraw=repr(nlri_bin))
# for l2vpn
elif afi == afn.AFNUM_L2VPN:
# for evpn
if safi == safn.SAFNUM_EVPN:
return dict(afi_safi=(afi, safi), withdraw=EVPN.parse(nlri_data=nlri_bin))
else:
return dict(afi_safi=(afi, safi), withdraw=repr(nlri_bin))
# BGP LS
elif afi == afn.AFNUM_BGPLS:
if safi == safn.SAFNUM_BGPLS:
withdraw = BGPLS.parse(nlri_bin)
return dict(afi_safi=(afi, safi), withdraw=withdraw)
else:
pass
else:
return dict(afi_safi=(afi, safi), withdraw=repr(nlri_bin))
@classmethod
def construct(cls, value):
"""Construct a attribute
:param value: python dictionary
{'afi_safi': (1,128),
'withdraw': []
"""
afi, safi = value['afi_safi']
if afi == afn.AFNUM_INET:
if safi == safn.SAFNUM_LAB_VPNUNICAST: # MPLS VPN
nlri = IPv4MPLSVPN.construct(value['withdraw'], iswithdraw=True)
if nlri:
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
else:
return None
elif safi == safn.SAFNUM_FSPEC_RULE:
try:
nlri_list = value.get('withdraw') or []
if not nlri_list:
return None
nlri_hex = b''
nlri_hex += IPv4FlowSpec.construct(value=nlri_list)
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri_hex
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
except Exception:
raise excep.ConstructAttributeFailed(
reason='failed to construct attributes',
data=value
)
elif safi == safn.SAFNUM_SRTE:
try:
nlri_list = value.get('withdraw') or {}
if not nlri_list:
return None
nlri_hex = b''
nlri_hex += IPv4SRTE.construct(data=value['withdraw'])
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri_hex
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
except Exception:
raise excep.ConstructAttributeFailed(
reason='failed to construct attributes',
data=value
)
elif safi == safn.SAFNUM_MPLS_LABEL:
try:
nlri_list = value.get('withdraw') or []
if not nlri_list:
return None
nlri_hex = b''
flag = 'withdraw'
nlri_hex += IPv4LabeledUnicast.construct(nlri_list, flag)
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri_hex
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
except Exception:
raise excep.ConstructAttributeFailed(
reason='failed to construct attributes',
|
tuwiendsg/MELA
|
MELA-Extensions/MELA-ComplexCostEvaluationService/tests/mela-clients/submitServiceDescription.py
|
Python
|
apache-2.0
| 589 | 0.050934 |
import urllib, urllib2, sys, httplib
url = "/MELA/REST_WS"
HOST_IP="109.231.126.217:8180"
#HOST_IP="localhost:8180"
if __name__=='__main__':
connection = httplib.HTTPConnection(HOST_IP)
description_file = open("./costTest.xml", "r")
body_content = description_file.read()
headers={
       'Content-Type':'application/xml; charset=utf-8',
'Accept':'application/json, multipart/related'
}
connection.request('PUT', url+'/service', body=body_content,headers=headers,)
result = connection.getresponse()
print result.read()
| |
Eveler/libs
|
__Python__/edv/edv/imagescanner/backends/sane/__init__.py
|
Python
|
gpl-3.0
| 1,278 | 0.008607 |
"""SANE backend.
$Id$"""
import sane
from imagescanner.backends import base
class ScannerManager(base.ScannerManager):
def _refresh(self):
        self._devices = []
sane.init()
devices = sane.get_devices()
for dev in devices:
# Check if sane is able to open this device, if not just skip
try:
scanner = sane.open(dev[0])
scanner.close()
except:
continue
scanner_id = 'sane-%s' % len(self._devices)
scanner = Scanner(scanner_id, dev[0], dev[1], dev[2], dev[3])
            self._devices.append(scanner)
sane.exit()
class Scanner(base.Scanner):
def __init__(self, scanner_id, device, manufacturer, name, description):
self.id = scanner_id
self.manufacturer = manufacturer
self.name = name
self.description = description
self._device = device
def __repr__(self):
return '<%s: %s - %s>' % (self.id, self.manufacturer, self.name)
def scan(self, dpi=200):
sane.init()
scanner = sane.open(self._device)
image = scanner.scan()
scanner.close()
sane.exit()
return image
|
naototty/devstack-vagrant-Ironic
|
boot-cirros.py
|
Python
|
apache-2.0
| 2,863 | 0.002445 |
#!/usr/bin/env python -u
'''
This script does the following
1. Connect the router to the public network
2. Add a public key
3. Boot a cirros instance
4. Attach a floating IP
'''
from __future__ import print_function
import datetime
import os.path
import socket
import sys
import time
from novaclient.v1_1 import client as novaclient
from neutronclient.v2_0 import client as neutronclient
auth_url = "http://192.168.27.100:35357/v2.0"
username = "demo"
password = "password"
tenant_name = "demo"
neutron = neutronclient.Client(auth_url=auth_url,
username=username,
password=password,
tenant_name=tenant_name)
nova = novaclient.Client(auth_url=auth_url,
username=username,
api_key=password,
project_id=tenant_name)
if not nova.keypairs.findall(name="mykey"):
print("Creating keypair: mykey...")
with open(os.path.expanduser('~/.ssh/id_rsa.pub')) as fpubkey:
nova.keypairs.create(name="mykey", public_key=fpubkey.read())
print("done")
print("Booting cirros instance...", end='')
image = nova.images.find(name="cirros-0.3.1-x86_64-uec")
flavor = nova.flavors.find(name="m1.tiny")
instance = nova.servers.create(name="cirros", image=image, flavor=flavor,
key_name="mykey")
# Poll at 5 second intervals, until the status is no longer 'BUILD'
status = instance.status
while status == 'BUILD':
time.sleep(5)
# Retrieve the instance again so the status field updates
instance = nova.servers.get(instance.id)
status = instance.status
print("done")
print("Creating floating ip...", end='')
# Get external network
ext_net, = [x for x in neutron.list_networks()['networks']
if x['router:external']]
# Get the port corresponding to the instance
port, = [x for x in neutron.list_ports()['ports']
if x['device_id'] == instance.id]
# Create the floating ip
args = dict(floating_network_id=ext_net['id'],
port_id=port['id'])
ip_obj = neutron.create_floatingip(body={'floatingip': args})
print("done")
ip = ip_obj['floatingip']['floating_ip_address']
print("IP:{}".format(ip))
print("Waiting for ssh
|
to be ready on cirros instance...", end='')
start = datetime.datetime.now()
timeout = 120
end = start + datetime.timedelta(seconds=timeout)
port = 22
connect_timeout = 5
# From utilities/wait_for of ansible
while datetime.datetime.now() < end:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(connect_timeout)
try:
s.connect((ip, port))
s.shutdown(socket.SHUT_RDWR)
s.close()
print()
break
except:
time.sleep(1)
pass
else:
print("ssh server never came up!")
sys.exit(1)
|
vgrem/Office365-REST-Python-Client
|
office365/sharepoint/userprofiles/profileLoader.py
|
Python
|
mit
| 1,190 | 0.004202 |
from office365.runtime.client_object import ClientObject
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.sharepoint.userprofiles.userProfile import UserProfile
class ProfileLoader(ClientObject):
def __init__(self, context):
super(ProfileLoader, self).__init__(context, ResourcePath("SP.UserProfiles.ProfileLoader.GetProfileLoader"))
@staticmethod
def get_profile_loader(context):
"""
:type: office365.sharepoint.client_context.ClientContext context
"""
result = ProfileLoader(context)
qry = ServiceOperationQuery(result, "GetProfileLoader", None, None, None, result)
qry.static = True
context.add_query(qry)
return result
def get_user_profile(self):
        result = UserProfile(self.context, ResourcePath("GetUserProfile", self.resource_path))
        qry = ServiceOperationQuery(self, "GetUserProfile", None, None, None, result)
self.context.add_query(qry)
return result
@property
def entity_type_name(self):
return "SP.UserProfiles.ProfileLoader"
|
Ernsting/django-treebeard
|
treebeard/tests/tests.py
|
Python
|
apache-2.0
| 71,903 | 0.000793 |
"Unit/Functional tests"
import os
from django.contrib.admin.options import ModelAdmin
from django.contrib.admin.sites import AdminSite
from django.test import TestCase
from django.db import models, transaction
from django.contrib.auth.models import User
from django.db.models import Q
from django.conf import settings
from django import VERSION as DJANGO_VERSION
from django.utils.functional import wraps
from treebeard import numconv
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant, \
PathOverflow, MissingNodeOrderBy
from treebeard.forms import MoveNodeForm
from treebeard.tests.models import *
# ghetto app detection, there is probably some introspection method,
# but meh, this works
HAS_DJANGO_AUTH = 'django.contrib.auth' in settings.INSTALLED_APPS
BASE_DATA = [
{'data':{'desc':'1'}},
{'data':{'desc':'2'}, 'children':[
{'data':{'desc':'21'}},
{'data':{'desc':'22'}},
{'data':{'desc':'23'}, 'children':[
{'data':{'desc':'231'}},
]},
{'data':{'desc':'24'}},
]},
{'data':{'desc':'3'}},
{'data':{'desc':'4'}, 'children':[
{'data':{'desc':'41'}},
]},
]
def testtype(treetype, proxy):
def decorator(f):
@wraps(f)
def _testtype(self):
{'MP': self.set_MP,
'AL': self.set_AL,
'NS': self.set_NS}[treetype](proxy)
try:
f(self)
finally:
transaction.rollback()
self.model = None
self.sorted_model = None
self.dep_model = None
return _testtype
return decorator
def _load_test_methods(cls, proxy=True):
if proxy and DJANGO_VERSION >= (1, 1):
proxyopts = (False, True)
else:
proxyopts = (False,)
for m in dir(cls):
if not m.startswith('_multi_'):
continue
for t in ('MP', 'AL', 'NS'):
for p in proxyopts:
deco = testtype(t, p)
if p:
_proxy = '_proxy'
else:
_proxy = ''
name = 'test_%s%s_%s' % (t.lower(),
_proxy,
m.split('_', 2)[2])
setattr(cls, name, deco(getattr(cls, m)))
class TestTreeBase(TestCase):
def setUp(self):
self.set_MP()
self.unchanged = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
def set_MP(self, proxy=False):
if proxy and DJANGO_VERSION >= (1, 1):
self.model = MP_TestNode_Proxy
else:
self.model = MP_TestNode
self.sorted_model = MP_TestNodeSorted
self.dep_model = MP_TestNodeSomeDep
def set_NS(self, proxy=False):
if proxy and DJANGO_VERSION >= (1, 1):
self.model = NS_TestNode_Proxy
else:
self.model = NS_TestNode
self.sorted_model = NS_TestNodeSorted
self.dep_model = NS_TestNodeSomeDep
def set_AL(self, proxy=False):
if proxy and DJANGO_VERSION >= (1, 1):
self.model = AL_TestNode_Proxy
else:
self.model = AL_TestNode
self.sorted_model = AL_TestNodeSorted
self.dep_model = AL_TestNodeSomeDep
def got(self):
nsmodels = [NS_TestNode]
if DJANGO_VERSION >= (1, 1):
nsmodels.append(NS_TestNode_Proxy)
if self.model in nsmodels:
# this slows down nested sets tests quite a bit, but it has the
# advantage that we'll check the node edges are correct
d = {}
for tree_id, lft, rgt in self.model.objects.values_list('tree_id',
'lft',
'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for tree_id, got_edges in d.items():
self.assertEqual(len(got_edges), max(got_edges))
good_edges = range(1, len(got_edges) + 1)
self.assertEqual(sorted(got_edges), good_edges)
return [(o.desc, o.get_depth(), o.get_children_count())
for o in self.model.get_tree()]
def _assert_get_annotated_list(self, expected, parent=None):
got = [
(obj[0].desc, obj[1]['open'], obj[1]['close'], obj[1]['level'])
for obj in self.model.get_annotated_list(parent)]
self.assertEqual(expected, got)
class TestEmptyTree(TestTreeBase):
def _multi_load_bulk_empty(self):
ids = self.model.load_bulk(BASE_DATA)
got_descs = [obj.desc
for obj in self.model.objects.filter(id__in=ids)]
expected_descs = [x[0] for x in self.unchanged]
self.assertEqual(sorted(got_descs), sorted(expected_descs))
self.assertEqual(self.got(), self.unchanged)
def _multi_dump_bulk_empty(self):
self.assertEqual(self.model.dump_bulk(), [])
def _multi_add_root_empty(self):
self.model.add_root(desc='1')
expected = [(u'1', 1, 0)]
self.assertEqual(self.got(), expected)
def _multi_get_root_nodes_empty(self):
got = self.model.get_root_nodes()
expected = []
self.assertEqual([node.desc for node in got], expected)
def _multi_get_first_root_node_empty(self):
got = self.model.get_first_root_node()
self.assertEqual(got, None)
def _multi_get_last_root_node_empty(self):
got = self.model.get_last_root_node()
self.assertEqual(got, None)
def _multi_get_tree(self):
got = list(self.model.get_tree())
self.assertEqual(got, [])
def _multi_get_annotated_list(self):
expected = []
self._assert_get_annotated_list(expected)
class TestNonEmptyTree(TestTreeBase):
def setUp(self):
super(TestNonEmptyTree, self).setUp()
MP_TestNode.load_bulk(BASE_DATA)
AL_TestNode.load_bulk(BASE_DATA)
NS_TestNode.load_bulk(BASE_DATA)
class TestClassMethods(TestNonEmptyTree):
def setUp(self):
super(TestClassMethods, self).setUp()
def _multi_load_bulk_existing(self):
# inserting on an existing node
node = self.model.objects.get(desc=u'231')
ids = self.model.load_bulk(BASE_DATA, node)
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 4),
(u'1', 4, 0),
(u'2', 4, 4),
(u'21', 5, 0),
(u'22', 5, 0),
(u'23', 5, 1),
(u'231', 6, 0),
                    (u'24', 5, 0),
(u'3', 4, 0),
(u'4', 4, 1),
|
(u'41', 5, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
expected_descs = [u'1', u'2', u'21', u'22', u'23', u'231', u'24',
u'3', u'4', u'41']
got_descs = [obj.desc
for obj in self.model.objects.filter(id__in=ids)]
self.assertEqual(sorted(got_descs), sorted(expected_descs))
self.assertEqual(self.got(), expected)
def _multi_get_tree_all(self):
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in self.model.get_tree()]
self.assertEqual(got, self.unchanged)
def _multi_dump_bulk_all(self):
self.assertEqual(self.model.dump_bulk(keep_ids=False), BASE_DATA)
def _multi_get_tree_node(self):
node = self.model.objects.get(desc=u'231')
self.model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our
|
FedoraScientific/salome-yacs
|
src/salomeloader/salomeloader.py
|
Python
|
gpl-2.0
| 43,462 | 0.030578 |
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
"""This module is used to parse a supervision graph Salome (XML) and convert it into
YACS calculation schema
This parsing is done with SalomeLoader class and its method load.
"""
import sys,os
try:
import cElementTree as ElementTree
except ImportError:
import ElementTree
#from sets import Set
Set=set
import graph
import pilot
import SALOMERuntime
class UnknownKind(Exception):pass
#global variables
debug=0
typeMap={}
objref=None
_containers={}
currentProc=None
def typeName(name):
"""Replace :: in type name by /"""
return "/".join(name.split("::"))
streamTypes={
'0':"Unknown",
'1':"CALCIUM_integer",
'3':"CALCIUM_real",
}
class SalomeLoader:
"""This class parses a Salome graph (version 3.2.x) and converts it into YACS schema.
The loadxml method parses xml file and returns a SalomeProc object
The load method calls the loadxml method and creates a YACS object of class Proc
"""
def loadxml(self,filename):
"""
Parse a XML file from Salome SUPERV and return a list of SalomeProc objects.
"""
tree = ElementTree.ElementTree(file=filename)
root = tree.getroot()
if debug:print "root.tag:",root.tag,root
procs=[]
if root.tag == "dataflow":
#only one dataflow
dataflow=root
if debug:print dataflow
proc=SalomeProc(dataflow)
procs.append(proc)
else:
#one or more dataflows. The graph contains macros.
#All macros are defined at the same level in the XML file.
for dataflow in root.findall("dataflow"):
if debug:print dataflow
proc=SalomeProc(dataflow)
if debug:print "dataflow name:",proc.name
procs.append(proc)
return procs
def load(self,filename):
"""Parse a SUPERV XML file (method loadxml) and return a YACS Proc object.
"""
global typeMap,_containers,objref,currentProc
typeMap.clear()
objref=None
_containers.clear()
currentProc=None
procs=self.loadxml(filename)
#Split the master proc from the possible macros.
proc=procs.pop(0)
#proc.display()
#Put macros in macro_dict
macro_dict={}
for p in procs:
if debug:print "proc_name:",p.name,"coupled_node:",p.coupled_node
macro_dict[p.name]=p
if debug:print filename
yacsproc=ProcNode(proc,macro_dict,filename)
        return yacsproc.createNode()
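# Typical use (hypothetical file name): SalomeLoader().load("graph.xml") returns
# a YACS Proc object built from the Salome SUPERV XML file, as described above.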
class Container:
"""Class that defines a Salome Container"""
def __init__(self,mach,name):
self.mach=mach
self.name=name
self.components={}
def getName(self):
return self.mach+"/"+self.name
def getContainer(name):
if not name:
name="localhost/FactoryServer"
elif "/" not in name:
        #no machine name: use localhost
name="localhost/"+name
return _containers.get(name)
def addContainer(name):
if not name:
mach="localhost"
name="FactoryServer"
elif "/" not in name:
#no machine name: use localhost for mach
mach="localhost"
else:
mach,name=name.split("/")
c=Container(mach,name)
_containers[mach+"/"+name]=c
return c
class Service:
"""Class for Service properties"""
class Parameter:
"""Class for Parameter properties"""
class Link:
"""Class for Link properties"""
class Data:
"""Class for Data properties"""
class Node:
"""Base class for all nodes """
label="Node: "
def __init__(self):
self.links=[] # list to store inputs as links
# a link has two attributes : from_node, the starting node
# to_node, the end node
self.datas=[]
self.inStreamLinks=[] #list of dataStream links connected to this node (in)
self.outStreamLinks=[] #list of dataStream links connected to this node (out)
self.node=None
def createNode(self):
raise NotImplementedError
def getInputPort(self,p):
return self.node.getInputPort(".".join(p.split("__")))
def getOutputPort(self,p):
if not self.node:
self.createNode()
return self.node.getOutputPort(".".join(p.split("__")))
def getInputDataStreamPort(self,p):
return self.node.getInputDataStreamPort(p)
def getOutputDataStreamPort(self,p):
return self.node.getOutputDataStreamPort(p)
def initPort(self,l):
if l.type == 7:
#double (CORBA::tk_double)
try:
self.getInputPort(l.tonodeparam).edInitDbl(l.value)
except:
reason="Problem in initialization, not expected type (double): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
elif l.type == 3:
#int (CORBA::tk_long)
try:
self.getInputPort(l.tonodeparam).edInitInt(l.value)
except:
reason="Problem in initialization, not expected type (int): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
elif l.type == 14:
#objref (CORBA::tk_objref)
try:
self.getInputPort(l.tonodeparam).edInitString(l.value)
except:
reason="Problem in initialization, not expected type (objref): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
elif l.type == 18:
#string (CORBA::tk_string)
try:
self.getInputPort(l.tonodeparam).edInitString(l.value)
except:
reason="Problem in initialization, not expected type (string): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
else:
reason="Problem in initialization, not expected type (%s): %s %s" % (l.type,l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
class InlineNode(Node):
"""Inline Node salome : python function in self.codes[0]"""
def __init__(self):
Node.__init__(self)
self.codes=[]
def createNode(self):
r = pilot.getRuntime()
if self.fnames[0] == "?":
n=r.createScriptNode("",self.name)
else:
n=r.createFuncNode("",self.name)
n.setFname(self.fnames[0])
n.setScript(self.codes[0])
self.node=n
for para in self.service.inParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
n.edAddInputPort(para.name,typeMap[para.type])
for para in self.service.outParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
n.edAddOutputPort(para.name,typeMap[para.type])
for d in self.datas:
self.initPort(d)
return n
class ComputeNode(Node):
"""Compute Node Salome execute a component service"""
def createNode(self):
if self.node:
return self.node
r = pilot.getRuntime()
if self.container.component
|
qguv/loadaverage
|
load/__init__.py
|
Python
|
gpl-3.0
| 107 | 0 |
#!/usr/bin/env python3
# for more info, see github.com/qguv/loadaverage
from load.loadaverage import main
|
LonamiWebs/Py-Utils
|
logicmind/token_parser.py
|
Python
|
mit
| 3,318 | 0.003617 |
from tokens.andd import And
from tokens.expression import Expression
from tokens.iff import Iff
from tokens.kfalse import ConstantFalse
from tokens.ktrue import ConstantTrue
from tokens.nop import Not
from tokens.orr import Or
from tokens.then import Then
from tokens.variable import Variable
class TokenParser:
"""This parser only works with atomic expressions,
    so parentheses are needed everywhere to group items"""
@staticmethod
def parse_expression(string):
# Separate parenthesis so they're new tokens
# Also convert [ or { to the same parenthesis (
for s in '([{':
string = string.replace(s, ' ( ')
for s in ')]}':
string = string.replace(s, ' ) ')
# Get all operators so we can iterate over them
#
# Note that the order here is important. We first need to replace long
# expressions, such as '<->' with their single character representations.
#
# If we didn't do this, after we tried to separate the tokens from other
# expressions by adding spaces on both sides of the operator, '->' would
# break '<->' turning it into '< ->', which would not be recognised.
#
# We add spaces between the tokens so it's easy to split them and identify them.
# Another way would be to iterate over the string and finding the tokens. Once
# identified, they'd be put, in order, on a different list. However, this is
# not as simple as the currently used approach.
operators = [Iff, Then, Not, Or, And, ConstantTrue, ConstantFalse]
        # Find all the representations on the string and add surrounding spaces,
# this will allow us to call 'string.split()' to separate variable names
# from the operators so the user doesn't need to enter them separated
for operator in operators:
for representation in operator.representations:
string = string.replace(representation, ' '+operator.single_char_representation+' ')
# Get all the tokens
words = string.split()
# Store the found nested expressions on the stack
expressions_stack = [Expression()]
for w in words:
done = False
for operator in operators:
# We replaced all the operator with their single character representations. We
# don't need to check whether the current word (representation) is any of the
# available representations for this operator, since it's the single-character one.
if w == operator.single_char_representation:
expressions_stack[-1].add_token(operator())
done = True
break
if done:
pass
elif w == '(':
expressions_stack.append(Expression())
elif w == ')':
e = expressions_stack.pop()
expressions_stack[-1].add_token(e)
else:
expressions_stack[-1].add_token(Variable(w))
# Tokenize the top expression (this will also tokenize its children)
expressions_stack[0].tokenize()
# Return the top expression once it's completely valid
return expressions_stack[0]
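    # Usage sketch (the accepted operator spellings come from the token classes
    # imported above):
    #   expr = TokenParser.parse_expression('((p -> q) & p)')
    #   # expr is the tokenized Expression tree for the whole formula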
|
cpe/VAMDC-VALD
|
nodes/cdms/node/forms.py
|
Python
|
gpl-3.0
| 1,963 | 0 |
from node import models
from django.forms import ModelForm
from . import cdmsportalfunc as cpf
from django.core.exceptions import ValidationError
from django import forms
class MoleculeForm(ModelForm):
class Meta:
model = models.Molecules
fields = '__all__'
class SpecieForm(ModelForm):
datearchived = forms.DateField(
widget=forms.TextInput(attrs={'readonly': 'readonly'})
)
dateactivated = forms.DateField(
widget=forms.TextInput(attrs={'readonly': 'readonly'})
)
class Meta:
model = models.Species
fields = '__all__'
class FilterForm(ModelForm):
class Meta:
model = models.QuantumNumbersFilter
fields = '__all__'
class XsamsConversionForm(forms.Form):
inurl = forms.URLField(
label='Input URL',
required=False,
widget=forms.TextInput(
attrs={'size': 50,
|
'title': 'Paste here a URL that delivers an XSAMS '
'document.',
}))
infile = forms.FileField()
format = forms.ChoiceField(
choices=[("RAD 3D", "RAD 3D"), ("CSV", "CSV")], )
def clean(self):
infile = self.cleaned_data.get('infile')
        inurl = self.cleaned_data.get('inurl')
if (infile and inurl):
raise ValidationError('Give either input file or URL!')
if inurl:
try:
data = cpf.urlopen(inurl)
except Exception as err:
raise ValidationError('Could not open given URL: %s' % err)
elif infile:
data = infile
else:
raise ValidationError('Give either input file or URL!')
try:
self.cleaned_data['result'] = cpf.applyStylesheet2File(data)
except Exception as err:
raise ValidationError('Could not transform XML file: %s' % err)
return self.cleaned_data
|
bzhou26/leetcode_sol
|
p675_Cut_Off_Trees_for_Golf_Event.py
|
Python
|
mit
| 3,256 | 0.002764 |
'''
- Leetcode problem: 675
- Difficulty: Hard
- Brief problem description:
You are asked to cut off trees in a forest for a golf event. The forest is represented as a non-negative 2D map, in this map:
0 represents the obstacle can't be reached.
1 represents the ground can be walked through.
The place with number bigger than 1 represents a tree can be walked through, and this positive number represents the tree's height.
In one step you can walk in any of the four directions top, bottom, left and right also when standing in a point which is a tree you can decide whether or not to cut off the tree.
You are asked to cut off all the trees in this forest in the order of tree's height - always cut off the tree with lowest height first. And after cutting, the original place has the tree will become a grass (value 1).
You will start from the point (0, 0) and you should output the minimum steps you need to walk to cut off all the trees. If you can't cut off all the trees, output -1 in that situation.
You are guaranteed that no two trees have the same height and there is at least one tree needs to be cut off.
Example 1:
Input:
[
[1,2,3],
[0,0,4],
[7,6,5]
]
Output: 6
Example 2:
Input:
[
[1,2,3],
[0,0,0],
[7,6,5]
]
Output: -1
Example 3:
Input:
[
[2,3,4],
[0,0,5],
[8,7,6]
]
Output: 6
Explanation: You started from the point (0,0) and you can cut off the tree in (0,0) directly without walking.
Constraints:
1 <= forest.length <= 50
1 <= forest[i].length <= 50
0 <= forest[i][j] <= 10^9
- Solution Summary:
1. Sort the trees by tree height
2. Calculate the shortest path by BFS
Time Complexity: O((RC)^2)
Space Complexity: O(R*C)
- Used Resources:
--- Bo Zhou
'''
from typing import List
from collections import deque


class Solution:
def cutOffTree(self, forest: List[List[int]]) -> int:
treeList = []
for i in range(len(forest)):
for j in range(len(forest[0])):
if forest[i][j] > 1:
treeList.append((forest[i][j], (i, j)))
treeList.sort(key=lambda x: x[0])
totalDist = 0
startPoint = (0, 0)
for tree in treeList:
dist = self.shortestPath(forest, startPoint[0], startPoint[1], tree[1][0], tree[1][1])
if dist == -1:
return -1
else:
totalDist += dist
startPoint = (tree[1][0], tree[1][1])
return totalDist
def shortestPath(self, forest, sx, sy, tx, ty) -> int:
if sx == tx and sy == ty:
return 0
directs = [(0, 1), (0, -1), (1, 0), (-1, 0)]
visited = set()
dq = deque()
dq.append((sx, sy))
step = 0
while dq:
n = len(dq)
step += 1
for i in range(n):
x, y = dq.popleft()
for dx, dy in directs:
newX = x + dx
newY = y + dy
if newX == tx and newY == ty:
return step
elif 0 <= newX < len(forest) and 0 <= newY < len(forest[0]) and (newX, newY) not in visited and \
forest[newX][newY] != 0:
visited.add((newX, newY))
dq.append((newX, newY))
return -1
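# Quick check against Example 1 above (a sketch, not part of the original file):
#   Solution().cutOffTree([[1, 2, 3], [0, 0, 4], [7, 6, 5]]) should return 6.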
|
oyamad/QuantEcon.py
|
quantecon/game_theory/tests/test_random.py
|
Python
|
bsd-3-clause
| 2,411 | 0 |
"""
Tests for game_theory/random.py
"""
import numpy as np
from numpy.testing import assert_allclose, assert_raises
from nose.tools import eq_, ok_
from quantecon.game_theory import (
random_game, covariance_game, random_pure_actions, random_mixed_actions
)
def test_random_game():
nums_actions = (2, 3, 4)
g = random_game(nums_actions)
eq_(g.nums_actions, nums_actions)
def test_covariance_game():
nums_actions = (2, 3, 4)
N = len(nums_actions)
rho = 0.5
g = covariance_game(nums_actions, rho=rho)
eq_(g.nums_actions, nums_actions)
rho = 1
g = covariance_game(nums_actions, rho=rho)
for a in np.ndindex(*nums_actions):
        for i in range(N-1):
payoff_profile = g.payoff_profile_array[a]
assert_allclose(payoff_profile[i], payoff_profile[-1], atol=1e-8)
rho = -1 / (N - 1)
g = covariance_game(nums_actions, rho=rho)
for a in np.ndindex(*nums_actions):
assert_allclose(g.payoff_profile_array.sum(axis=-1),
np.zeros(nums_actions),
atol=1e-10)
def test_random_game_value_error():
nums_actions = () # empty
assert_raises(ValueError, random_game, nums_actions)
def test_covariance_game_value_error():
nums_actions = () # empty
assert_raises(ValueError, covariance_game, nums_actions, rho=0)
nums_actions = (2,) # length one
assert_raises(ValueError, covariance_game, nums_actions, rho=0)
nums_actions = (2, 3, 4)
rho = 1.1 # > 1
assert_raises(ValueError, covariance_game, nums_actions, rho)
rho = -1 # < -1/(N-1)
assert_raises(ValueError, covariance_game, nums_actions, rho)
def test_random_pure_actions():
nums_actions = (2, 3, 4)
N = len(nums_actions)
seed = 1234
action_profiles = [
random_pure_actions(nums_actions, seed) for i in range(2)
]
for i in range(N):
ok_(action_profiles[0][i] < nums_actions[i])
eq_(action_profiles[0], action_profiles[1])
def test_random_mixed_actions():
nums_actions = (2, 3, 4)
seed = 1234
action_profile = random_mixed_actions(nums_actions, seed)
eq_(tuple([len(action) for action in action_profile]), nums_actions)
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|
Yash3667/CipherAnalysis
|
Analysis/Frequency.py
|
Python
|
mit
| 3,272 | 0.034535 |
"""
FREQUENCY FILE
-> Contains functions for analyzing a file based on the frequency of characters or words.
"""
def call(Arguments):
"""
Entry point for all calls pertaining to frequency analysis
"""
# Storing arguments in a dictionary for easier reference
ArgumentsDictionary = {
"NAME" : Arguments[0],
"OPTION" : Arguments[1],
"KEY" : Arguments[2],
"FILE" : Arguments[3],
}
# Storing functions in a dictionary for simplicity
FunctionsDictionary = {
"c" : getCharacterFrequency,
"w" : getWordFrequency,
}
    # Since the first two letters are "-" and "f" respectively
Option = ArgumentsDictionary["OPTION"][2:]
# Call the frequencyWork function to do the actual work
if Option in FunctionsDictionary:
        return frequencyWork(FunctionsDictionary[Option], ArgumentsDictionary["FILE"], ArgumentsDictionary["KEY"])
else:
return 0
def frequencyWork(FunctionObject, FileName, Key):
"""
This function stores the data inside FILE into a List. Then calls the FunctionObject with the List and the Key
"""
    # Read the entire file and store it in a variable
try:
with open(FileName, "r") as File:
FileContents = File.read()
except:
print "Couldn't Open File"
return 0
# Split the contents of the file into a list
FileContents = FileContents.split()
# Call FunctionObject to work on the list with respect to the Key
return FunctionObject(FileContents, Key)
def getCharacterFrequency(List, Key):
"""
    Analyses the List to detect occurrences of Key (character)
"""
UpperToLowerOffset = 32 # Difference in ASCII value between Upper-Case and Lower-Case alphabets
    # Analyze occurrences for all characters
if Key == "*all":
# "A" = 65, "Z" = 90
for Number in range(65, 91):
Upper = chr(Number)
Lower = chr(Number + UpperToLowerOffset)
Count = {"UPPER" : 0, "LOWER" : 0}
# We have to check every word in the list
for Word in List:
if Upper in Word or Lower in Word:
# Since they exist in the word, we check every single character
for Character in Word:
if Upper == Character:
Count["UPPER"] += 1
elif Lower == Character:
Count["LOWER"] += 1
# Print Data for this Number
if Count["UPPER"] or Count["LOWER"]:
print "Count of \'%s\': %d" % (Lower, Count["LOWER"])
print "Count of \'%s\': %d" % (Upper, Count["UPPER"])
print
# Analyze Occurence for KEY
else:
# This is get character. Hence, even if the user passes a string, we use only the first letter of the string
Key = Key[0]
Count = 0
# Check for every word in the list
for Word in List:
if Key in Word:
# Since Key exists in Word, analyse the characters of Word for Key
for Character in Word:
if Key == Character:
Count += 1
print "Count of \'%s\': %d" % (Key, Count)
print
return 1
def getWordFrequency(List, Key):
"""
    Analyses List to detect occurrences of Key (word)
"""
Count = 0
# Check every word in the list
for Word in List:
# Remove Periods and Comma's from Word if any, and then compare with Key
if "." in Word or "," in Word:
Word = Word[:len(Word) - 1]
if Key == Word:
Count += 1
print "Count of \"%s\": %d" % (Key, Count)
print
return 1
""" END - FREQUENCY FILE """
|
rackerlabs/python-proboscis
|
proboscis/compatability/exceptions_2_5.py
|
Python
|
apache-2.0
| 1,186 | 0.000843 |
# Copyright (c) 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def capture_exception(body_func, *except_type):
try:
body_func()
return None
except except_type, e:
return e
def capture_type_error(func):
try:
func()
except TypeError, te:
msg = str(te)
if ("takes exactly 1 argument" in msg and "(0 given)" in msg) \
or "instance as first argument (got nothing instead)" in msg:
from proboscis.core import ProboscisTestMethodClassNotDecorated
raise ProboscisTestMethodClassNotDecorated()
else:
raise
|
MaximeBiset/care4care
|
main/migrations/0036_auto_20141204_1818.py
|
Python
|
agpl-3.0
| 1,581 | 0.001898 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import easy_thumbnails.fields
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0035_auto_20141204_1708'),
]
operations = [
migrations.AlterField(
model_name='user',
name='how_found',
field=multiselectfield.db.fields.MultiSelectField(choices=[('internet', 'The Internet'), ('show', 'A presentation, brochure, flyer,... '), ('branch', 'The local branch'), ('member', 'Another member'), ('friends', 'Friends or family'), ('other', 'Other ...')], max_length=41, verbose_name='How did you hear about care4care ?'),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
            name='photo',
field=easy_thumbnails.fields.ThumbnailerImageField(upload_to='photos/', default='photos/default_avatar.png'),
preserve_default=True,
|
),
migrations.AlterField(
model_name='verifieduser',
name='offered_job',
field=multiselectfield.db.fields.MultiSelectField(choices=[('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'), ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'), ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'), ('a', 'Administrative'), ('b', 'Other ...')], max_length=21, verbose_name='What jobs you want to do?', blank=True),
preserve_default=True,
),
]
|
qianqians/meter
|
xlrd/xldate.py
|
Python
|
lgpl-2.1
| 7,895 | 0.004813 |
# -*- coding: cp1252 -*-
# No part of the content of this file was derived from the works of David Giffin.
##
# <p>Copyright © 2005-2008 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
#
# <p>Provides function(s) for dealing with Microsoft Excel dates.</p>
##
# 2008-10-18 SJM Fix bug in xldate_from_date_tuple (affected some years after 2099)
# The conversion from days to (year, month, day) starts with
# an integral "julian day number" aka JDN.
# FWIW, JDN 0 corresponds to noon on Monday November 24 in Gregorian year -4713.
# More importantly:
# Noon on Gregorian 1900-03-01 (day 61 in the 1900-based system) is JDN 2415080.0
# Noon on Gregorian 1904-01-02 (day 1 in the 1904-based system) is JDN 2416482.0
import datetime
_JDN_delta = (2415080 - 61, 2416482 - 1)
assert _JDN_delta[1] - _JDN_delta[0] == 1462
# Pre-calculate the datetime epochs for efficiency.
epoch_1904 = datetime.datetime(1904, 1, 1)
epoch_1900 = datetime.datetime(1899, 12, 31)
epoch_1900_minus_1 = datetime.datetime(1899, 12, 30)
class XLDateError(ValueError): pass
class XLDateNegative(XLDateError): pass
class XLDateAmbiguous(XLDateError): pass
class XLDateTooLarge(XLDateError): pass
class XLDateBadDatemode(XLDateError): pass
class XLDateBadTuple(XLDateError): pass
_XLDAYS_TOO_LARGE = (2958466, 2958466 - 1462) # This is equivalent to 10000-01-01
##
# Convert an Excel number (presumed to represent a date, a datetime or a time) into
# a tuple suitable for feeding to datetime or mx.DateTime constructors.
# @param xldate The Excel number
# @param datemode 0: 1900-based, 1: 1904-based.
# <br>WARNING: when using this function to
# interpret the contents of a workbook, you should pass in the Book.datemode
# attribute of that workbook. Whether
# the workbook has ever been anywhere near a Macintosh is irrelevant.
# @return Gregorian (year, month, day, hour, minute, nearest_second).
# <br>Special case: if 0.0 <= xldate < 1.0, it is assumed to represent a time;
# (0, 0, 0, hour, minute, second) will be returned.
# <br>Note: 1904-01-01 is not regarded as a valid date in the datemode 1 system; its "serial number"
# is zero.
# @throws XLDateNegative xldate < 0.00
# @throws XLDateAmbiguous The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0)
# @throws XLDateTooLarge Gregorian year 10000 or later
# @throws XLDateBadDatemode datemode arg is neither 0 nor 1
# @throws XLDateError Covers the 4 specific errors
def xldate_as_tuple(xldate, datemode):
if datemode not in (0, 1):
raise XLDateBadDatemode(datemode)
if xldate == 0.00:
return (0, 0, 0, 0, 0, 0)
if xldate < 0.00:
raise XLDateNegative(xldate)
xldays = int(xldate)
frac = xldate - xldays
seconds = int(round(frac * 86400.0))
assert 0 <= seconds <= 86400
if seconds == 86400:
hour = minute = second = 0
xldays += 1
else:
# second = seconds % 60; minutes = seconds // 60
minutes, second = divmod(seconds, 60)
# minute = minutes % 60; hour = minutes // 60
hour, minute = divmod(minutes, 60)
if xldays >= _XLDAYS_TOO_LARGE[datemode]:
raise XLDateTooLarge(xldate)
if xldays == 0:
return (0, 0, 0, hour, minute, second)
if xldays < 61 and datemode == 0:
raise XLDateAmbiguous(xldate)
jdn = xldays + _JDN_delta[datemode]
yreg = ((((jdn * 4 + 274277) // 146097) * 3 // 4) + jdn + 1363) * 4 + 3
mp = ((yreg % 1461) // 4) * 535 + 333
d = ((mp % 16384) // 535) + 1
# mp /= 16384
mp >>= 14
if mp >= 10:
return ((yreg // 1461) - 4715, mp - 9, d, hour, minute, second)
else:
return ((yreg // 1461) - 4716, mp + 3, d, hour, minute, second)
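# Sanity check of the conversion above (datemode 0, 1900-based): day 61 is
# 1900-03-01, so xldate_as_tuple(61.0, 0) gives (1900, 3, 1, 0, 0, 0) and
# xldate_as_tuple(61.5, 0) gives (1900, 3, 1, 12, 0, 0).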
##
# Convert an Excel date/time number into a datetime.datetime object.
#
# @param xldate The Excel number
# @param datemode 0: 1900-based, 1: 1904-based.
#
# @return a datetime.datetime() object.
#
def xldate_as_datetime(xldate, datemode):
"""Convert an Excel date/time number into a datetime.datetime object."""
# Set the epoch based on the 1900/1904 datemode.
if datemode:
epoch = epoch_1904
else:
if xldate < 60:
epoch = epoch_1900
else:
# Workaround Excel 1900 leap year bug by adjusting the epoch.
epoch = epoch_1900_minus_1
# The integer part of the Excel date stores the number of days since
# the epoch and the fractional part stores the percentage of the day.
days = int(xldate)
fraction = xldate - days
    # Get the integer and decimal seconds in Excel's millisecond resolution.
seconds = int(round(fraction * 86400000.0))
seconds, milliseconds = divmod(seconds, 1000)
return epoch + datetime.timedelta(days, seconds, 0, milliseconds)
# === conversions from date/time to xl numbers
def _leap(y):
if y % 4: return 0
if y % 100: return 1
if y % 400: return 0
return 1
_days_in_month = (None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
##
# Convert a date tuple (year, month, day) to an Excel date.
# @param year Gregorian year.
# @param month 1 <= month <= 12
# @param day 1 <= day <= last day of that (year, month)
# @param datemode 0: 1900-based, 1: 1904-based.
# @throws XLDateAmbiguous The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0)
# @throws XLDateBadDatemode datemode arg is neither 0 nor 1
# @throws XLDateBadTuple (year, month, day) is too early/late or has invalid component(s)
# @throws XLDateError Covers the specific errors
def xldate_from_date_tuple(date_tuple, datemode):
"""Create an excel date from a tuple of (year, month, day)"""
year, month, day = date_tuple
if datemode not in (0, 1):
raise XLDateBadDatemode(datemode)
if year == 0 and month == 0 and day == 0:
return 0.00
if not (1900 <= year <= 9999):
raise XLDateBadTuple("Invalid year: %r" % ((year, month, day),))
if not (1 <= month <= 12):
raise XLDateBadTuple("Invalid month: %r" % ((year, month, day),))
if day < 1 \
or (day > _days_in_month[month] and not(day == 29 and month == 2 and _leap(year))):
raise XLDateBadTuple("Invalid day: %r" % ((year, month, day),))
Yp = year + 4716
M = month
if M <= 2:
Yp = Yp - 1
Mp = M + 9
else:
Mp = M - 3
jdn = (1461 * Yp // 4) + ((979 * Mp + 16) // 32) + \
day - 1364 - (((Yp + 184) // 100) * 3 // 4)
xldays = jdn - _JDN_delta[datemode]
if xldays <= 0:
raise XLDateBadTuple("Invalid (year, month, day): %r" % ((year, month, day),))
if xldays < 61 and datemode == 0:
raise XLDateAmbiguous("Before 1900-03-01: %r" % ((year, month, day),))
return float(xldays)
##
# Convert a time tuple (hour, minute, second) to an Excel "date" value (fraction of a day).
# @param hour 0 <= hour < 24
# @param minute 0 <= minute < 60
# @param second 0 <= second < 60
# @throws XLDateBadTuple Out-of-range hour, minute, or second
def xldate_from_time_tuple(time_tuple):
"""Create an excel date from a tuple of (hour, minute, second)"""
hour, minute, second = time_tuple
if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60:
return ((second / 60.0 + minute) / 60.0 + hour) / 24.0
raise XLDateBadTuple("Invalid (hour, minute, second): %r" % ((hour, minute, second),))
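# e.g. xldate_from_time_tuple((12, 0, 0)) == 0.5, i.e. noon is half a day.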
##
# Convert a datetime tuple (year, month, day, hour, minute, second) to an Excel date value.
# For more details, refer to other xldate_from_*_tuple functions.
# @param datetime_tuple (year, month, day, hour, minute, second)
# @param datemode 0: 1900-based, 1: 1904-based.
def xldate_from_datetime_tuple(datetime_tuple, datemode):
return (
xldate_from_date_tuple(datetime_tuple[:3], datemode)
+
xldate_from_time_tuple(datetime_tuple[3:])
)
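# --- Editor's note: illustrative usage sketch appended for clarity; it is not
# --- part of the original module. The sample date is arbitrary.
def _example_excel_round_trip():
    """Convert a Gregorian date/time to an Excel serial number and back."""
    # 2008-09-26 10:30:05 in the 1900-based date system (datemode=0).
    serial = xldate_from_datetime_tuple((2008, 9, 26, 10, 30, 5), 0)
    # serial is the day count since the 1900 epoch plus the fraction of the day.
    dt = xldate_as_datetime(serial, 0)
    return serial, dt  # dt should be datetime.datetime(2008, 9, 26, 10, 30, 5)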
|
Comunitea/CMNT_004_15
|
project-addons/custom_partner/__manifest__.py
|
Python
|
agpl-3.0
| 1,917 | 0.000522 |
##############################################################################
#
# Copyright (C) 2015 Comunitea Servicios Tecnológicos All Rights Reserved
# $Omar Castiñeira Saavedra <omar@pcomunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Partner custom",
'version': '1.0',
'category': 'Custom',
'description': """Several little customizations in partners""",
    'author': 'Comunitea Servicios Tecnológicos',
'website': 'www.comunitea.com',
"depends": ['base', 'sale', 'l10n_es_partner', 'account',
'base_partner_sequence', 'stock', 'account_credit_control',
'purchase', 'prospective_customer', 'account_due_list',
'customer_lost', 'sale_margin_percentage', 'contacts',
'crm_phone_validation',
'commercial_rules', 'account_fiscal_position_partner_type'],
"data": ["views/invoice_pending_sales_view.xml",
"views/partner_view.xml",
"views/sale_view.xml",
"security/ir.model.access.csv",
"data/custom_partner_data.xml",
"security/groups.xml",
"data/parameters.xml"],
"installable": True
}
|
botswana-harvard/microbiome
|
microbiome/apps/mb_list/models/infant_vaccines.py
|
Python
|
gpl-2.0
| 224 | 0 |
from edc_base.model.models import BaseListModel
class InfantVaccines (BaseListModel):
    class Meta:
        app_label = 'mb_list'
verbose_name = "Infant Vaccines"
verbose_name_plural = "Infant Vaccines"
|
w1ll1am23/home-assistant
|
homeassistant/components/tile/device_tracker.py
|
Python
|
apache-2.0
| 4,346 | 0.00046 |
"""Support for Tile device trackers."""
import logging
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.components.device_tracker.const import SOURCE_TYPE_GPS
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import DATA_COORDINATOR, DATA_TILE, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_ALTITUDE = "altitude"
ATTR_CONNECTION_STATE = "connection_state"
ATTR_IS_DEAD = "is_dead"
ATTR_IS_LOST = "is_lost"
ATTR_LAST_LOST_TIMESTAMP = "last_lost_timestamp"
ATTR_RING_STATE = "ring_state"
ATTR_TILE_NAME = "tile_name"
ATTR_VOIP_STATE = "voip_state"
DEFAULT_ATTRIBUTION = "Data provid
|
ed by Tile"
DEFAULT_ICON = "mdi:view-grid"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Tile device trackers."""
async_add_entities(
[
TileDeviceTracker(
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][tile_uuid], tile
)
for tile_uuid, tile in hass.data[DOMAIN][DATA_TILE][entry.entry_id].items()
]
)
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Detect a legacy configuration and import it."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: config[CONF_USERNAME],
CONF_PASSWORD: config[CONF_PASSWORD],
},
)
)
_LOGGER.info(
"Your Tile configuration has been imported into the UI; "
"please remove it from configuration.yaml"
)
return True
class TileDeviceTracker(CoordinatorEntity, TrackerEntity):
"""Representation of a network infrastructure device."""
def __init__(self, coordinator, tile):
"""Initialize."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._tile = tile
@property
def available(self):
"""Return if entity is available."""
return self.coordinator.last_update_success and not self._tile.dead
@property
def battery_level(self):
"""Return the battery level of the device.
Percentage from 0-100.
"""
return None
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return DEFAULT_ICON
@property
def location_accuracy(self):
"""Return the location accuracy of the device.
Value in meters.
"""
return self._tile.accuracy
@property
def latitude(self) -> float:
"""Return latitude value of the device."""
return self._tile.latitude
@property
def longitude(self) -> float:
"""Return longitude value of the device."""
return self._tile.longitude
@property
def name(self):
"""Return the name."""
return self._tile.name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"tile_{self._tile.uuid}"
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_GPS
@callback
def _handle_coordinator_update(self):
"""Respond to a DataUpdateCoordinator update."""
self._update_from_latest_data()
self.async_write_ha_state()
@callback
def _update_from_latest_data(self):
"""Update the entity from the latest data."""
self._attrs.update(
{
ATTR_ALTITUDE: self._tile.altitude,
ATTR_IS_LOST: self._tile.lost,
ATTR_LAST_LOST_TIMESTAMP: self._tile.lost_timestamp,
ATTR_RING_STATE: self._tile.ring_state,
ATTR_VOIP_STATE: self._tile.voip_state,
}
)
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._update_from_latest_data()
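# --- Editor's note: added illustrative sketch, not part of the original
# --- integration. It shows the shape of the legacy scanner config that
# --- async_setup_scanner() imports; the credentials are placeholders.
_EXAMPLE_LEGACY_CONFIG = {
    CONF_USERNAME: "user@example.com",
    CONF_PASSWORD: "not-a-real-password",
}
# Passing this dict to async_setup_scanner() triggers a config-entry import
# flow and logs a reminder to drop the YAML entry from configuration.yaml.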
|
vganapath/rally
|
tests/unit/plugins/common/hook/test_sys_call.py
|
Python
|
apache-2.0
| 4,165 | 0 |
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import jsonschema
import mock
from rally import consts
from rally.plugins.common.hook import sys_call
from rally.task import hook
from tests.unit import fakes
from tests.unit import test
class SysCallHookTestCase(test.TestCase):
def test_validate(self):
hook.Hook.validate(
{
"name": "sys_call",
"description": "list folder",
"args": "ls",
"trigger": {
"name": "event",
"args": {
"unit": "iteration",
"at": [10]
}
}
}
)
def test_validate_error(self):
conf = {
"name": "sys_call",
"description": "list folder",
"args": {
"cmd": 50,
},
"trigger": {
"name": "event",
"args": {
"unit": "iteration",
"at": [10]
|
}
}
}
self.assertRaises(
jsonschema.ValidationError, hook.Hook.validate, conf)
@mock.patch("rally.common.utils.Timer", side_
|
effect=fakes.FakeTimer)
@mock.patch("subprocess.Popen")
def test_run(self, mock_popen, mock_timer):
popen_instance = mock_popen.return_value
popen_instance.returncode = 0
task = mock.MagicMock()
sys_call_hook = sys_call.SysCallHook(task, "/bin/bash -c 'ls'",
{"iteration": 1}, "dummy_action")
sys_call_hook.run_sync()
sys_call_hook.validate_result_schema()
self.assertEqual(
{
"hook": "sys_call",
"description": "dummy_action",
"triggered_by": {"iteration": 1},
"started_at": fakes.FakeTimer().timestamp(),
"finished_at": fakes.FakeTimer().finish_timestamp(),
"status": consts.HookStatus.SUCCESS,
}, sys_call_hook.result())
mock_popen.assert_called_once_with(
["/bin/bash", "-c", "ls"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
@mock.patch("subprocess.Popen")
def test_run_error(self, mock_popen, mock_timer):
popen_instance = mock_popen.return_value
popen_instance.returncode = 1
popen_instance.stdout.read.return_value = b"No such file or directory"
task = mock.MagicMock()
sys_call_hook = sys_call.SysCallHook(task, "/bin/bash -c 'ls'",
{"iteration": 1}, "dummy_action")
sys_call_hook.run_sync()
sys_call_hook.validate_result_schema()
self.assertEqual(
{
"hook": "sys_call",
"description": "dummy_action",
"triggered_by": {"iteration": 1},
"started_at": fakes.FakeTimer().timestamp(),
"finished_at": fakes.FakeTimer().finish_timestamp(),
"status": consts.HookStatus.FAILED,
"error": [
"n/a",
"Subprocess returned 1",
"No such file or directory",
]
}, sys_call_hook.result())
mock_popen.assert_called_once_with(
["/bin/bash", "-c", "ls"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
|
DownGoat/PyFeedReader
|
db_init.py
|
Python
|
gpl-3.0
| 79 | 0 |
__author__ = 'DownGoat'
from pyfeedreader import database
database.init_db()
|
aequitas/munerator
|
tests/test_context.py
|
Python
|
mit
| 566 | 0 |
import pytest
from munerator.context import GameContext
from mock import Mock
@pytest.fixture
def gc():
gc = GameContext(Mock(), Mock(), Mock())
return gc
def test_player_name_client_id_translation(gc):
client_id = '1'
player_name = 'testplayer'
    gc.clients = {
client_id: {
'name': player_name,
'client_id': client_id
}
}
data = {
'kind': 'say',
'player_name': player_name
}
contexted_data = gc.handle_event(data)
    assert contexted_data['client_id'] == client_id
|
ctrlaltdel/neutrinator
|
vendor/keystoneauth1/tests/unit/loading/test_v3.py
|
Python
|
gpl-3.0
| 16,308 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import uuid
from keystoneauth1 import exceptions
from keystoneauth1 import loading
from keystoneauth1.tests.unit.loading import utils
class V3PasswordTests(utils.TestCase):
def setUp(self):
super(V3PasswordTests, self).setUp()
self.auth_url = uuid.uuid4().hex
def create(self, **kwargs):
kwargs.setdefault('auth_url', self.auth_url)
loader = loading.get_plugin_loader('v3password')
return loader.load_from_options(**kwargs)
def test_basic(self):
username = uuid.uuid4().hex
user_domain_id = uuid.uuid4().hex
password = uuid.uuid4().hex
project_name = uuid.uuid4().hex
project_domain_id = uuid.uuid4().hex
p = self.create(username=username,
user_domain_id=user_domain_id,
project_name=project_name,
project_domain_id=project_domain_id,
password=password)
pw_method = p.auth_methods[0]
self.assertEqual(username, pw_method.username)
self.assertEqual(user_domain_id, pw_method.user_domain_id)
self.assertEqual(password, pw_method.password)
self.assertEqual(project_name, p.project_name)
self.assertEqual(project_domain_id, p.project_domain_id)
def test_without_user_domain(self):
self.assertRaises(exceptions.OptionError,
self.create,
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
def test_without_project_domain(self):
self.assertRaises(exceptions.OptionError,
self.create,
username=uuid.uuid4().hex,
password=uuid.uuid4().hex,
user_domain_id=uuid.uuid4().hex,
project_name=uuid.uuid4().hex)
class TOTPTests(utils.TestCase):
def setUp(self):
super(TOTPTests, self).setUp()
self.auth_url = uuid.uuid4().hex
def create(self, **kwargs):
kwargs.setdefault('auth_url', self.auth_url)
loader = loading.get_plugin_loader('v3totp')
return loader.load_from_options(**kwargs)
def test_basic(self):
username = uuid.uuid4().hex
user_domain_id = uuid.uuid4().hex
# passcode is 6 digits
passcode = ''.join(str(random.randint(0, 9)) for x in range(6))
project_name = uuid.uuid4().hex
project_domain_id = uuid.uuid4().hex
p = self.create(username=username,
user_domain_id=user_domain_id,
project_name=project_name,
project_domain_id=project_domain_id,
passcode=passcode)
totp_method = p.auth_methods[0]
self.assertEqual(username, totp_method.username)
self.assertEqual(user_domain_id, totp_method.user_domain_id)
self.assertEqual(passcode, totp_method.passcode)
self.assertEqual(project_name, p.project_name)
self.assertEqual(project_domain_id, p.project_domain_id)
def test_without_user_domain(self):
self.assertRaises(exceptions.OptionError,
self.create,
username=uuid.uuid4().hex,
passcode=uuid.uuid4().hex)
def test_without_project_domain(self):
self.assertRaises(exceptions.OptionError,
self.create,
username=uuid.uuid4().hex,
passcode=uuid.uuid4().hex,
user_domain_id=uuid.uuid4().hex,
project_name=uuid.uuid4().hex)
class OpenIDConnectBaseTests(object):
plugin_name = None
def setUp(self):
super(OpenIDConnectBaseTests, self).setUp()
self.auth_url = uuid.uuid4().hex
def create(self, **kwargs):
kwargs.setdefault('auth_url', self.auth_url)
loader = loading.get_plugin_loader(self.plugin_name)
return loader.load_from_options(**kwargs)
def test_base_options_are_there(self):
options = loading.get_plugin_loader(self.plugin_name).get_options()
self.assertTrue(
set(['client-id', 'client-secret', 'access-token-endpoint',
'access-token-type', 'openid-scope',
'discovery-endpoint']).issubset(
set([o.name for o in options]))
)
# openid-scope gets renamed into "scope"
self.assertIn('scope', [o.dest for o in options])
class OpenIDConnectClientCredentialsTests(OpenIDConnectBaseTests,
utils.TestCase):
plugin_name = "v3oidcclientcredentials"
def test_options(self):
options = loading.get_plugin_loader(self.plugin_name).get_options()
self.assertTrue(
set(['openid-scope']).issubset(
set([o.name for o in options]))
)
def test_basic(self):
access_token_endpoint = uuid.uuid4().hex
scope = uuid.uuid4().hex
identity_provider = uuid.uuid4().hex
protocol = uuid.uuid4().hex
scope = uuid.uuid4().hex
client_id = uuid.uuid4().hex
client_secret = uuid.uuid4().hex
oidc = self.create(identity_provider=identity_provider,
protocol=protocol,
access_token_endpoint=access_token_endpoint,
client_id=client_id,
client_secret=client_secret,
scope=scope)
self.assertEqual(scope, oidc.scope)
self.assertEqual(identity_provider, oidc.identity_provider)
self.assertEqual(protocol, oidc.protocol)
self.assertEqual(access_token_endpoint, oidc.access_token_endpoint)
self.assertEqual(client_id, oidc.client_id)
self.assertEqual(client_secret, oidc.client_secret)
class OpenIDConnectPasswordTests(OpenIDConnectBaseTests, utils.TestCase):
plugin_name = "v3oidcpassword"
def test_options(self):
options = loading.get_plugin_loader(self.plugin_name).get_options()
self.assertTrue(
            set(['username', 'password', 'openid-scope']).issubset(
                set([o.name for o in options]))
        )
def test_basic(self):
access_token_endpoint = uuid.uuid4().hex
username = uuid.uuid4().hex
password = uuid.uuid4().hex
scope = uuid.uuid4().hex
identity_provider = uuid.uuid4().hex
protocol = uuid.uuid4().hex
scope = uuid.uuid4().hex
client_id = uuid.uuid4().hex
client_secret = uuid.uuid4().hex
oidc = self.create(username=username,
password=password,
identity_provider=identity_provider,
protocol=protocol,
access_token_endpoint=access_token_endpoint,
client_id=client_id,
client_secret=client_secret,
scope=scope)
self.assertEqual(username, oidc.username)
self.assertEqual(password, oidc.password)
self.assertEqual(scope, oidc.scope)
self.assertEqual(identity_provider, oidc.identity_provider)
self.assertEqual(protocol, oidc.protocol)
self.assertEqual(access_token_endpoint, oidc.access_token_endpoint)
self.assertEqual(client_id, oidc.client_id)
self.assertEqual(client_secret, oidc.client_secret)
class OpenIDConnectAuthCodeTests(OpenIDConnectBaseTests, u
|
VerifiableRobotics/LTLMoP
|
src/lib/configEditor.py
|
Python
|
gpl-3.0
| 64,636 | 0.003992 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Fri Dec 16 03:13:38 2011
import wx, wx.richtext, wx.grid, wx.lib.intctrl
import sys, os, re
# Climb the tree to find out where we are
p = os.path.abspath(__file__)
t = ""
while t != "src":
(p, t) = os.path.split(p)
if p == "":
print "I have no idea where I am; this is ridiculous"
sys.exit(1)
sys.path.append(os.path.join(p,"src","lib"))
import project
from copy import deepcopy
from numpy import *
import subprocess
import socket
import handlerSubsystem
from hsubParsingUtils import parseCallString
import lib.handlers.handlerTemplates as ht
import lib.globalConfig
from lib.hsubConfigObjects import ExperimentConfig, RobotConfig
# begin wxGlade: extracode
# end wxGlade
CALIB_PORT = 23460
def drawParamConfigPane(target, method, proj):
if target.GetSizer() is not None:
target.GetSizer().Clear(deleteWindows=True)
list_sizer = wx.BoxSizer(wx.VERTICAL)
label_info = wx.StaticText(target, -1, method.comment)
label_info.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
static_line = wx.StaticLine(target, -1)
list_sizer.Add(label_info, 0, wx.ALL|wx.EXPAND, 5)
list_sizer.Add(static_line, 0, wx.EXPAND, 0)
param_controls = {}
for p in method.para:
#print "name: %s, para_type: %s, default: %s, value: %s" % (p.name, p.para_type, p.default, p.value)
item_sizer = wx.BoxSizer(wx.HORIZONTAL)
param_label = wx.StaticText(target, -1, "%s:" % p.name)
if p.para_type is None:
continue
if p.para_type.lower() == "region":
r_names = [r.name for r in proj.rfi.regions if r.name.lower() != "boundary" and not r.isObstacle]
param_controls[p] = wx.ComboBox(target, -1, choices=r_names, style=wx.CB_DROPDOWN)
if p.value is not None and p.value in r_names:
param_controls[p].SetStringSelection(p.value)
elif p.default is not None and p.value in r_names:
p.value = p.default
param_controls[p].SetStringSelection(p.default)
else:
p.value = r_names[0]
param_controls[p].SetSelection(0)
elif p.para_type.lower().startswith("bool"):
param_controls[p] = wx.CheckBox(target, -1, "")
if p.value is not None:
param_controls[p].SetValue(p.value)
elif p.default is not None:
p.value = p.default
param_controls[p].SetValue(p.default)
else:
p.value = "False"
param_controls[p].SetValue(False)
elif p.para_type.lower().startswith("int"):
param_controls[p] = wx.lib.intctrl.IntCtrl(target, -1, 0)
if p.min_val is not None:
param_controls[p].SetMin(p.min_val)
param_controls[p].SetLimited(True)
if p.max_val is not None:
param_controls[p].SetMax(p.max_val)
param_controls[p].SetLimited(True)
if p.value is not None:
param_controls[p].SetValue(p.value)
elif p.default is not None:
p.value = p.default
param_controls[p].SetValue(p.default)
else:
p.value = "0"
param_controls[p].SetValue(0)
else:
if p.value is not None:
param_controls[p] = wx.TextCtrl(target, -1, str(p.value))
elif p.default is not None:
p.value = p.default
param_controls[p] = wx.TextCtrl(target, -1, str(p.default))
else:
p.value = ""
param_controls[p] = wx.TextCtrl(target, -1, "")
param_label.SetToolTip(wx.ToolTip(p.desc))
item_sizer = wx.BoxSizer(wx.HORIZONTAL)
item_sizer.Add(param_label, 0, wx.ALL, 5)
item_sizer.Add(param_controls[p], 1, wx.ALL, 5)
list_sizer.Add(item_sizer, 0, wx.EXPAND, 0)
# TODO: is there a better way to do this?
def paramPaneCallback(event):
this_param = None
for p in method.para:
            if event.GetEventObject() is param_controls[p]:
this_param = p
break
if this_param is None:
# Ignore; from another control (e.g. calib matrix)
return
this_param.setValue(param_controls[this_param].GetValue())
    target.Bind(wx.EVT_TEXT, paramPaneCallback)
target.Bind(wx.EVT_COMBOBOX, paramPaneCallback)
target.Bind(wx.EVT_CHECKBOX, paramPaneCallback)
target.Bind(wx.lib.intctrl.EVT_INT, paramPaneCallback)
target.SetSizer(list_sizer)
target.Layout()
label_info.Wrap(list_sizer.GetSize()[0])
class regionTagsDialog(wx.Dialog):
def __init__(self, parent, *args, **kwds):
# begin wxGlade: regionTagsDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.label_5 = wx.StaticText(self, wx.ID_ANY, "Tags:")
self.list_box_tags = wx.ListBox(self, wx.ID_ANY, choices=[], style=wx.LB_SINGLE)
self.button_add_tag = wx.Button(self, wx.ID_ADD, "")
self.button_remove_tag = wx.Button(self, wx.ID_REMOVE, "")
self.label_12 = wx.StaticText(self, wx.ID_ANY, "Regions:")
self.list_box_regions = wx.CheckListBox(self, wx.ID_ANY, choices=[])
self.static_line_2 = wx.StaticLine(self, wx.ID_ANY)
self.button_5 = wx.Button(self, wx.ID_OK, "")
self.button_8 = wx.Button(self, wx.ID_CANCEL, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_LISTBOX, self.onClickTag, self.list_box_tags)
self.Bind(wx.EVT_BUTTON, self.onClickAddTag, self.button_add_tag)
self.Bind(wx.EVT_BUTTON, self.onClickRemoveTag, self.button_remove_tag)
# end wxGlade
self.proj = parent.proj
self.Bind(wx.EVT_CHECKLISTBOX, self.onCheckRegion, self.list_box_regions)
def __set_properties(self):
# begin wxGlade: regionTagsDialog.__set_properties
self.SetTitle("Edit Region Tags...")
self.SetSize((577, 419))
# end wxGlade
def __do_layout(self):
# begin wxGlade: regionTagsDialog.__do_layout
sizer_31 = wx.BoxSizer(wx.VERTICAL)
sizer_34 = wx.BoxSizer(wx.HORIZONTAL)
sizer_32 = wx.BoxSizer(wx.HORIZONTAL)
sizer_35 = wx.BoxSizer(wx.VERTICAL)
sizer_33 = wx.BoxSizer(wx.VERTICAL)
sizer_36 = wx.BoxSizer(wx.HORIZONTAL)
sizer_33.Add(self.label_5, 0, 0, 0)
sizer_33.Add(self.list_box_tags, 1, wx.TOP | wx.BOTTOM | wx.EXPAND, 5)
sizer_36.Add(self.button_add_tag, 0, 0, 0)
sizer_36.Add(self.button_remove_tag, 0, wx.LEFT, 10)
sizer_33.Add(sizer_36, 0, wx.EXPAND, 0)
sizer_32.Add(sizer_33, 1, wx.RIGHT | wx.EXPAND, 5)
sizer_35.Add(self.label_12, 0, 0, 0)
sizer_35.Add(self.list_box_regions, 1, wx.TOP | wx.EXPAND, 5)
sizer_32.Add(sizer_35, 1, wx.EXPAND, 0)
sizer_31.Add(sizer_32, 1, wx.ALL | wx.EXPAND, 5)
sizer_31.Add(self.static_line_2, 0, wx.EXPAND, 0)
sizer_34.Add((20, 20), 1, wx.EXPAND, 0)
sizer_34.Add(self.button_5, 0, wx.RIGHT, 10)
sizer_34.Add(self.button_8, 0, 0, 0)
sizer_31.Add(sizer_34, 0, wx.ALL | wx.EXPAND, 10)
self.SetSizer(sizer_31)
self.Layout()
# end wxGlade
def _tags2dialog(self, tags):
self.tags = tags
# Populate tags and regions
self.list_box_tags.Set(self.tags.keys())
if self.list_box_tags.GetCount() > 0:
self.list_box_tags.SetSelection(0)
self.button_remove_tag.Enable(True)
self.onClickTag(None)
else:
self.button_remove_tag.Enable(False)
def onCheckRegion(self, event):
tag = self.list_box_tags.GetStringSelection()
self.tags[tag] = self.list_box_regions.GetCheckedStrings()
event.Skip()
def onClickTag(self, event): # wxGlade: regionTagsDialog.<event_handler>
if event is not None:
tag = event.GetString()
|
akohlmey/lammps
|
lib/mesont/Install.py
|
Python
|
gpl-2.0
| 3,025 | 0.006942 |
#!/usr/bin/env python
"""
Install.py tool to do a generic build of a library.
It is soft-linked to by many of the lib/Install.py files and automates the
steps described in the corresponding lib/README.
"""
from __future__ import print_function
import sys, os, subprocess
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import get_cpus, fullpath
parser = ArgumentParser(prog='Install.py',
description="LAMMPS library build wrapper script")
HELP = """
Syntax from src dir: make lib-libname args="-m machine -e suffix"
Syntax from lib dir: python Install.py -m machine -e suffix
libname = name of lib dir (e.g. atc, h5md, meam, poems, etc)
specify -m and optionally -e, order does not matter
Examples:
make lib-poems args="-m serial" # build POEMS lib with same settings as in the serial Makefile in src
make lib-colvars args="-m mpi" # build COLVARS lib with same settings as in the mpi Makefile in src
make lib-meam args="-m ifort" # build MEAM lib with custom Makefile.ifort (using Intel Fortran)
"""
# parse and process arguments
parser.add_argument("-m", "--machine",
help="suffix of a <libname>/Makefile.* file used for compiling this library")
parser.add_argument("-e", "--extramake",
help="set EXTRAMAKE variable in <libname>/Makefile.<machine> to Makefile.lammps.<extramake>")
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.machine and not args.extramake:
parser.print_help()
sys.exit(HELP)
machine = args.machine
extraflag = args.extramake
if extraflag:
suffix = args.extramake
else:
suffix = 'empty'
# set lib from working dir
cwd = fullpath('.')
lib = os.path.basename(cwd)
# create Makefile.auto as copy of Makefile.machine
# reset EXTRAMAKE if requested
if not os.path.exists("Makefile.%s" % machine):
sys.exit("lib/%s/Makefile.%s does not exist" % (lib, machine))
lines = open("Makefile.%s" % machine, 'r').readlines()
fp = open("Makefile.auto", 'w')
has_extramake = False
for line in lines:
words = line.split()
if len(words) == 3 and words[0] == "EXTRAMAKE" and words[1] == '=':
has_extramake = True
if extraflag:
line = line.replace(words[2], "Makefile.lammps.%s" % suffix)
fp.write(line)
fp.close()
# make the library via Makefile.auto optionally with parallel make
n_cpus = get_cpus()
print("Building lib%s.a ..." % lib)
cmd = "make -f Makefile.auto clean; make -f Makefile.auto -j%d" % n_cpus
try:
txt = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
print(txt.decode('UTF-8'))
except subprocess.CalledProcessError as e:
print("Make failed with:\n %s" % e.output.decode('UTF-8'))
sys.exit(1)
if os.path.exists("lib%s.a" % lib):
print("Build was successful")
else:
sys.exit("Build of lib/%s/lib%s.a was NOT successful" % (lib, lib))
if has_extramake and not os.path.exists("Makefile.lammps"):
print("WARNING: lib/%s/Makefile.lammps was NOT created" % lib)
|
midvalestudent/jupyter
|
docker/base/jupyter_notebook_config.py
|
Python
|
mit
| 1,414 | 0.004243 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat
PEM_FILE = os.path.join(jupyter_data_dir(), 'notebook.pem')
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
# Set a certificate if USE_HTTPS is set to any value
if 'USE_HTTPS' in os.environ:
if not os.path.isfile(PEM_FILE):
        # Ensure PEM_FILE directory exists
dir_name = os.path.dirname(PEM_FILE)
try:
os.makedirs(dir_name)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dir_name):
pass
else: raise
# Generate a certificate if one doesn't exist on disk
subprocess.check_call(['openssl', 'req', '-new',
'-newkey', 'rsa:2048', '-days', '365', '-nodes', '-x509',
'-subj', '/C=XX/ST=XX/L=XX/O=generated/CN=generated',
'-keyout', PEM_FILE, '-out', PEM_FILE])
# Restrict access to PEM_FILE
os.chmod(PEM_FILE, stat.S_IRUSR | stat.S_IWUSR)
c.NotebookApp.certfile = PEM_FILE
# Set a password if PASSWORD is set
if 'PASSWORD' in os.environ:
from IPython.lib import passwd
c.NotebookApp.password = passwd(os.environ['PASSWORD'])
del os.environ['PASSWORD']
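# --- Editor's note: added illustrative helper, not part of the original config.
# --- It mirrors the PASSWORD branch above so a hashed password can be
# --- pre-computed outside the container; the plaintext value is a placeholder.
def _example_hashed_password(plain_text='change-me'):
    """Return the salted hash that c.NotebookApp.password expects."""
    from IPython.lib import passwd  # same helper used by the config above
    return passwd(plain_text)       # e.g. 'sha1:<salt>:<digest>'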
|
DheerendraRathor/ldap-oauth2
|
application/views.py
|
Python
|
gpl-3.0
| 4,953 | 0.003836 |
from braces.views import LoginRequiredMixin
from django.views.generic import UpdateView
from oauth2_provider.exceptions import OAuthToolkitError
from oauth2_provider.http import HttpResponseUriRedirect
from oauth2_provider.models import get_application_model as get_oauth2_application_model
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views import AuthorizationView
from oauth2_provider.views.application import ApplicationRegistration
from core.utils import get_default_scopes
from .forms import RegistrationForm
class ApplicationRegistrationView(ApplicationRegistration):
form_class = RegistrationForm
class ApplicationUpdateView(LoginRequiredMixin, UpdateView):
"""
View used to update an application owned by the request.user
"""
form_class = RegistrationForm
context_object_name = 'application'
template_name = "oauth2_provider/application_form.html"
def get_queryset(self):
return get_oauth2_application_model().objects.filter(user=self.request.user)
class CustomAuthorizationView(AuthorizationView):
def form_valid(self, form):
client_id = form.cleaned_data.get('client_id', '')
application = get_oauth2_application_model().objects.get(client_id=client_id)
scopes = form.cleaned_data.get('scope', '')
scopes = set(scopes.split(' '))
scopes.update(set(get_default_scopes(application)))
private_scopes = application.private_scopes
if private_scopes:
private_scopes = set(private_scopes.split(' '))
scopes.update(private_scopes)
scopes = ' '.join(list(scopes))
form.cleaned_data['scope'] = scopes
return super(CustomAuthorizationView, self).form_valid(form)
def get(self, request, *args, **kwargs):
"""
Copied blatantly from super method. Had to change few stuff, but didn't find better way
than copying and editing the whole stuff.
Sin Count += 1
"""
try:
scopes, credentials = self.validate_authorization_request(request)
try:
del credentials['request']
# Removing oauthlib.Request from credentials. This is not required in future
except KeyError: # pylint: disable=pointless-except
pass
kwargs['scopes_descriptions'] = [oauth2_settings.SCOPES[scope] for scope in scopes]
kwargs['scopes'] = scopes
# at this point we know an Application instance with such client_id exists in the database
application = get_oauth2_application_model().objects.get(
client_id=credentials['client_id']) # TODO: cache it!
kwargs['application'] = application
kwargs.update(credentials)
self.oauth2_data = kwargs
# following two loc are here only because of https://code.djangoproject.com/ticket/17795
form = self.get_form(self.get_form_class())
kwargs['form'] = form
# Check to see if the user has already granted access and return
# a successful response depending on 'approval_prompt' url parameter
require_approval = request.GET.get('approval_prompt', oauth2_settings.REQUEST_APPROVAL_PROMPT)
# If skip_authorization field is True, skip the authorization screen even
# if this is the first use of the application and there was no previous authorization.
            # This is useful for in-house applications -> assume in-house applications
            # are already approved.
if application.skip_authorization:
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=" ".join(scopes),
credentials=credentials, allow=True)
return HttpResponseUriRedirect(uri)
elif require_approval == 'auto':
tokens = request.user.accesstoken_set.filter(application=kwargs['application']).all().order_by('-id')
if len(tokens) > 0:
token = tokens[0]
if len(tokens) > 1:
# Enforce one token pair per user policy. Remove all older tokens
                    request.user.accesstoken_set.exclude(pk=token.id).all().delete()
# check past authorizations regarded the same scopes as the current one
if token.allow_scopes(scopes):
                    uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=" ".join(scopes),
credentials=credentials, allow=True)
return HttpResponseUriRedirect(uri)
return self.render_to_response(self.get_context_data(**kwargs))
except OAuthToolkitError as error:
return self.error_response(error)
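# --- Editor's note: added standalone sketch of the scope-merging rule used in
# --- CustomAuthorizationView.form_valid() above; it is not part of the original
# --- module and the argument names are illustrative.
def _merge_scopes(requested, default_scopes, private_scopes):
    """Union the requested, default and private scopes into one scope string."""
    scopes = set(requested.split(' ')) if requested else set()
    scopes.update(default_scopes)
    if private_scopes:
        scopes.update(private_scopes.split(' '))
    return ' '.join(sorted(scopes))

# _merge_scopes('read', ['openid'], 'inhouse') -> 'inhouse openid read'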
|
jokajak/itweb
|
data/env/lib/python2.6/site-packages/SQLAlchemy-0.6.7-py2.6.egg/sqlalchemy/pool.py
|
Python
|
gpl-3.0
| 33,545 | 0.002832 |
# sqlalchemy/pool.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import weakref, time, threading
from sqlalchemy import exc, log
from sqlalchemy import queue as sqla_queue
from sqlalchemy.util import threading, pickle, as_interface, memoized_property
proxies = {}
def manage(module, **params):
"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`QueuePool`.
:param \*\*params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.itervalues():
manager.close()
proxies.clear()
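# --- Editor's note: added usage sketch, not part of the original module. It
# --- assumes the stdlib sqlite3 DB-API module; any DB-API 2.0 module works.
def _example_manage():
    import sqlite3
    pooled = manage(sqlite3, pool_size=5)  # proxy whose connect() pools connections
    conn = pooled.connect(':memory:')      # checked out from the managed pool
    conn.close()                           # returns the connection to the pool
    clear_managers()                       # dispose every managed pool when done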
class Pool(log.Identified):
"""Abstract base class for connection pools."""
def __init__(self,
creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True, listeners=None):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to non -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`unique_connection` method is provided to bypass the
threadlocal behavior installed into :meth:`connect`.
:param reset_on_return: If true, reset the database state of
connections returned to the pool. This is typically a
ROLLBACK to release locks and transaction resources.
Disable at your own peril. Defaults to True.
:param listeners: A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
          dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool.
"""
if logging_name:
            self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
self.logger = log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._use_threadlocal = use_threadlocal
self._reset_on_return = reset_on_return
self.echo = echo
self.listeners = []
self._on_connect = []
self._on_first_connect = []
self._on_checkout = []
self._on_checkin = []
if listeners:
for l in listeners:
self.add_listener(l)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is different from :meth:`.Pool.connect` only if the
``use_threadlocal`` flag has been set to ``True``.
"""
return _ConnectionFairy(self).checkout()
def create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunection with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, It is advised to not reuse the pool once dispose()
is called, and to instead use a new pool constructed by the
recreate() method.
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy(self).checkout()
try:
rec = self._threadconns.current()
if rec:
return rec.checkout()
except AttributeError:
pass
agent = _ConnectionFairy(self)
self._threadconns.current = weakref.ref(agent)
return agent.checkout()
def return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal and hasattr(self._threadconns, "current"):
del self._threadconns.current
self.do_return_conn(record)
def get(self):
"""Return a non-instrumented DBAPI connection from this :class:`.Pool`.
This is called by ConnectionRecord in order to get its DBAPI
resource.
"""
return self.do_get()
def do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
def add_listener(self, listener):
"""Add a ``PoolListener``-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
listener = as_interface(listener,
methods=('connect', 'first_connect', 'checkout', 'checkin'))
self.listeners.append(listener)
if hasattr(listener, 'connect'):
self._on_connect.append(listener)
if
|
ggtracker/sc2reader
|
sc2reader/events/game.py
|
Python
|
mit
| 25,673 | 0.002454 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
from sc2reader.utils import Length
from sc2reader.events.base import Event
from sc2reader.log_utils import loggable
from itertools import chain
@loggable
class GameEvent(Event):
"""
This is the base class for all game events. The attributes below are universally available.
"""
def __init__(self, frame, pid):
#: The id of the player generating the event. This is 16 for global non-player events.
#: Prior to Heart of the Swarm this was the player id. Since HotS it is
#: now the user id (uid), we still call it pid for backwards compatibility. You shouldn't
#: ever need to use this; use :attr:`player` instead.
self.pid = pid
#: A reference to the :class:`~sc2reader.objects.Player` object representing
#: this player in the replay. Not available for global events (:attr:`is_local` = False)
self.player = None
#: The frame of the game that this event was recorded at. 16 frames per game second.
self.frame = frame
#: The second of the game that this event was recorded at. 16 frames per game second.
self.second = frame >> 4
#: A flag indicating if it is a local or global event.
self.is_local = pid != 16
#: Short cut string for event class name
self.name = self.__class__.__name__
def _str_prefix(self):
if getattr(self, "pid", 16) == 16:
player_name = "Global"
elif self.player and not self.player.name:
player_name = "Player {0} - ({1})".format(
self.player.pid, self.player.play_race
)
elif self.player:
player_name = self.player.name
else:
player_name = "no name"
return "{0}\t{1:<15} ".format(Length(seconds=int(self.frame / 16)), player_name)
def __str__(self):
return self._str_prefix() + self.name
class GameStartEvent(GameEvent):
"""
Recorded when the game starts and the frames start to roll. This is a global non-player
event.
"""
def __init__(self, frame, pid, data):
super(GameStartEvent, self).__init__(frame, pid)
#: ???
self.data = data
class PlayerLeaveEvent(GameEvent):
"""
Recorded when a player leaves the game.
"""
def __init__(self, frame, pid, data):
super(PlayerLeaveEvent, self).__init__(frame, pid)
#: ???
self.data = data
class UserOptionsEvent(GameEvent):
"""
This event is recorded for each player at the very beginning of the game before the
:class:`GameStartEvent`.
"""
def __init__(self, frame, pid, data):
super(UserOptionsEvent, self).__init__(frame, pid)
#:
self.game_fully_downloaded = data["game_fully_downloaded"]
#:
self.development_cheats_enabled = data["development_cheats_enabled"]
#:
self.multiplayer_cheats_enabled = data["multiplayer_cheats_enabled"]
#:
self.sync_checksumming_enabled = data["sync_checksumming_enabled"]
#:
self.is_map_to_map_transition = data["is_map_to_map_transition"]
#:
self.use_ai_beacons = data["use_ai_beacons"]
#: Are workers sent to auto-mine on game start
self.starting_rally = (
data["starting_rally"] if "starting_rally" in data else None
)
#:
self.debug_pause_enabled = data["debug_pause_enabled"]
#:
self.base_build_num = data["base_build_num"]
def create_command_event(frame, pid, data):
ability_type = data["data"][0]
if ability_type == "None":
return BasicCommandEvent(frame, pid, data)
elif ability_type == "TargetUnit":
return TargetUnitCommandEvent(frame, pid, data)
elif ability_type == "TargetPoint":
return TargetPointCommandEvent(frame, pid, data)
elif ability_type == "Data":
return DataCommandEvent(frame, pid, data)
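# --- Editor's note: added usage sketch, not part of the original module. It
# --- assumes the public sc2reader.load_replay() entry point; the replay path
# --- is a placeholder.
def _example_dump_commands(path="example.SC2Replay"):
    import sc2reader
    replay = sc2reader.load_replay(path)
    for event in replay.game_events:
        if isinstance(event, CommandEvent) and event.has_ability:
            # ability_id packs the ability link and command index: link << 5 | index
            print(event.second, event.player, hex(event.ability_id), event.ability_name)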
@loggable
class CommandEvent(GameEvent):
"""
Ability events are generated when ever a player in the game issues a command
to a unit or group of units. They are split into three subclasses of ability,
each with their own set of associated data. The attributes listed below are
shared across all ability event types.
See :class:`TargetPointCommandEvent`, :class:`TargetUnitCommandEvent`, and
:class:`DataCommandEvent` for individual details.
"""
def __init__(self, frame, pid, data):
super(CommandEvent, self).__init__(frame, pid)
#: Flags on the command???
self.flags = data["flags"]
#: A dictionary of possible ability flags. Flags are:
#:
#: * alternate
#: * queued
#: * preempt
#: * smart_click
#: * smart_rally
#: * subgroup
#: * set_autocast,
#: * set_autocast_on
#: * user
#: * data_a
#: * data_b
#: * data_passenger
#: * data_abil_queue_order_id,
#: * ai
#: * ai_ignore_on_finish
#: * is_order
#: * script
#: * homogenous_interruption,
#: * minimap
#: * repeat
#: * dispatch_to_other_unit
#: * target_self
#:
self.flag = dict(
alternate=0x1 & self.flags != 0,
queued=0x2 & self.flags != 0,
preempt=0x4 & self.flags != 0,
smart_click=0x8 & self.flags != 0,
smart_rally=0x10 & self.flags != 0,
subgroup=0x20 & self.flags != 0,
set_autocast=0x40 & self.flags != 0,
set_autocast_on=0x80 & self.flags != 0,
user=0x100 & self.flags != 0,
data_a=0x200 & self.flags != 0,
data_passenger=0x200 & self.flags != 0, # alt-name
data_b=0x400 & self.flags != 0,
data_abil_queue_order_id=0x400 & self.flags != 0, # alt-name
ai=0x800 & self.flags != 0,
ai_ignore_on_finish=0x1000 & self.flags != 0,
is_order=0x2000 & self.flags != 0,
script=0x4000 & self.flags != 0,
homogenous_interruption=0x8000 & self.flags != 0,
minimap=0x10000 & self.flags != 0,
repeat=0x20000 & self.flags != 0,
dispatch_to_other_unit=0x40000 & self.flags != 0,
target_self=0x80000 & self.flags != 0,
)
#: Flag marking that the command had ability information
self.has_ability = data["ability"] is not None
#: Link the the ability group
self.ability_link = data["ability"]["ability_link"] if self.has_ability else 0
        #: The index of the ability in the ability group
self.command_index = (
data["ability"]["ability_command_index"] if self.has_ability else 0
)
#: Additional ability data.
self.ability_data = (
data["ability"]["
|
ability_command_data"] if self.has_ability else 0
)
#: Unique identifier for the ability
self.ability_id = self.ability_link << 5 | self.command_index
#: A reference to the ability being used
self.ability = None
#: A shortcut to the name of the ability being used
self.ability_name = ""
#: The type of ability, one of: None (no target), TargetPoint, TargetUnit, or Data
self.ability_type = data["data"][0]
#: The raw data associated with this ability type
self.ability_type_data = data["data"][1]
#: Other unit id??
self.other_unit_id = data["other_unit_tag"]
#: A reference to the other unit
self.other_unit = None
def __str__(self):
string = self._str_prefix()
if self.has_ability:
string += "Ability ({0:X})".format(self.ability_id)
if self.ability:
string += " - {0}".format(self.ability.name)
else:
string += "Right Click"
if self.ability_type == "TargetUnit":
string += "; Target: {0} [{1:0>8X}]".format(
self.target.name, self.target_
|
skirsdeda/djangocms-blog
|
djangocms_blog/migrations/0014_auto_20160215_1331.py
|
Python
|
bsd-3-clause
| 1,773 | 0.003384 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from cms.models import Page
from cms.utils.i18n import get_language_list
from django.db import migrations, models
def forwards(apps, schema_editor):
BlogConfig = apps.get_model('djangocms_blog', 'BlogConfig')
BlogConfigTranslation = apps.get_model('djangocms_blog', 'BlogConfigTranslation')
Post = apps.get_model('djangocms_blog', 'Post')
BlogCategory = apps.get_model('djangocms_blog', 'BlogCategory')
GenericBlogPlugin = apps.get_model('djangocms_blog', 'GenericBlogPlugin')
LatestPostsPlugin = apps.get_model('djangocms_blog', 'LatestPostsPlugin')
AuthorEntriesPlugin = apps.get_model('djangocms_blog', 'AuthorEntriesPlugin')
config = None
    for page in Page.objects.drafts().filter(application_urls='BlogApp'):
config, created = BlogConfig.objects.get_or_create(namespace=page.application_namespace)
if not BlogConfigTranslation.objects.exists():
for lang in get_language_list():
title = page.get_title(lang)
translation = BlogConfigTranslation.objects.create(language_code=lang, master_id=config.pk, app_title=title)
if config:
for model in (Post, BlogCategory, GenericBlogPlugin, LatestPostsPlugin, AuthorEntriesPlugin):
for item in model.objects.filter(app_config__isnull=True):
item.app_config = config
item.save()
def backwards(apps, schema_editor):
# No need for backward data migration
pass
class Migration(migrations.Migration):
dependencies = [
('cms', '0004_auto_20140924_1038'),
('djangocms_blog', '0013_auto_20160201_2235'),
]
operations = [
migrations.RunPython(forwards, backwards),
]
|
ifduyue/sentry
|
tests/sentry/integrations/vsts/test_integration.py
|
Python
|
bsd-3-clause
| 12,757 | 0.000392 |
from __future__ import absolute_import
from sentry.identity.vsts import VSTSIdentityProvider
from sentry.integrations.exceptions import IntegrationError
from sentry.integrations.vsts import VstsIntegration, VstsIntegrationProvider
from sentry.models import (
Integration, IntegrationExternalProject, OrganizationIntegration, Repository,
Project
)
from sentry.plugins import plugins
from tests.sentry.plugins.testutils import VstsPlugin # NOQA
from .testutils import VstsIntegrationTestCase, CREATE_SUBSCRIPTION
class VstsIntegrationProviderTest(VstsIntegrationTestCase):
# Test data setup in ``VstsIntegrationTestCase``
    def test_basic_flow(self):
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
assert integration.external_id == self.vsts_account_id
assert integration.name == self.vsts_account_name
metadata = integration.metadata
assert metadata['scopes'] == list(VSTSIdentityProvider.oauth_scopes)
assert metadata['subscription']['id'] == \
CREATE_SUBSCRIPTION['publisherInputs']['tfsSubscriptionId']
assert metadata['domain_name'] == '{}.visualstudio.com'.format(
self.vsts_account_name
)
def test_migrate_repositories(self):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name='NotReachable',
url='https://randoaccount.visualstudio.com/Product/_git/NotReachable',
provider='visualstudio',
external_id='123456789',
)
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
assert Repository.objects.get(
id=accessible_repo.id,
).integration_id == integration.id
assert Repository.objects.get(
id=inaccessible_repo.id,
).integration_id is None
def setupPluginTest(self):
self.project = Project.objects.create(
organization_id=self.organization.id,
)
self.plugin = plugins.get('vsts')
self.plugin.enable(self.project)
def test_disabled_plugin_when_fully_migrated(self):
self.setupPluginTest()
Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
# Enabled before Integration installation
assert 'vsts' in [p.slug for p in plugins.for_project(self.project)]
self.assert_installation()
# Disabled
assert 'vsts' not in [p.slug for p in plugins.for_project(self.project)]
def test_doesnt_disable_plugin_when_partially_migrated(self):
self.setupPluginTest()
# Repo accessible by new Integration
Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
# Inaccessible Repo - causes plugin to stay enabled
Repository.objects.create(
organization_id=self.organization.id,
name='NotReachable',
url='https://randoaccount.visualstudio.com/Product/_git/NotReachable',
provider='visualstudio',
external_id='123456789',
)
self.assert_installation()
# Still enabled
assert 'vsts' in [p.slug for p in plugins.for_project(self.project)]
def test_build_integration(self):
state = {
'account': {
'AccountName': self.vsts_account_name,
'AccountId': self.vsts_account_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
}
integration = VstsIntegrationProvider()
integration_dict = integration.build_integration(state)
assert integration_dict['name'] == self.vsts_account_name
assert integration_dict['external_id'] == self.vsts_account_id
assert integration_dict['metadata']['domain_name'] == \
'{}.visualstudio.com'.format(self.vsts_account_name)
assert integration_dict['user_identity']['type'] == 'vsts'
assert integration_dict['user_identity']['external_id'] == \
self.vsts_account_id
assert integration_dict['user_identity']['scopes'] == sorted(
VSTSIdentityProvider.oauth_scopes)
def test_webhook_subscription_created_once(self):
self.assert_installation()
state = {
'account': {
'AccountName': self.vsts_account_name,
'AccountId': self.vsts_account_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
}
# The above already created the Webhook, so subsequent calls to
# ``build_integration`` should omit that data.
data = VstsIntegrationProvider().build_integration(state)
assert 'subscription' not in data['metadata']
def test_fix_subscription(self):
external_id = '1234567890'
Integration.objects.create(
metadata={},
provider='vsts',
external_id=external_id,
)
data = VstsIntegrationProvider().build_integration({
'account': {
'AccountName': self.vsts_account_name,
'AccountId': external_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
})
assert external_id == data['external_id']
subscription = data['metadata']['subscription']
assert subscription['id'] is not None and subscription['secret'] is not None
class VstsIntegrationTest(VstsIntegrationTestCase):
def test_get_organization_config(self):
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
fields = integration.get_installation(
integration.organizations.first().id
).get_organization_config()
assert [field['name'] for field in fields] == [
'sync_status_forward',
'sync_forward_assignment',
'sync_comments',
'sync_status_reverse',
'sync_reverse_assignment',
]
def test_update_organization_config_remove_all(self):
self.assert_installation()
model = Integration.objects.get(provider='vsts')
integration = VstsIntegration(model, self.organization.id)
|
mthpower/timeline-blog
|
timeline/timeline/settings.py
|
Python
|
mit
| 2,922 | 0.000342 |
"""
Django settings for timeline project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nv&mfrq1ou*#1%hq7_8o)vf24$ar09m(*)oras0-mzmg!bwjlu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'timeline',
'corsheaders',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'timeline.urls'
WSGI_APPLICATION = 'timeline.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'timeline-db',
'USER': 'postgres',
'PASSWORD': 'zawnza',
'HOST': 'localhost',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
# Media files (User uploaded files)
# WARNING: This is (probably) not a sensible configuration for prod.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Template Directories
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
)
CORS_ORIGIN_ALLOW_ALL = True
|
kytos/kytos-utils
|
kytos/cli/commands/web/api.py
|
Python
|
mit
| 986 | 0 |
"""Translate cli commands to non-cli code."""
import logging
from urllib.error import HTTPError, URLError
import requests
from kytos.utils.config import KytosConfig
LOG = logging.getLogger(__name__)
class WebAPI: # pylint: disable=too-few-public-methods
"""An API for the command-line interface."""
@classmethod
def update(cls, args):
"""Call the method to update the Web UI."""
kytos_api = KytosConfig().config.get('kytos', 'api')
url = f"{kytos
|
_api}api/kytos/core/web/update"
version = args["<version>"]
if version:
url += f"/{version}"
try:
result = requests.post(url)
        except (HTTPError, URLError, requests.exceptions.ConnectionError):
            LOG.error("Can't connect to server: %s", kytos_api)
return
if result.status_code != 200:
LOG.info("Error while updating web ui: %s", result.content)
else:
LOG.info("Web UI updated.")
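
WebAPI.update expects the docopt-style argument mapping produced by the kytos CLI and reports outcomes only through the logger. A usage sketch (editorial, not from the kytos sources) that exercises the method without a running server by stubbing requests.post; the version string is a placeholder, and KytosConfig is still consulted for the API URL:

# Sketch: drive WebAPI.update against a faked HTTP layer.
from unittest import mock

from kytos.cli.commands.web.api import WebAPI  # module path taken from the file's location above

fake_response = mock.Mock(status_code=200)
with mock.patch('requests.post', return_value=fake_response) as post:
    WebAPI.update({'<version>': '1.2.0'})  # placeholder version value
    post.assert_called_once()  # POST goes to .../api/kytos/core/web/update/1.2.0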
|
fredericlepied/os-net-config
|
os_net_config/tests/test_cli.py
|
Python
|
apache-2.0
| 4,656 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import sys
from os_net_config import cli
from os_net_config.tests import base
import six
REALPATH = os.path.dirname(os.path.realpath(__file__))
SAMPLE_BASE = os.path.join(REALPATH, '../../', 'etc',
'os-net-config', 'samples')
class TestCli(base.TestCase):
def run_cli(self, argstr, exitcodes=(0,)):
orig = sys.stdout
orig_stderr = sys.stderr
sys.stdout = six.StringIO()
sys.stderr = six.StringIO()
ret = cli.main(argstr.split())
self.assertIn(ret, exitcodes)
stdout = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
stderr = sys.stderr.getvalue()
sys.stderr.close()
sys.stderr = orig_stderr
return (stdout, stderr)
def test_bond_noop_output(self):
        bond_yaml = os.path.join(SAMPLE_BASE, 'bond.yaml')
bond_json = os.path.join(SAMPLE_BASE, 'bond.json')
        stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'-c %s' % bond_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'-c %s' % bond_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=br-ctlplane',
'DEVICE=em2',
'DEVICE=em1',
'DEVICE=bond1',
'DEVICETYPE=ovs']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_bridge_noop_output(self):
bridge_yaml = os.path.join(SAMPLE_BASE, 'bridge_dhcp.yaml')
bridge_json = os.path.join(SAMPLE_BASE, 'bridge_dhcp.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=eni --noop -c %s' %
bridge_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=eni --noop -c %s' %
bridge_json)
self.assertEqual('', stderr)
sanity_devices = ['iface br-ctlplane inet dhcp',
'iface em1',
'ovs_type OVSBridge']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_vlan_noop_output(self):
vlan_yaml = os.path.join(SAMPLE_BASE, 'bridge_vlan.yaml')
vlan_json = os.path.join(SAMPLE_BASE, 'bridge_vlan.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop -c %s'
% vlan_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop -c %s'
% vlan_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=br-ctlplane',
'DEVICE=em1',
'DEVICE=vlan16',
'DEVICETYPE=ovs']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_interface_noop_output(self):
interface_yaml = os.path.join(SAMPLE_BASE, 'interface.yaml')
interface_json = os.path.join(SAMPLE_BASE, 'interface.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop -c %s'
% interface_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop -c %s'
% interface_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=em1',
'BOOTPROTO=static',
'IPADDR=192.0.2.1']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
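
run_cli captures output by swapping sys.stdout and sys.stderr by hand and restoring them afterwards, which leaks the replaced streams if cli.main raises before the restore. On Python 3 the same capture can be written with contextlib.redirect_stdout/redirect_stderr, which restore the streams automatically; a sketch of that alternative (editorial only — the original keeps six.StringIO, presumably for Python 2 compatibility):

# Sketch: drop-in replacement for run_cli using context managers (Python 3 only).
import contextlib
import io

def run_cli(self, argstr, exitcodes=(0,)):
    out, err = io.StringIO(), io.StringIO()
    with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
        ret = cli.main(argstr.split())   # streams are restored even if this raises
    self.assertIn(ret, exitcodes)
    return out.getvalue(), err.getvalue()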
|