#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to change the schedules of your HBZones.
-
Provided by Honeybee 0.0.57
    Args:
        _HBZones: HBZones for which you want to change schedules.
        occupancySchedules_: A text string representing the occupancy schedule that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component.
        occupancyActivitySchs_: A text string representing the schedule for the metabolic rate of the occupants that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component. If this is a CSV schedule, the values in it should be Watts and the "units_" input should be "ActivityLevel."
        heatingSetPtSchedules_: A text string representing the heating setpoint schedule that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component. If it is a CSV schedule, the values in it should be temperature values in Celsius and the "units_" input should be "Temperature."
        coolingSetPtSchedules_: A text string representing the cooling setpoint schedule that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component. If it is a CSV schedule, the values in it should be temperature values in Celsius and the "units_" input should be "Temperature."
        lightingSchedules_: A text string representing the lighting schedule that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component.
        equipmentSchedules_: A text string representing the equipment schedule that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component.
        infiltrationSchedules_: A text string representing the infiltration schedule that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component.
        HVACAvailabilitySchs_: A text string representing the HVAC availability schedule that you want to use. This can be either a schedule from the schedule library or a CSV file path to a CSV schedule you created with the "Honeybee_Create CSV Schedule" component.
    Returns:
        schedules: A report of which schedules are assigned to each zone.
        HBZones: HBZones that have had their schedules modified.
"""
ghenv.Component.Name = "Honeybee_Set EnergyPlus Zone Schedules"
ghenv.Component.NickName = 'setEPZoneSchedules'
ghenv.Component.Message = 'VER 0.0.57\nJUL_06_2015'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "08 | Energy | Set Zone Properties"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import scriptcontext as sc
import uuid
import Grasshopper.Kernel as gh
import os
def checkTheInputs():
#If the user puts in only one value, apply that value to all of the zones.
def duplicateData(data, calcLength):
dupData = []
for count in range(calcLength):
dupData.append(data[0])
return dupData
if len(occupancySchedules_) == 1: occupancySchedules = duplicateData(occupancySchedules_, len(_HBZones))
else: occupancySchedules = occupancySchedules_
if len(occupancyActivitySchs_) == 1: occupancyActivitySchs = duplicateData(occupancyActivitySchs_, len(_HBZones))
else: occupancyActivitySchs = occupancyActivitySchs_
if len(coolingSetPtSchedules_) == 1: coolingSetPtSchedules = duplicateData(coolingSetPtSchedules_, len(_HBZones))
else: coolingSetPtSchedules = coolingSetPtSchedules_
if len(heatingSetPtSchedules_) == 1: heatingSetPtSchedules = duplicateData(heatingSetPtSchedules_, len(_HBZones))
else: heatingSetPtSchedules = heatingSetPtSchedules_
if len(lightingSchedules_) == 1: lightingSchedules = duplicateData(lightingSchedules_, len(_HBZones))
else: lightingSchedules = lightingSchedules_
if len(equipmentSchedules_) == 1: equipmentSchedules = duplicateData(equipmentSchedules_, len(_HBZones))
else: equipmentSchedules = equipmentSchedules_
if len(infiltrationSchedules_) == 1: infiltrationSchedules = duplicateData(infiltrationSchedules_, len(_HBZones))
else: infiltrationSchedules = infiltrationSchedules_
if len(HVACAvailabilitySchs_) == 1: HVACAvailabilitySchs = duplicateData(HVACAvailabilitySchs_, len(_HBZones))
else: HVACAvailabilitySchs = HVACAvailabilitySchs_
return occupancySchedules, occupancyActivitySchs, coolingSetPtSchedules, heatingSetPtSchedules, lightingSchedules, equipmentSchedules, infiltrationSchedules, HVACAvailabilitySchs
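
# Illustrative note (not part of the original component): duplicateData above
# broadcasts a single input across all zones, e.g. with three HBZones,
#   duplicateData(['OFFICE OCC'], 3)  ->  ['OFFICE OCC', 'OFFICE OCC', 'OFFICE OCC']
# so one schedule name can be supplied for any number of zones.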
def main(HBZones, occupancySchedule, occupancyActivitySch, heatingSetPtSchedule, coolingSetPtSchedule, lightingSchedule, equipmentSchedule, infiltrationSchedule, HVACAvailabilitySchs):
# check for Honeybee
if not sc.sticky.has_key('honeybee_release'):
print "You should first let Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let Honeybee to fly...")
return -1
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
" Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
    # make sure schedules are in the HB schedule library
    schedules = [occupancySchedule, occupancyActivitySch, heatingSetPtSchedule, coolingSetPtSchedule, lightingSchedule, equipmentSchedule, infiltrationSchedule, HVACAvailabilitySchs]
HBScheduleList = sc.sticky["honeybee_ScheduleLib"].keys()
for scheduleList in schedules:
for schedule in scheduleList:
if schedule!=None:
schedule= schedule.upper()
if schedule!=None and not schedule.lower().endswith(".csv") and schedule not in HBScheduleList:
msg = "Cannot find " + schedule + " in Honeybee schedule library."
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return -1
elif schedule!=None and schedule.lower().endswith(".csv"):
                # check that the CSV file exists
if not os.path.isfile(schedule):
msg = "Cannot find the shchedule file: " + schedule
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return -1
# call the objects from the lib
hb_hive = sc.sticky["honeybee_Hive"]()
HBObjectsFromHive = hb_hive.callFromHoneybeeHive(HBZones)
schedules = []
for zoneCount, HBZone in enumerate(HBObjectsFromHive):
if occupancySchedule != [] and occupancySchedule[0] != None:
try: HBZone.occupancySchedule = occupancySchedule[zoneCount]
except: HBZone.occupancySchedule = occupancySchedule[0]
if occupancyActivitySch != [] and occupancyActivitySch[0] != None:
try: HBZone.occupancyActivitySch = occupancyActivitySch[zoneCount]
except: HBZone.occupancyActivitySch = occupancyActivitySch[0]
if heatingSetPtSchedule != [] and heatingSetPtSchedule[0] != None:
try: HBZone.heatingSetPtSchedule = heatingSetPtSchedule[zoneCount]
except: HBZone.heatingSetPtSchedule = heatingSetPtSchedule[0]
if coolingSetPtSchedule != [] and coolingSetPtSchedule[0] != None:
try: HBZone.coolingSetPtSchedule = coolingSetPtSchedule[zoneCount]
except: HBZone.coolingSetPtSchedule = coolingSetPtSchedule[0]
if lightingSchedule != [] and lightingSchedule[0] != None:
try: HBZone.lightingSchedule = lightingSchedule[zoneCount]
except: HBZone.lightingSchedule = lightingSchedule[0]
if equipmentSchedule != [] and equipmentSchedule[0] != None:
try: HBZone.equipmentSchedule = equipmentSchedule[zoneCount]
except: HBZone.equipmentSchedule = equipmentSchedule[0]
if infiltrationSchedule != [] and infiltrationSchedule[0] != None:
try: HBZone.infiltrationSchedule = infiltrationSchedule[zoneCount]
except: HBZone.infiltrationSchedule = infiltrationSchedule[0]
if HVACAvailabilitySchs != [] and HVACAvailabilitySchs[0] != None:
try: HBZone.HVACAvailabilitySched = HVACAvailabilitySchs[zoneCount]
except: HBZone.HVACAvailabilitySched = HVACAvailabilitySchs[0]
schedules.append(HBZone.getCurrentSchedules())
HBZones = hb_hive.addToHoneybeeHive(HBObjectsFromHive, ghenv.Component.InstanceGuid.ToString() + str(uuid.uuid4()))
return HBZones, schedules
if _HBZones and _HBZones[0]!=None:
    occupancySchedules, occupancyActivitySchs, coolingSetPtSchedules, heatingSetPtSchedules, lightingSchedules, equipmentSchedules, infiltrationSchedules, HVACAvailabilitySchs = checkTheInputs()
results = main(_HBZones, occupancySchedules, occupancyActivitySchs, heatingSetPtSchedules, coolingSetPtSchedules, lightingSchedules, equipmentSchedules, infiltrationSchedules, HVACAvailabilitySchs)
    if results != -1: HBZones, schedules = results

# ---- end of file: src/Honeybee_Set EnergyPlus Zone Schedules.py | repo: samuto/Honeybee | license: gpl-3.0 ----
#!/usr/bin/env python
"""
builder plugin
Define the plugin base API for builders
for managing virtualenvs of various flavours
"""
import os
import re
import argparse
import subprocess
from collections import namedtuple
from cirrus.configuration import load_configuration
from cirrus.environment import repo_directory
from pluggage.factory_plugin import PluggagePlugin
from cirrus.logger import get_logger
from cirrus.invoke_helpers import local
LOGGER = get_logger()
PythonVersion = namedtuple("PythonVersion", "major minor micro")
CONDA_VERSION_FORMAT = re.compile('^[0-9]{1}\.[0-9]{1}$')
PYTHON_VERSION_FORMAT = re.compile('^python[0-9]{1}\.[0-9]{1}$')
PYTHON_VERSION_MATCH = re.compile('Python [0-9]+\.[0-9]+\.[0-9]+')
def _parse_python_version(s):
"""
take the output of python -V and return
a version namedtuple
"""
x = PYTHON_VERSION_MATCH.match(s.strip())
# split on space into Python/version/blah
elems = x.string.split()
vers = elems[1]
# parse version
maj, minor, micro = vers.split('.', 2)
return PythonVersion(int(maj), int(minor), int(micro))
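
# Example (illustrative, assuming typical `python -V` output):
#   _parse_python_version('Python 3.6.5')  ->  PythonVersion(major=3, minor=6, micro=5)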
def normalise_version(v):
if v is None:
return None
result = str(v)
if CONDA_VERSION_FORMAT.match(str(v)):
result = 'python{}'.format(v)
if not PYTHON_VERSION_FORMAT.match(result):
        msg = (
            "Unable to reconcile python version from cirrus.conf build section:\n"
            "Value in cirrus.conf [build]: python={v}\n"
            "Expected either pythonX.Y or X.Y format"
        ).format(v=v)
LOGGER.error(msg)
raise RuntimeError(msg)
return result
def py_version_to_conda(v):
return v.replace('python', '')
def conda_version_to_py(v):
return 'python{}'.format(v)
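
# Examples (illustrative) of how the helpers above normalise between the
# cirrus.conf "X.Y" style and the interpreter "pythonX.Y" style:
#   normalise_version('3.6')          ->  'python3.6'
#   normalise_version('python3.6')    ->  'python3.6'
#   py_version_to_conda('python3.6')  ->  '3.6'
#   conda_version_to_py('3.6')        ->  'python3.6'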
class Builder(PluggagePlugin):
PLUGGAGE_FACTORY_NAME = 'builder'
def __init__(self):
super(Builder, self).__init__()
self.plugin_parser = argparse.ArgumentParser()
self.config = load_configuration()
self.build_config = self.config.get('build', {})
self.working_dir = repo_directory()
self.venv_name = self.build_config.get('virtualenv_name', 'venv')
self.reqs_name = self.build_config.get('requirements_file', 'requirements.txt')
self.extra_reqs = self.build_config.get('extra_requirements', [])
self.python_bin = self.build_config.get('python', None)
self.extra_reqs = self.str_to_list(self.extra_reqs)
self.venv_path = os.path.join(self.working_dir, self.venv_name)
def process_extra_args(self, extras):
opts, _ = self.plugin_parser.parse_known_args(extras)
return vars(opts)
def create(self, **kwargs):
"""
_create_
create a new python runtime environment
at the location provided
"""
pass
def clean(self, **kwargs):
"""
_clean_
remove the specified runtime environment
"""
pass
def activate(self):
"""
return a shell command string to activate the
runtime environment
"""
pass
def run_setup_develop(self):
LOGGER.info("Running setup.py develop...")
activate = self.activate()
local(
'{} && python setup.py develop'.format(
activate
)
)
def venv_python_version(self):
"""
get the python version from the virtualenv/conda env/pipenv
whatever
"""
command = "{} && python -V".format(self.activate())
outp = subprocess.getoutput(command)
return _parse_python_version(outp)
@property
def python_bin_for_venv(self):
if not self.python_bin:
return self.python_bin
v = normalise_version(self.python_bin)
return v
@property
def python_bin_for_conda(self):
if not self.python_bin:
return self.python_bin
v = normalise_version(self.python_bin)
return py_version_to_conda(v)
@classmethod
def str_to_list(cls, s, delim=','):
if isinstance(s, list):
return s
if delim in s:
return [x.strip() for x in s.split(delim) if x.strip()]
return [s]
# ---- end of file: src/cirrus/builder_plugin.py | repo: evansde77/cirrus | license: apache-2.0 ----
# coding=utf-8
import zipfile
import struct
import random
import os
import numpy as np
from PIL import Image, ImageEnhance
# If this is set to True, only アイウエオカキク characters will be extracted,
# which make for a faster training time with better accuracy at the cost
# of learning less characters.
REDUCED_TRAINING_SET = True
def main():
extract_zip()
unpack_katakana()
# Method definitions
def relative_path(path):
return os.path.dirname(os.path.realpath(__file__)) + '/' + path
def extract_zip():
output_dir = relative_path('raw/ETL1')
if not os.path.exists(output_dir):
print 'Extracting raw/ETL1.zip...'
with zipfile.ZipFile(relative_path('raw/ETL1.zip'), 'r') as file:
file.extractall(relative_path('raw'))
print 'raw/ETL1.zip extracted!'
def unpack_katakana():
output_dir = relative_path('katakana')
if not os.path.exists(output_dir):
print 'Unpacking katakana...'
os.makedirs(output_dir)
if REDUCED_TRAINING_SET:
datasets = [('07', 11288)]
else:
datasets = [
('07', 11288),
('08', 11288),
('09', 11287), # TODO ナ(NA) on Sheet 2672 is missing
('10', 11288),
('11', 11288),
('12', 11287), # TODO リ(RI) on Sheet 2708 is missing
('13', 4233),
]
with open(relative_path('katakana/categories.csv'), 'w') as categories_file:
            with open(relative_path('katakana/data'), 'wb') as data_file:
categories_file.write('category,katakana_character')
classification = []
categories = []
count = 0
datasets_count = len(datasets)
for dataset in range(datasets_count):
dataset_suffix, dataset_size = datasets[dataset]
                    with open(relative_path('raw/ETL1/ETL1C_' + dataset_suffix), 'rb') as file:
for i in range(dataset_size):
file.seek(i * 2052 + 2)
character = file.read(2).strip()
if character not in categories:
categories.append(character)
categories_file.write('\n{},{}'.format(categories.index(character), character))
file.seek(i * 2052 + 33)
prepare_image(data_file, file.read(2016))
classification.append((count, categories.index(character)))
count = count + 1
if i % 1000 == 0:
print 'Unpacking dataset {}/{} - {}% ...'.format(
dataset + 1, datasets_count, int((float(i) / dataset_size) * 100))
with open(relative_path('katakana/classification.csv'), 'w') as classification_file:
classification_file.write('position,category')
random.shuffle(classification)
for position, category in classification:
classification_file.write('\n{},{}'.format(position, category))
print 'Katakana unpacked!'
def prepare_image(data_file, image_data):
image = Image.frombytes('F', (64, 63), image_data, 'bit', 4)
image = image.convert('P')
image = ImageEnhance.Brightness(image).enhance(40)
image = image.resize((76, 76))
image = image.crop((6, 6, 70, 70))
new_img = Image.new('1', (64, 64))
new_img.paste(image, (0, 0))
new_img = Image.eval(new_img, lambda x: not x)
data_file.write(np.packbits(np.array(new_img.getdata())))
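
# Note (illustrative): each sample is a 64x64 1-bit image, so np.packbits
# writes 64*64/8 = 512 bytes per character into the katakana/data file.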
# Runtime
if __name__ == '__main__':
main()
# ---- end of file: data/prepare.py | repo: NoelDeMartin/Japanese-Character-Recognition | license: mit ----
from __future__ import absolute_import
from celery import shared_task
import os.path
import logging
import csv
from django.core.exceptions import ObjectDoesNotExist
from .RandomAuthorSet import RandomAuthorSet
from ..CitationFinder import CitationFinder, EmptyPublicationSetException
from scholarly_citation_finder import config
from scholarly_citation_finder.lib.file import create_dir
logger = logging.getLogger(__name__)
AUTHOR_SET_FILENAME = 'authors.csv'
@shared_task
def evaluation_create_author_set(name, setsize, num_min_publications, database='mag'):
'''
Task to create a random author set.
:param name: Evaluation name
:param setsize: Size of the site, i.e. the number of authors
:param num_min_publications: Minimum number of an author's publications
:param database: Database name
'''
dir = create_dir(os.path.join(config.EVALUATION_DIR, name))
author_set = RandomAuthorSet(database=database)
logger.info('{} -- create random author set of size {}'.format(name, setsize))
author_set.create(setsize=setsize, num_min_publications=num_min_publications)
logger.info('{} -- create random author set done'.format(name))
filename_author_set = author_set.store(os.path.join(dir, AUTHOR_SET_FILENAME))
#for author_info in e.get():
# author_id = author_info['author_id']
# pass
return filename_author_set
@shared_task
def evaluation_run(name, strategies):
'''
Evaluation run task.
:param name: Evaluation name
:param strategies: List of strategies
'''
evaluation_dir = os.path.join(config.EVALUATION_DIR, name)
with open(os.path.join(evaluation_dir, AUTHOR_SET_FILENAME)) as author_set_file:
reader = csv.DictReader(author_set_file)
for row in reader:
if len(row) == 3:
try:
strategies_result = evaluation_citations(author_id=row['author_id'], evaluation_name=name, strategies=strategies)
for strategy_result in strategies_result:
__store_evaluation_result(path=evaluation_dir,
filename=strategy_result['strategy_name'],
row=[row['author_id'],
row['num_citations'],
row['num_publications'],
strategy_result['num_inspected_publications'],
strategy_result['num_citations']])
except(EmptyPublicationSetException):
continue
except(ObjectDoesNotExist) as e:
raise e
return True
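
# Note (inferred from the DictReader usage above, not a documented format):
# authors.csv is expected to provide at least the columns
#   author_id,num_citations,num_publications
# with one row per randomly selected author.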
@shared_task
def evaluation_citations(author_id, strategies=None, evaluation_name='default'):
'''
Evaluation run view.
:param author_id: Author ID
:param strategies: List of strategies
:param evaluation_name: Evaluation name
    :raise ObjectDoesNotExist:
:raise MultipleObjectsReturned:
:raise EmptyPublicationSetException:
'''
result = []
try:
citationfinder = CitationFinder(database='mag', evaluation=True)
author_id, length_publication_set = citationfinder.publication_set.set_by_author(id=int(author_id))
logger.info('{} author: set {} publications'.format(author_id, length_publication_set))
citationfinder.load_stored_citations()
for strategy in strategies:
strategy_name = citationfinder.run(strategy)
logger.info('{}: finished strategy "{}"'.format(author_id, strategy_name))
num_inspected_publications, num_citations = citationfinder.store_evaluation(path=create_dir(os.path.join(config.EVALUATION_DIR, evaluation_name, strategy_name)),
filename=author_id)
result.append({'strategy_name': strategy_name,
'num_inspected_publications': num_inspected_publications,
'num_citations': num_citations})
return result
except(ObjectDoesNotExist) as e:
raise e
except(EmptyPublicationSetException) as e:
raise e
def __store_evaluation_result(path, filename, row):
'''
Store evaluation result.
:param path: Path
:param filename: Name of the file
:param row: Row to append to the file
'''
filename = os.path.join(path, 'meta_{}.csv'.format(filename))
file_exists = os.path.isfile(filename)
try:
with open(filename, 'a+') as csvfile:
writer = csv.writer(csvfile)
if not file_exists:
writer.writerow(['author_id', 'author_num_citations', 'author_num_publications', 'num_inspected_publications', 'num_citations'])
writer.writerow(row)
return filename
except(IOError) as e:
raise e
# ---- end of file: scholarly_citation_finder/apps/citation/evaluation/tasks.py | repo: citationfinder/scholarly_citation_finder | license: mit ----
# -*- coding:utf-8 -*-
import xml.etree.ElementTree as etree
import re
import datetime
#set the output file name for the 'good' data
#needs to be to a structured format - but dump to text for now
#clean_output = 'clean_out.csv'
clean_output = 'clean.txt'
#set the dirty output file where we'll dump the awkward lines
dirty_output = 'dirty_out.txt'
#open the clean output file
f2 = open(clean_output, 'w')
#open the dirty output file
f3 = open(dirty_output, 'w')
#probably a better way of doing this - but set up a list of valid months to compare against (maybe move nearer to this code?)
month_list = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August','September', 'October', 'November', 'December']
ref = ("january", "february", "march", "april", "may", "june", "july", "august", "september", "october", "november", "december")
#initialise integer values for month and day
birth_day = 0
birth_month = 0
# First function: cleans out (invisible) ascii chars 132 and 160 from some lines which was causing problems
def remove_non_ascii_1(text):
    return ''.join(i for i in text if ord(i)<128) # keep only plain ASCII characters in the normal printing range
#Second Function - test_line() - split the line into words and return how many there are
# (just used to spot 2-word lines which indicate a day / month line)
def test_line (text):
words = text.split()
num_words = len(words)
#print "one " + str( words[0])
#print "two " + str( words[1])
return num_words
def muso_detail_split(text):
# initialise so we can use it as a flag if fails below
#worked = True
#split the line of text using commas as a delimiter
muso_bits = text.split(',')
try: #try to convert the contents of the last item - to an integer. If it is 1928 or 1957 for example, it should work
birth_year = int(muso_bits [-1])
#Grab everything before the first comma - that seems to be a constant for the name location
muso_name = muso_bits[0]
#split that name into first, middle and surname to be returned individually - using the space as a delimiter
# putting each into a list
muso_name_list = muso_name.split(" ")
muso_forname = muso_name_list[0] #choose the first item in the list - should be the forname
muso_surname = muso_name_list[-1] # choose the last item as the last name
#if there are more than 2 items in the list, assume that the second is a middle name
if len (muso_name_list) > 2:
muso_midname = muso_name_list[1]
else:
muso_midname = ""
#chuck away the first item as we dealt with that as the names at lines 12 - 20 above
#print muso_forname
#print muso_surname
muso_bits.remove(muso_bits[0])
#chuck away the last item - it was the year of birth (line 24)
muso_bits.remove(muso_bits[-1])
#we should be left with the instruments
instrm_list = list(muso_bits)
#that's it all sorted - keep these remaining items as a list of instruments / roles which we'll return as a list
############
# Needs to find and replace / in instrument list (e.g Campise entry)
muso_obj = [muso_forname, muso_midname, muso_surname, birth_year, instrm_list]
except ValueError:
        # doesn't end with a single year so we can't process it for now - write it out to the dirty file (and mark *** for future reference)
f3.write(str(birth_day) + " " + str(birth_month) +"\n")
f3.write(text + "*** " +"\n")
# return empty list
muso_obj = []
return muso_obj
def create_date(d,m,y):
date1 = datetime.date(y,m,d)
return date1
def date_comp(dc):
for month in ref:
if dc in month:
return ref.index(month) + 1
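
# Example (illustrative): date_comp matches abbreviated month names by
# substring, so date_comp('jan') -> 1 and date_comp('oct') -> 10,
# because 'jan' is contained in 'january' and 'oct' in 'october'.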
def find_death(line):
line = line.strip()
list1 = line.split(',')
try:
int_year = int(list1[1])
#print(int_year)
except:
pass
#print list[0]
list1[0] = list1[0].replace(".", " ")
#print list[0]
d_m = list1[0].split(" ")
d_m[0] = d_m[0].replace(".","").lower()
int_month = date_comp(d_m[0])
int_day = d_m[-1]
return str(int_year) + "-" + str(int_month) + "-" + str(int_day)
##################################
# main code starts here #
##################################
# grab the document as an xml object
tree = etree.parse('jazz_birthdays_manual_tidy.xml')
root = tree.getroot()
for child in root:
ignore_flag = False #used to identify elements with sub-elements <ulink> (ie Youtube links) as we don't want those
dod =""
for sub_child in child:
if sub_child is not None:
# if there is a sub-elemnent (which can only be ulink)
# set the flag to true and do nothing more with that line
ignore_flag = True
if not ignore_flag: #so no sub elements - so we need to to more checks
if child.text is not None: #not an empty <para/>
line_text = child.text.encode('utf-8') #encode the text
line_text = line_text.strip() # strip leading and trailing whitespace
line_text = remove_non_ascii_1(line_text) # call the function to clean out ascii chars 132 and 160 from some lines
nw = test_line (line_text)
if nw ==2:
#it can only be a date (as only they have two elements - day / month)
words = line_text.split()
tally = 0
if words[1] in month_list:
#take it that this is a date
# update the global vaiables with day and month * ordinal values*
# We can use these to build up a datetime object for each musician's birth
# (taking the year from the muso's line below
birth_day = int(words [0])
birth_month = month_list.index(words[1]) +1
else:
#take it that it is a musician line (as we've excluded the day / month lines )
find_substr = "(or"
if find_substr in line_text:
f3.write(str(birth_day) + " " + str(birth_month) +"\n")
f3.write(line_text +"\n")
else:
# we need to find death dates and split on those
# treating the second part as the death date
# and the first part as a general musician entry
death_text =""
deceased = re.search ("\(d\.(.*)\)", line_text)
# if "(d." found use that to split the string
if deceased:
split_dec = re.split ("\(d\.(.*)\)", line_text)
line_text = split_dec [0]
death_text = split_dec[1]
muso_parts = muso_detail_split (line_text)
# returned as muso_forname, muso_midname, muso_surname, birth_year, instrm_list
#print len (muso_parts)
if len (muso_parts) > 0:
#for part in muso_parts:
# print part
#print len(muso_parts)
#print muso_parts[3]
dob = create_date (birth_day, birth_month, muso_parts[3])
#dod = create_death_date (death_text)
if deceased:
print death_text
dod = find_death (death_text)
f2.write (muso_parts[2] + "\t" + muso_parts[0] +"\t" + muso_parts [1] +"\t" + str(dob) + "\t")
for inst in muso_parts [4]:
f2.write (inst + ", ")
#f deceased:
# f2.write ("Deceased \t")
if dod != "":
f2.write(dod)
f2.write("\n")
#f2.write("\n")
#print muso_parts
#for part in muso_parts:
# print part
#f3.write(line_text +"\n")
#print len(child)
# f.close()
f2.close()
f3.close()

# ---- end of file: old versions/extraction4.py | repo: watty62/jazz_birthdays | license: cc0-1.0 ----
import theano
import theano.tensor as T
import numpy as np
import sys
sys.path.insert(0, '../data_loader/')
import load
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
# load data
x_train, t_train, x_test, t_test = load.cifar10(dtype=theano.config.floatX, grayscale=False)
labels_test = np.argmax(t_test, axis=1)
# reshape data
x_train = x_train.reshape((x_train.shape[0], 3, 32, 32))
x_test = x_test.reshape((x_test.shape[0], 3, 32, 32))
# define symbolic Theano variables
x = T.tensor4()
t = T.matrix()
# define model: neural network
def floatX(x):
return np.asarray(x, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.1))
def momentum(cost, params, learning_rate, momentum):
grads = theano.grad(cost, params)
updates = []
for p, g in zip(params, grads):
mparam_i = theano.shared(np.zeros(p.get_value().shape, dtype=theano.config.floatX))
v = momentum * mparam_i - learning_rate * g
updates.append((mparam_i, v))
updates.append((p, p + v))
return updates
def model(x, w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o):
c1 = T.maximum(0, conv2d(x, w_c1) + b_c1.dimshuffle('x', 0, 'x', 'x'))
p1 = max_pool_2d(c1, (3, 3))
c2 = T.maximum(0, conv2d(p1, w_c2) + b_c2.dimshuffle('x', 0, 'x', 'x'))
p2 = max_pool_2d(c2, (2, 2))
p2_flat = p2.flatten(2)
h3 = T.maximum(0, T.dot(p2_flat, w_h3) + b_h3)
p_y_given_x = T.nnet.softmax(T.dot(h3, w_o) + b_o)
return p_y_given_x
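
# Shape arithmetic for the layers above (a sketch assuming 'valid' convolutions
# and default non-overlapping pooling):
#   3x32x32 input -> conv 3x3 -> 4x30x30 -> max_pool 3x3 -> 4x10x10
#   -> conv 3x3 -> 8x8x8 -> max_pool 2x2 -> 8x4x4 -> flatten to 8*4*4 = 128,
# which is why w_h3 below has shape (8 * 4 * 4, 100).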
w_c1 = init_weights((4, 3, 3, 3))
b_c1 = init_weights((4,))
w_c2 = init_weights((8, 4, 3, 3))
b_c2 = init_weights((8,))
w_h3 = init_weights((8 * 4 * 4, 100))
b_h3 = init_weights((100,))
w_o = init_weights((100, 10))
b_o = init_weights((10,))
params = [w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o]
p_y_given_x = model(x, *params)
y = T.argmax(p_y_given_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(p_y_given_x, t))
updates = momentum(cost, params, learning_rate=0.01, momentum=0.9)
# compile theano functions
train = theano.function([x, t], cost, updates=updates)
predict = theano.function([x], y)
# train model
batch_size = 50
for i in range(50):
print "iteration {}".format(i + 1)
for start in range(0, len(x_train), batch_size):
x_batch = x_train[start:start + batch_size]
t_batch = t_train[start:start + batch_size]
cost = train(x_batch, t_batch)
predictions_test = predict(x_test)
accuracy = np.mean(predictions_test == labels_test)
print "accuracy: {}\n".format(accuracy)
# ---- end of file: 4_simple_conv_net/better_conv_net.py | repo: JBed/Simple_Theano | license: apache-2.0 ----
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import horizon
from horizon.dashboards.nova import dashboard
class InstancesAndVolumes(horizon.Panel):
name = "Instances & Volumes"
slug = 'instances_and_volumes'
dashboard.Nova.register(InstancesAndVolumes)
# ---- end of file: horizon/horizon/dashboards/nova/instances_and_volumes/panel.py | repo: andrewsmedina/horizon | license: apache-2.0 ----
'''
Menu for Community Scripts
Author: Christoph Stoettner
Mail: christoph.stoettner@stoeps.de
Documentation: http://scripting101.stoeps.de
Version: 5.0.1
Date: 09/19/2015
License: Apache 2.0
History: Changed by Jan Alderlieste
'''
import sys
import os
import ibmcnx.functions
import ibmcnx.menu.MenuClass
import java
from java.lang import String
from java.util import HashSet
from java.util import HashMap
# Only load commands if not initialized directly (call from menu)
# if __name__ == "__main__":
# execfile( "ibmcnx/loadCnxApps.py" )
global globdict
globdict = globals()
def docDocumentation():
print '###########################################################'
print '# #'
print '# Not implemented in the menu! #'
print '# #'
print '# call with: #'
print '# wsadmin.sh -lang jython -f ibmcnx/doc/Documentation.py #'
print '# #'
print '###########################################################'
# execfile( 'ibmcnx/doc/Documentation.py', globdict )
global globdict
globdict = globals()
doc = ibmcnx.menu.MenuClass.cnxMenu()
doc.AddItem('Show JVM Heap Sizes (ibmcnx/doc/JVMHeap.py)',
ibmcnx.functions.docJVMHeap)
doc.AddItem('Show JVM Settings (ibmcnx/doc/JVMSettings.py)',
ibmcnx.functions.docJVMSettings)
doc.AddItem('Show JVM Trace Settings (ibmcnx/doc/traceSettings.py)',
ibmcnx.functions.doctracesettings)
doc.AddItem('Show SystemOut/Err Log Sizes (ibmcnx/doc/LogFiles.py)',
ibmcnx.functions.docLogFiles)
doc.AddItem('Show all used ports (ibmcnx/doc/Ports.py)',
ibmcnx.functions.docPorts)
doc.AddItem('Show all used variables (ibmcnx/doc/Variables.py)',
ibmcnx.functions.docVariables)
doc.AddItem('Show all j2ee roles of inst. applications (ibmcnx/doc/j2eeroles.py)',
ibmcnx.functions.docj2eeroles)
doc.AddItem('Show all datasources and parameters (ibmcnx/doc/DataSources.py)',
ibmcnx.functions.docdatasources)
doc.AddItem('Show users with employee.extended role (ibmcnx/doc/ProfRoleID.py',
ibmcnx.functions.docroleid)
doc.AddItem('Show inactive user profiles (ibmcnx/doc/ProfilesInactive.py',
ibmcnx.functions.docinactiveprof)
doc.AddItem(
'Create a file with all documentation (ibmcnx/doc/Documentation.py)', docDocumentation)
doc.AddItem('Back to Main Menu (ibmcnx/menu/cnxmenu.py)',
ibmcnx.functions.cnxBackToMainMenu)
doc.AddItem("Exit", ibmcnx.functions.bye)
state_doc = 'True'
menutitle = "HCL Connections Documentation"
while state_doc == 'True':
count = len(doc.menuitems)
doc.Show(menutitle)
###########################
# # Robust error handling ##
# # only accept int ##
###########################
## Wait for valid input in while...not ###
is_valid_doc = 0
while not is_valid_doc:
try:
inputstring = '\tEnter your choice [1-' + str(count) + ']: '
n = int(raw_input(inputstring))
if n <= count and n > 0:
is_valid_doc = 1 # set it to 1 to validate input and to terminate the while..not loop
else:
print ("'%s' is not a valid menu option.") % n
except ValueError, e:
print ("'%s' is not a valid integer." % e.args[0].split(": ")[1])
# n = input( "your choice> " )
doc.Do(n - 1)
# ---- end of file: ibmcnx/menu/docs.py | repo: stoeps13/ibmcnx2 | license: apache-2.0 ----
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
# these are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
# ---- end of file: www/env/lib/python2.7/site-packages/django/conf/locale/de_CH/formats.py | repo: KrzysztofStachanczyk/Sensors-WWW-website | license: gpl-3.0 ----
"""
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
# ---- end of file: binding-python/runtime/src/main/python/etch/compiler/__init__.py | repo: OBIGOGIT/etch | license: apache-2.0 ----
import os
import json
import collections
import datetime
from flask import Flask, request, current_app, make_response, session, escape, Response, jsonify
from flask_jwt_extended import JWTManager, jwt_required, create_access_token, get_jwt_identity
from flask_socketio import SocketIO
from neo4j.v1 import GraphDatabase, basic_auth
from lib.crossDomain import crossdomain
import simplekv.memory
import eventlet
#eventlet.monkey_patch()
# if sys.version_info < (3, 0):
# sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
# sys.exit(1)
config = json.load(open('./config.json'))
# Init
UPLOAD_FOLDER = os.path.dirname(os.path.realpath(__file__)) + "/uploads"
x_socketio = SocketIO()
def create_app():
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = config['auth_secret']
app.config['JWT_BLACKLIST_ENABLED'] = False
app.config['JWT_BLACKLIST_STORE'] = simplekv.memory.DictStore()
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = 'all'
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(minutes=15)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
driver = GraphDatabase.driver(config['database_url'], auth=basic_auth(config['database_user'],config['database_pass']))
db_session = driver.session()
# start jwt service
jwt = JWTManager(app)
# Import blueprints
from auth import auth_blueprint
from banner import banner_blueprint
from people import people_blueprint
from organizations import organizations_blueprint
from repos import repositories_blueprint
from schema import schema_blueprint
from data import data_blueprint
from search import search_blueprint
from upload import upload_blueprint
from export import export_blueprint
from list import list_blueprint
from .sockets import sockets as socket_blueprint
# register API modules
app.register_blueprint(banner_blueprint)
app.register_blueprint(auth_blueprint)
app.register_blueprint(people_blueprint)
app.register_blueprint(organizations_blueprint)
app.register_blueprint(repositories_blueprint)
app.register_blueprint(schema_blueprint)
app.register_blueprint(search_blueprint)
app.register_blueprint(data_blueprint)
app.register_blueprint(upload_blueprint)
app.register_blueprint(socket_blueprint)
app.register_blueprint(export_blueprint)
app.register_blueprint(list_blueprint)
x_socketio.init_app(app)
return app, jwt
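
# Minimal usage sketch (an assumption, not part of this module): the factory
# returns the Flask app and the JWT manager, and the shared SocketIO instance
# serves the app, e.g.
#
#   app, jwt = create_app()
#   x_socketio.run(app, host='0.0.0.0', port=5000)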
# ---- end of file: api/__init__.py | repo: inquisite/Inquisite-Core | license: gpl-3.0 ----
import json
import datetime
from django import forms, http
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.template import Context, loader
from django.utils.encoding import force_unicode
from django.views.generic import CreateView, UpdateView, DetailView, ListView, DeleteView
from braces.views import LoginRequiredMixin
from .models import Post, Comment
from .forms import PostForm, CommentForm
class PostActionMixin(object):
def form_valid(self, form):
msg = 'Post {0}!'.format(self.action)
messages.info(self.request, msg)
return super(PostActionMixin, self).form_valid(form)
class StaffRequiredMixin(object):
"""
Mixin allows you to require a user with `is_staff` set to True.
"""
#adapted from SuperuserRequiredMixin from braces.views
login_url = settings.LOGIN_URL # LOGIN_URL from project settings
raise_exception = False # Default whether to raise an exception to none
redirect_field_name = REDIRECT_FIELD_NAME # Set by django.contrib.auth
def dispatch(self, request, *args, **kwargs):
if not request.user.is_staff: # If the user is a standard user,
if self.raise_exception: # *and* if an exception was desired
raise PermissionDenied # return a forbidden response.
else:
return redirect_to_login(request.get_full_path(),
self.login_url,
self.redirect_field_name)
return super(StaffRequiredMixin, self).dispatch(request,
*args, **kwargs)
class PostCreateView(LoginRequiredMixin, StaffRequiredMixin, PostActionMixin, CreateView):
form = PostForm
model = Post
action = 'created'
def get_initial(self):
initial = super(PostCreateView, self).get_initial()
initial['pub_date'] = datetime.date.today()
return initial
class PostUpdateView(LoginRequiredMixin, StaffRequiredMixin, PostActionMixin, UpdateView):
form = PostForm
model = Post
action = 'updated'
class PostDetailView(DetailView):
model = Post
def get_context_data(self, **kwargs):
context = super(PostDetailView, self).get_context_data(**kwargs)
form = context['form'] = CommentForm(initial={'post': context['post'].id})
form.fields['post'].widget = forms.HiddenInput()
post = context['post']
comments = post.comment_set.order_by('modified')
context['comments'] = comments
return context
class PostListView(ListView):
model = Post
template_name = 'blog/post_list.html'
queryset = Post.objects.order_by('-pub_date', '-created')
paginate_by = 4
class PostDeleteView(LoginRequiredMixin, StaffRequiredMixin, PostActionMixin, DeleteView):
model = Post
action = 'deleted'
success_url = '/'
class CommentCreateView(CreateView):
model = Comment
form = CommentForm
def form_valid(self, form):
msg = 'Thanks for your comment!'
messages.info(self.request, msg)
normal_retval = super(CommentCreateView, self).form_valid(form)
if self.request.POST.get('format', 'html') != 'json':
return normal_retval
t = loader.get_template('blog/comment.html')
context = Context(self.get_context_data())
return self.get_json_response(
json.dumps({
'success': True,
'messages': [{'message': m.message, 'tags': m.tags} for m in
messages.get_messages(self.request)],
'comment': t.render(context)
})
)
def form_invalid(self, form):
normal_retval = super(CommentCreateView, self).form_invalid(form)
if self.request.POST.get('format', 'html') != 'json':
return normal_retval
return self.get_json_response(
json.dumps({
'success': False,
'errors': {k: v.as_ul() if len(v) > 1 else force_unicode(v[0]) for k, v in form.errors.iteritems()}
})
)
def get_json_response(self, content, **httpresponse_kwargs):
"Construct an `HttpResponse` object."
return http.HttpResponse(content,
content_type='application/json',
**httpresponse_kwargs)
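
# Illustrative note (shapes taken from the handlers above, not an extra API):
# a comment POST with format=json is answered with either
#   {"success": true, "messages": [{"message": "...", "tags": "..."}], "comment": "<rendered html>"}
# on success, or
#   {"success": false, "errors": {"field_name": "error text"}}
# when the form fails validation.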
# ---- end of file: simpleblog/blog/views.py | repo: lambacck/simpleblog | license: mit ----
#!/usr/bin/env python
# Run the various build scripts
import sys
import os
from parse import parse_machines
from machines import machines
from assemblies import assemblies
from vitamins import vitamins
from printed import printed
from guides import guides
from publish import publish
def build(do_publish=0):
print("Build")
print("-----")
outfile = 'hardware.json'
oldfile = 'backup.json'
print("Backup current json...")
oldjso = None
if os.path.isfile(outfile) and not os.path.isfile(oldfile):
os.rename(outfile, oldfile)
errorlevel = 0
errorlevel += parse_machines()
if errorlevel == 0:
errorlevel += vitamins()
if errorlevel == 0:
errorlevel += printed()
if errorlevel == 0:
errorlevel += assemblies()
if errorlevel == 0:
errorlevel += machines()
if errorlevel == 0:
errorlevel += guides()
if errorlevel == 0 and do_publish > 0:
publish()
# if everything is ok then delete backup - no longer required
if errorlevel == 0:
os.remove(oldfile)
return errorlevel
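
# Usage sketch (an assumption based on the __main__ handling below): run with
# no argument for a plain build, or pass a truthy flag to publish as well:
#   ./build.py       # build only
#   ./build.py 1     # build, then publish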
if __name__ == '__main__':
if len(sys.argv) == 2:
        sys.exit(build(int(sys.argv[1])))
else:
        sys.exit(build(0))

# ---- end of file: hardware/ci/build.py | repo: Axford/AFRo | license: mit ----
class OutlineAlignmentParams (object):
# FILE_IN_1 = 'img/render.png'
# SIZE_IN_1 = (40, 0, 1026, 632)
# BACKGROUND_REMOVAL_1 = None
# FILE_IN_2 = 'img/texture.png'
# SIZE_IN_2 = (40, 0, 1026, 632)
# BACKGROUND_REMOVAL_2 = None
# FILE_IN_1 = 'img2/render-2.png'
# SIZE_IN_1 = None
# BACKGROUND_REMOVAL_1 = None
# FILE_IN_2 = 'img2/texture-2.png'
# SIZE_IN_2 = None
# BACKGROUND_REMOVAL_2 = "red_background"
FILE_IN_1 = 'img3/MeshPurple.png'
SIZE_IN_1 = None
BACKGROUND_REMOVAL_1 = None
FILE_IN_2 = 'img3/CleanPurple.png'
SIZE_IN_2 = None
BACKGROUND_REMOVAL_2 = "white_background"
LARGE_APPROX = 7
SMALL_APPROX = 2.5
# ---- end of file: outline_alignment_params.py | repo: ngtrhieu/outline_alignment | license: mit ----
import mango
from ._PValueTest import *
from ._GeneralisedChiSquaredTest import *
__all__ = [s for s in dir() if not s.startswith('_')]
if mango.haveRestricted:
from ._fmmTest import *
from ._BinnedGmmEmTest import *
from ._SummedBinnedGmmEmTest import *
# ---- end of file: misc/python/mango/fmmTest/__init__.py | repo: pymango/pymango | license: bsd-2-clause ----
#!/usr/bin/env python3
import traceback
from telethon_examples.interactive_telegram_client \
import InteractiveTelegramClient
def load_settings(path='api/settings'):
"""Loads the user settings located under `api/`"""
result = {}
with open(path, 'r', encoding='utf-8') as file:
for line in file:
value_pair = line.split('=')
left = value_pair[0].strip()
right = value_pair[1].strip()
if right.isnumeric():
result[left] = int(right)
else:
result[left] = right
return result
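
# Illustrative settings file (an assumption matching the parser above; simple
# `key = value` lines, stored at api/settings by default):
#
#   session_name = anonymous
#   user_phone = +10000000000
#   api_id = 12345
#   api_hash = 0123456789abcdef0123456789abcdef
#   socks_proxy = 127.0.0.1:9050
#
# The values shown here are placeholders, not real credentials.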
if __name__ == '__main__':
# Load the settings and initialize the client
settings = load_settings()
kwargs = {}
if settings.get('socks_proxy'):
import socks # $ pip install pysocks
host, port = settings['socks_proxy'].split(':')
kwargs = dict(proxy=(socks.SOCKS5, host, int(port)))
client = InteractiveTelegramClient(
session_user_id=str(settings.get('session_name', 'anonymous')),
user_phone=str(settings['user_phone']),
api_id=settings['api_id'],
api_hash=str(settings['api_hash']),
**kwargs)
print('Initialization done!')
try:
client.run()
except Exception as e:
print('Unexpected error ({}): {} at\n{}'.format(
type(e), e, traceback.format_exc()))
finally:
client.disconnect()
print('Thanks for trying the interactive example! Exiting...')
# ---- end of file: try_telethon.py | repo: kyasabu/Telethon | license: mit ----
#!/usr/bin/env python3
"""Class interacts with devices supporting BRIDGE-MIB."""
from collections import defaultdict
from switchmap.snmp.base_query import Query
from switchmap.snmp import mib_if
from switchmap.utils import general
def get_query():
"""Return this module's Query class."""
return BridgeQuery
def init_query(snmp_object):
"""Return initialize and return this module's Query class."""
return BridgeQuery(snmp_object)
class BridgeQuery(Query):
"""Class interacts with devices supporting BRIDGE-MIB.
Args:
None
Returns:
None
Key Methods:
supported: Queries the device to determine whether the MIB is
supported using a known OID defined in the MIB. Returns True
if the device returns a response to the OID, False if not.
layer1: Returns all needed layer 1 MIB information from the device.
Keyed by OID's MIB name (primary key), ifIndex (secondary key)
"""
def __init__(self, snmp_object):
        Initialize the class.
Args:
snmp_object: SNMP Interact class object from snmp_manager.py
Returns:
None
"""
# Assign SNMP object
self._snmp_object = snmp_object
# Get one OID entry in MIB (dot1dBasePortIfIndex)
test_oid = '.1.3.6.1.2.1.17.4.3.1.2'
        # Cache the ifIndex values for this device. They are used to map
        # dot1dBasePort values to ifIndex in dot1dbaseport_2_ifindex().
        self._ifindex = mib_if.IfQuery(snmp_object).ifindex()
super().__init__(snmp_object, test_oid, tags=['layer1'])
def layer1(self):
"""Get layer 1 data from device.
Args:
None
Returns:
final: Final results
"""
# Return
return self._macaddresstable()
def _macaddresstable(self):
"""Return dict of the devices MAC address table.
Args:
None
Returns:
final: Dict of MAC addresses keyed by ifIndex
"""
# Initialize key variables
final = defaultdict(lambda: defaultdict(dict))
done = False
# Check if Cisco VLANS are supported
oid_vtpvlanstate = '.1.3.6.1.4.1.9.9.46.1.3.1.1.2'
oid_exists = self._snmp_object.oid_exists(oid_vtpvlanstate)
if bool(oid_exists) is True:
final = self._macaddresstable_cisco()
done = True
# Check if Juniper VLANS are supported
if done is False:
oid_dot1qvlanstaticname = '.1.3.6.1.2.1.17.7.1.4.3.1.1'
oid_exists = self._snmp_object.oid_exists(
oid_dot1qvlanstaticname)
if bool(oid_exists) is True:
final = self._macaddresstable_juniper()
# Return
return final
def _macaddresstable_cisco(self):
"""Return dict of the Cisco device's MAC address table.
Args:
None
Returns:
final: Dict of MAC addresses keyed by ifIndex
"""
# Initialize key variables
data_dict = defaultdict(lambda: defaultdict(dict))
final = defaultdict(lambda: defaultdict(dict))
context_names = ['']
context_style = 0
# Check if Cisco VLANS are supported
oid_vtpvlanstate = '.1.3.6.1.4.1.9.9.46.1.3.1.1.2'
oid_exists = self._snmp_object.oid_exists(oid_vtpvlanstate)
if bool(oid_exists) is True:
# Get the vlantype
oid_vtpvlantype = '.1.3.6.1.4.1.9.9.46.1.3.1.1.3'
vtpvlantype = self._snmp_object.swalk(
oid_vtpvlantype, normalized=True)
# Get VLANs and their states
vtpvlanstate = self._snmp_object.swalk(
oid_vtpvlanstate, normalized=True)
# Get the style of context name to be used for this type of device
for vlan, state in vtpvlanstate.items():
if int(state) == 1 and int(vtpvlantype[vlan]) == 1:
context_style = self._cisco_context_style(vlan)
break
# Append additional vlan context names to query.
# Only if Ethernet VLANs (pysnmp dies silently otherwise)
for vlan, state in vtpvlanstate.items():
if int(state) == 1 and int(vtpvlantype[vlan]) == 1:
cisco_context = _cisco_vlan_context(vlan, context_style)
context_names.append(cisco_context)
# Get key information
macs = self._dot1dtpfdbaddress(context_names=context_names)
dot1dtpfdbport = self._dot1dtpfdbport(context_names=context_names)
baseportifindex = self.dot1dbaseport_2_ifindex()
# Create a dict keyed by ifIndex
for decimal_macaddress, hex_macaddress in macs.items():
# Sometimes an overloaded system running this script may have
# timeouts retrieving data that should normally be there.
# This prevents the script from crashing when this occurs
if bool(dot1dtpfdbport[decimal_macaddress]) is False:
continue
# Get ifIndex from dot1dBasePort
dot1dbaseport = int(dot1dtpfdbport[decimal_macaddress])
ifindex = baseportifindex[dot1dbaseport]
# With multi-threading sometimes baseportifindex has empty values.
if bool(ifindex) is False:
continue
# Assign MAC addresses to ifIndex
if ifindex not in data_dict:
data_dict[ifindex] = [hex_macaddress]
else:
data_dict[ifindex].append(hex_macaddress)
# Assign MACs to secondary key for final result
for ifindex, hex_macaddresses in data_dict.items():
final[ifindex]['jm_macs'] = []
for next_mac in hex_macaddresses:
final[ifindex]['jm_macs'].append(next_mac)
# Return
return final
def _macaddresstable_juniper(self):
"""Return dict of the Juniper device's MAC address table.
Args:
None
Returns:
final: Dict of MAC addresses keyed by ifIndex
"""
# Initialize key variables
final = defaultdict(lambda: defaultdict(dict))
dot1dbaseport_macs = {}
# Check if Juniper VLANS are supported
oid_dot1qvlanstaticname = '.1.3.6.1.2.1.17.7.1.4.3.1.1'
oid_exists = self._snmp_object.oid_exists(oid_dot1qvlanstaticname)
if bool(oid_exists) is True:
# Create a dict of MAC addresses found
mac_dict = self._dot1qtpfdbport()
for decimal_macaddress, dot1dbaseport in mac_dict.items():
# Convert decimal mac to hex
# (Only use the last 6 digits in the decimal_macaddress, first
# digit is the vlan number)
hex_macaddress = ''
mac_bytes = decimal_macaddress.split('.')[-6:]
for mac_byte in mac_bytes:
hex_macaddress = (
'{}{}'.format(
hex_macaddress, hex(int(mac_byte))[2:].zfill(2)))
# Assign MAC to baseport index
if dot1dbaseport in dot1dbaseport_macs:
dot1dbaseport_macs[dot1dbaseport].append(hex_macaddress)
else:
dot1dbaseport_macs[dot1dbaseport] = [hex_macaddress]
# Assign MACs to ifindex
baseportifindex = self.dot1dbaseport_2_ifindex()
for dot1dbaseport, ifindex in baseportifindex.items():
if dot1dbaseport in dot1dbaseport_macs:
final[ifindex][
'jm_macs'] = dot1dbaseport_macs[dot1dbaseport]
# Return
return final
def _dot1dtpfdbport(self, context_names=None):
"""Return dict of BRIDGE-MIB dot1dTpFdbPort data.
Args:
None
Returns:
data_dict: Dict of dot1dTpFdbPort using the OID nodes
excluding the OID root as key
"""
# Initialize key variables
if context_names is None:
context_names = ['']
data_dict = defaultdict(dict)
# Process values
oid = '.1.3.6.1.2.1.17.4.3.1.2'
for context_name in context_names:
results = self._snmp_object.swalk(
oid, normalized=False, context_name=context_name)
for key, value in results.items():
new_key = key[len(oid):]
data_dict[new_key] = value
# Return data
return data_dict
def _dot1qtpfdbport(self):
"""Return dict of BRIDGE-MIB dot1qTpFdbPort data.
Args:
None
Returns:
data_dict: Dict of dot1qTpFdbPort using the OID nodes
excluding the OID root as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
vlan_dict = defaultdict(dict)
vlans = []
# Process dot1qvlanstaticname OID
oid_dot1qvlanstaticname = '.1.3.6.1.2.1.17.7.1.4.3.1.1'
oid_exists = self._snmp_object.oid_exists(oid_dot1qvlanstaticname)
if bool(oid_exists) is True:
results = self._snmp_object.walk(
oid_dot1qvlanstaticname, normalized=True)
for key, value in results.items():
vlan_dict[key] = value
for key, _ in vlan_dict.items():
vlans.append(key)
# Process values
oid = '.1.3.6.1.2.1.17.7.1.2.2.1.2'
for vlan in vlans:
new_oid = '{}.{}'.format(oid, vlan)
results = self._snmp_object.swalk(new_oid, normalized=False)
for key, value in results.items():
new_key = key[len(oid):]
data_dict[new_key] = value
# Return data
return data_dict
def _dot1dtpfdbaddress(self, context_names=None):
"""Return dict of BRIDGE-MIB dot1dTpFdbAddress data.
Args:
None
Returns:
data_dict: Dict of dot1dTpFdbAddress using the OID nodes
excluding the OID root as key
"""
# Initialize key variables
if context_names is None:
context_names = ['']
data_dict = defaultdict(dict)
# Process values
oid = '.1.3.6.1.2.1.17.4.3.1.1'
for context_name in context_names:
results = self._snmp_object.swalk(
oid, normalized=False, context_name=context_name)
for key, mac_value in results.items():
# Assign the mac address to the dictionary
new_key = key[len(oid):]
data_dict[new_key] = general.octetstr_2_string(mac_value)
# Return data
return data_dict
def dot1dbaseport_2_ifindex(self, context_names=None):
"""Return dict of BRIDGE-MIB dot1dBasePortIfIndex data.
Args:
None
Returns:
data_dict: Dict of dot1dBasePortIfIndex with dot1dBasePort as key.
"""
# Initialize key variables
offset = 0
if context_names is None:
context_names = ['']
data_dict = defaultdict(dict)
# Get the difference between ifIndex and dot1dBasePortIfIndex
oid = '.1.3.6.1.2.1.17.1.4.1.2'
results = self._snmp_object.swalk(oid, normalized=True)
for _bridge_index, ifindex in results.items():
bridge_index = int(_bridge_index)
offset = int(ifindex) - bridge_index
break
# Populate the dictionary keyed by dot1dBasePortIfIndex
for ifindex, _ in sorted(self._ifindex.items()):
bridge_index = ifindex - offset
data_dict[bridge_index] = ifindex
# Return data
return data_dict
def _cisco_context_style(self, vlan):
"""Return style value to use to query VLAN data on a cisco switch.
Args:
vlan: Number of vlan
Returns:
cisco_style: Style of context for formatting VLAN SNMP contexts
"""
# Initialize key variables
cisco_style = 0
styles = [0, 1]
# Try all available styles
for style in styles:
context_names = [_cisco_vlan_context(vlan, style)]
result = self._dot1dtpfdbaddress(context_names=context_names)
if bool(result) is True:
cisco_style = style
break
# Return
return cisco_style
def _cisco_vlan_context(vlan, context_style):
"""Return dict of BRIDGE-MIB dot1dBasePortIfIndex data.
Args:
vlan: Number of vlan
context_style: Value of the context style to use
Returns:
cisco_context: SNMP context string
"""
# Create context string
if context_style == 0:
# Create context for older Cisco systems
cisco_context = '{}'.format(vlan)
else:
# Create context for newer Cisco systems
cisco_context = 'vlan-{}'.format(vlan)
# Return
return cisco_context
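
# Examples (illustrative): the two context styles yield
#   _cisco_vlan_context(100, 0)  ->  '100'       (older Cisco systems)
#   _cisco_vlan_context(100, 1)  ->  'vlan-100'  (newer Cisco systems)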
def _snmp_octetstr_2_string(binary_value):
"""Convert SNMP OCTETSTR to string.
Args:
binary_value: Binary value to convert
Returns:
result: String equivalent of binary_value
"""
# Convert and return
result = ''.join(
['%0.2x' % ord(_) for _ in binary_value.decode('utf-8')])
return result.lower()
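
# Example (illustrative):
#   _snmp_octetstr_2_string(b'\x00\x1a\x2b\x3c\x4d\x5e')  ->  '001a2b3c4d5e'
# i.e. a lowercase hex string suitable for MAC address comparisons.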
# ---- end of file: switchmap/snmp/mib_bridge.py | repo: palisadoes/switchmap-ng | license: apache-2.0 ----
"""Tests for the DFS module"""
import unittest
from dfs import dfsTraverse
class test_dfsTraverse(unittest.TestCase):
"""Test the correct order in traversing a graph"""
def setUp(self):
"""Create a graph and a tuple with the correct traverse"""
self.correctResTup = ('a', 'b', 'e', 'g', 'f', 'c', 'h', 'd')
self.graphDict = {'a': ('b', 'g', 'd'),
'b': ('e', 'a', 'f'),
'd': ('a', 'f'),
'e': ('b', 'g'),
'g': ('e', 'a'),
'f': ('b', 'd', 'c'),
'c': ('f', 'h'),
                          'h': ('c',)}  # one-element tuple; ('c') would be just the string 'c'
def test_traverse(self):
"""Test the traverse function"""
result = dfsTraverse(self.graphDict, 'a')
self.assertEqual(result, self.correctResTup)
if __name__ == '__main__':
unittest.main()
| radome/algorithms_and_data_structures | Python/test/test_dfs.py | Python | apache-2.0 | 911 | 0 |
from easygui import *
'''
Write a program that asks for a number (the number of grades to enter). It then asks for each grade and computes the average.
'''
# Running total, initialized to 0
total = 0
numnotas = int(enterbox('Enter the number of grades: '))
# for...range to loop over the number of grades
for num in range(numnotas):
    nota = int(enterbox('Enter grade #%d: ' % (num + 1)))
    # Accumulate the result on each iteration
    total += nota
# Show the result
media = total / float(numnotas)
msgbox('The result is %.2f' % media)
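# Worked example: with numnotas = 3 and grades 5, 7, 9, total = 21 and media = 21 / 3.0 = 7.00.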
| txtbits/daw-python | primeros ejercicios/Ejercicios de acumular números/ejercicio3.py | Python | mit | 584 | 0.012153 |
import unittest
from unittest import TestCase
from array import array as array_
from genty import genty, genty_dataset
import numpy as np
from auditok import signal as signal_
from auditok import signal_numpy
@genty
class TestSignal(TestCase):
def setUp(self):
self.data = b"012345679ABC"
self.numpy_fmt = {"b": np.int8, "h": np.int16, "i": np.int32}
@genty_dataset(
int8_mono=(1, [[48, 49, 50, 51, 52, 53, 54, 55, 57, 65, 66, 67]]),
int16_mono=(2, [[12592, 13106, 13620, 14134, 16697, 17218]]),
int32_mono=(4, [[858927408, 926299444, 1128415545]]),
int8_stereo=(1, [[48, 50, 52, 54, 57, 66], [49, 51, 53, 55, 65, 67]]),
int16_stereo=(2, [[12592, 13620, 16697], [13106, 14134, 17218]]),
int32_3channel=(4, [[858927408], [926299444], [1128415545]]),
)
def test_to_array(self, sample_width, expected):
channels = len(expected)
expected = [
array_(signal_.FORMAT[sample_width], xi) for xi in expected
]
result = signal_.to_array(self.data, sample_width, channels)
result_numpy = signal_numpy.to_array(self.data, sample_width, channels)
self.assertEqual(result, expected)
self.assertTrue((result_numpy == np.asarray(expected)).all())
self.assertEqual(result_numpy.dtype, np.float64)
@genty_dataset(
int8_1channel_select_0=(
"b",
1,
0,
[48, 49, 50, 51, 52, 53, 54, 55, 57, 65, 66, 67],
),
int8_2channel_select_0=("b", 2, 0, [48, 50, 52, 54, 57, 66]),
int8_3channel_select_0=("b", 3, 0, [48, 51, 54, 65]),
int8_3channel_select_1=("b", 3, 1, [49, 52, 55, 66]),
int8_3channel_select_2=("b", 3, 2, [50, 53, 57, 67]),
int8_4channel_select_0=("b", 4, 0, [48, 52, 57]),
int16_1channel_select_0=(
"h",
1,
0,
[12592, 13106, 13620, 14134, 16697, 17218],
),
int16_2channel_select_0=("h", 2, 0, [12592, 13620, 16697]),
int16_2channel_select_1=("h", 2, 1, [13106, 14134, 17218]),
int16_3channel_select_0=("h", 3, 0, [12592, 14134]),
int16_3channel_select_1=("h", 3, 1, [13106, 16697]),
int16_3channel_select_2=("h", 3, 2, [13620, 17218]),
int32_1channel_select_0=(
"i",
1,
0,
[858927408, 926299444, 1128415545],
),
int32_3channel_select_0=("i", 3, 0, [858927408]),
int32_3channel_select_1=("i", 3, 1, [926299444]),
int32_3channel_select_2=("i", 3, 2, [1128415545]),
)
def test_extract_single_channel(self, fmt, channels, selected, expected):
result = signal_.extract_single_channel(
self.data, fmt, channels, selected
)
expected = array_(fmt, expected)
expected_numpy_fmt = self.numpy_fmt[fmt]
self.assertEqual(result, expected)
result_numpy = signal_numpy.extract_single_channel(
self.data, self.numpy_fmt[fmt], channels, selected
)
self.assertTrue(all(result_numpy == expected))
self.assertEqual(result_numpy.dtype, expected_numpy_fmt)
@genty_dataset(
int8_2channel=("b", 2, [48, 50, 52, 54, 61, 66]),
int8_4channel=("b", 4, [50, 54, 64]),
int16_1channel=("h", 1, [12592, 13106, 13620, 14134, 16697, 17218]),
int16_2channel=("h", 2, [12849, 13877, 16958]),
int32_3channel=("i", 3, [971214132]),
)
def test_compute_average_channel(self, fmt, channels, expected):
result = signal_.compute_average_channel(self.data, fmt, channels)
expected = array_(fmt, expected)
expected_numpy_fmt = self.numpy_fmt[fmt]
self.assertEqual(result, expected)
result_numpy = signal_numpy.compute_average_channel(
self.data, self.numpy_fmt[fmt], channels
)
self.assertTrue(all(result_numpy == expected))
self.assertEqual(result_numpy.dtype, expected_numpy_fmt)
@genty_dataset(
int8_2channel=(1, [48, 50, 52, 54, 61, 66]),
int16_2channel=(2, [12849, 13877, 16957]),
)
def test_compute_average_channel_stereo(self, sample_width, expected):
result = signal_.compute_average_channel_stereo(
self.data, sample_width
)
fmt = signal_.FORMAT[sample_width]
expected = array_(fmt, expected)
self.assertEqual(result, expected)
@genty_dataset(
int8_1channel=(
"b",
1,
[[48, 49, 50, 51, 52, 53, 54, 55, 57, 65, 66, 67]],
),
int8_2channel=(
"b",
2,
[[48, 50, 52, 54, 57, 66], [49, 51, 53, 55, 65, 67]],
),
int8_4channel=(
"b",
4,
[[48, 52, 57], [49, 53, 65], [50, 54, 66], [51, 55, 67]],
),
int16_2channel=(
"h",
2,
[[12592, 13620, 16697], [13106, 14134, 17218]],
),
int32_3channel=("i", 3, [[858927408], [926299444], [1128415545]]),
)
def test_separate_channels(self, fmt, channels, expected):
result = signal_.separate_channels(self.data, fmt, channels)
expected = [array_(fmt, exp) for exp in expected]
expected_numpy_fmt = self.numpy_fmt[fmt]
self.assertEqual(result, expected)
result_numpy = signal_numpy.separate_channels(
self.data, self.numpy_fmt[fmt], channels
)
self.assertTrue((result_numpy == expected).all())
self.assertEqual(result_numpy.dtype, expected_numpy_fmt)
@genty_dataset(
simple=([300, 320, 400, 600], 2, 52.50624901923348),
zero=([0], 2, -200),
zeros=([0, 0, 0], 2, -200),
)
def test_calculate_energy_single_channel(self, x, sample_width, expected):
x = array_(signal_.FORMAT[sample_width], x)
energy = signal_.calculate_energy_single_channel(x, sample_width)
self.assertEqual(energy, expected)
energy = signal_numpy.calculate_energy_single_channel(x, sample_width)
self.assertEqual(energy, expected)
@genty_dataset(
min_=(
[[300, 320, 400, 600], [150, 160, 200, 300]],
2,
min,
46.485649105953854,
),
max_=(
[[300, 320, 400, 600], [150, 160, 200, 300]],
2,
max,
52.50624901923348,
),
)
def test_calculate_energy_multichannel(
self, x, sample_width, aggregation_fn, expected
):
x = [array_(signal_.FORMAT[sample_width], xi) for xi in x]
energy = signal_.calculate_energy_multichannel(
x, sample_width, aggregation_fn
)
self.assertEqual(energy, expected)
energy = signal_numpy.calculate_energy_multichannel(
x, sample_width, aggregation_fn
)
self.assertEqual(energy, expected)
if __name__ == "__main__":
unittest.main()
| amsehili/auditok | tests/test_signal.py | Python | mit | 6,986 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Densely Connected Convolutional Networks.
Reference [
Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow_examples.models.densenet import distributed_train
from tensorflow_examples.models.densenet import utils
class DenseNetDistributedBenchmark(tf.test.Benchmark):
def __init__(self, output_dir=None, **kwargs):
self.output_dir = output_dir
def benchmark_with_function_custom_loops(self):
kwargs = utils.get_cifar10_kwargs()
self._run_and_report_benchmark(**kwargs)
def benchmark_with_function_custom_loops_300_epochs_2_gpus(self):
kwargs = utils.get_cifar10_kwargs()
kwargs.update({'epochs': 300, 'data_format': 'channels_first',
'bottleneck': False, 'compression': 1., 'num_gpu': 2,
'batch_size': 128})
self._run_and_report_benchmark(**kwargs)
def benchmark_with_function_custom_loops_300_epochs_8_gpus(self):
kwargs = utils.get_cifar10_kwargs()
kwargs.update({'epochs': 300, 'data_format': 'channels_first',
'bottleneck': False, 'compression': 1., 'num_gpu': 8,
'batch_size': 512})
self._run_and_report_benchmark(**kwargs)
def _run_and_report_benchmark(self, top_1_min=.944, top_1_max=.949, **kwargs):
"""Run the benchmark and report metrics.report.
Args:
top_1_min: Min value for top_1 accuracy. Default range is SOTA.
top_1_max: Max value for top_1 accuracy.
**kwargs: All args passed to the test.
"""
start_time_sec = time.time()
train_loss, train_acc, _, test_acc = distributed_train.main(**kwargs)
wall_time_sec = time.time() - start_time_sec
metrics = []
metrics.append({'name': 'accuracy_top_1',
'value': test_acc,
'min_value': top_1_min,
'max_value': top_1_max})
metrics.append({'name': 'training_accuracy_top_1',
'value': train_acc})
metrics.append({'name': 'train_loss',
'value': train_loss})
self.report_benchmark(wall_time=wall_time_sec, metrics=metrics)
if __name__ == '__main__':
tf.test.main()
| tensorflow/examples | tensorflow_examples/models/densenet/densenet_distributed_test.py | Python | apache-2.0 | 2,998 | 0.002668 |
from squad.celery import app as celery
from squad.ci.models import Backend, TestJob
from squad.ci.exceptions import SubmissionIssue, FetchIssue
from celery.utils.log import get_task_logger
from squad.mail import Message
from django.conf import settings
from django.template.loader import render_to_string
logger = get_task_logger(__name__)
@celery.task
def poll(backend_id=None):
if backend_id:
backends = Backend.objects.filter(pk=backend_id)
else:
backends = Backend.objects.all()
for backend in backends:
for test_job in backend.poll():
fetch.delay(test_job.id)
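# Illustrative scheduling (e.g. from celery beat or a shell): `poll.delay()` polls
# every backend, while `poll.delay(backend_id=backend.pk)` restricts it to one backend.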
@celery.task
def fetch(job_id):
test_job = TestJob.objects.get(pk=job_id)
if test_job.fetch_attempts >= test_job.backend.max_fetch_attempts:
return
logger.info("fetching %s" % test_job)
try:
test_job.backend.fetch(test_job)
except FetchIssue as issue:
        logger.warning("error fetching job %s: %s" % (test_job.id, str(issue)))
test_job.failure = str(issue)
test_job.fetched = not issue.retry
test_job.fetch_attempts += 1
test_job.save()
@celery.task(bind=True)
def submit(self, job_id):
test_job = TestJob.objects.get(pk=job_id)
try:
test_job.backend.submit(test_job)
test_job.save()
except SubmissionIssue as issue:
logger.error("submitting job %s to %s: %s" % (test_job.id, test_job.backend.name, str(issue)))
test_job.failure = str(issue)
test_job.save()
if issue.retry:
raise self.retry(exc=issue, countdown=3600) # retry in 1 hour
@celery.task
def send_testjob_resubmit_admin_email(job_id, resubmitted_job_id):
test_job = TestJob.objects.get(pk=job_id)
resubmitted_test_job = TestJob.objects.get(pk=resubmitted_job_id)
admin_subscriptions = test_job.target.admin_subscriptions.all()
sender = "%s <%s>" % (settings.SITE_NAME, settings.EMAIL_FROM)
emails = [r.email for r in admin_subscriptions]
subject = "Resubmitted: %s - TestJob %s: %s, %s, %s" % (
test_job.target,
test_job.job_id,
test_job.job_status,
test_job.environment,
test_job.name)
context = {
'test_job': test_job,
'resubmitted_job': resubmitted_test_job,
'subject': subject,
'settings': settings,
}
text_message = render_to_string(
'squad/ci/testjob_resubmit.txt.jinja2',
context=context,
)
html_message = ''
html_message = render_to_string(
'squad/ci/testjob_resubmit.html.jinja2',
context=context,
)
message = Message(subject, text_message, sender, emails)
if test_job.target.html_mail:
message.attach_alternative(html_message, "text/html")
message.send()
| terceiro/squad | squad/ci/tasks.py | Python | agpl-3.0 | 2,771 | 0.000361 |
#!/usr/bin/env python
# coding: utf-8
__author__ = 'jonathan'
| tntC4stl3/Learn-Flask | tutorial/learn_upload.py | Python | gpl-2.0 | 63 | 0 |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test.utils import override_settings
from sis_provisioner.tests import (
fdao_pws_override, fdao_hrp_override, fdao_bridge_override)
from sis_provisioner.tests.account_managers import set_uw_account
user_file_name_override = override_settings(
BRIDGE_IMPORT_USER_FILENAME="users")
def set_db_records():
affiemp = set_uw_account("affiemp")
javerage = set_uw_account("javerage")
ellen = set_uw_account("ellen")
staff = set_uw_account("staff")
staff.set_disable()
retiree = set_uw_account("retiree")
tyler = set_uw_account("faculty")
leftuw = set_uw_account("leftuw")
leftuw.set_terminate_date()
testid = set_uw_account("testid")
| uw-it-aca/bridge-sis-provisioner | sis_provisioner/tests/csv/__init__.py | Python | apache-2.0 | 784 | 0 |
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
import re
import itertools
import mx
class JavaCompliance(mx.Comparable):
"""
Represents one or more major Java versions.
Example valid compliance specifications and the JDKs they match:
"8+" - jdk8, jdk9, jdk10, ...
"1.8" - jdk8
"8..12" - jdk8, jdk9, jdk10, jdk11, jdk12
"8,13+" - jdk8, jdk13, jdk14, ...
"8..9,13+" - jdk8, jdk9, jdk13, jdk14, ...
"8,11,13+" - jdk8, jdk11, jdk13, jdk14, ...
There can be multiple parts to a version string specifying a non-contiguous range.
Part N of a multi-part version string must have a strict upper bound (i.e. cannot end with "+")
and its upper bound must be less than the lower bound of part N+1. Only major versions less
than 10 can have an optional "1." prefix. The lowest recognized major version is 2.
"""
# Examples: "8", "13"
_int_re = re.compile(r'(\d+)$')
# Example: "1.8..13"
_version_range_re = re.compile(r'(1\.)?(\d+)\.\.(1\.)?(\d+)$')
# Example: "13+"
_open_range_re = re.compile(r'(1\.)?(\d+)\+$')
# Examples: "1.8", "13"
_singleton_range_re = re.compile(r'(1\.)?(\d+)$')
# Examples: "17-loom"
_loom_re = re.compile(r'(\d+)-loom$')
@staticmethod
def _error_prefix(spec, part_index, part):
return 'JavaCompliance("{}"): Part {} ("{}")'.format(spec, part_index, part)
class _Range(mx.Comparable):
"""
Represents a contiguous range of version values.
"""
def __init__(self, low, high):
self._low = low
self._high = high
def __repr__(self):
if self._low == self._high:
return str(self._low)
if self._high is None:
return str(self._low) + '+'
return '{}..{}'.format(self._low, self._high)
def __cmp__(self, other):
r = mx.compare(self._low, other._low)
if r != 0:
return r
if self._high is None:
if other._high is None:
return 0
# self has no high bound, other does
return 1
elif other._high is None:
# other has no high bound, self does
return -1
return mx.compare(self._high, other._high)
def __hash__(self):
return self._low ** (self._high or 1)
def __contains__(self, other):
if isinstance(other, int):
value = int(other)
if value < self._low:
return False
if self._high is None:
return True
return value <= self._high
return False
def _values(self, stop=None):
"""
Returns an iterator over all the Java versions in this range stopping at `stop - 1`.
If `stop` is None and this is an open ended range, this will generate an infinite sequence.
"""
if self._high is None:
if stop is None:
return itertools.count(self._low)
return iter(range(self._low, stop))
return iter(range(self._low, self._high + 1))
def __init__(self, spec, parse_error=None, context=None):
"""
Creates a JavaCompliance based on `spec`.
:param spec: an int specifying a Java version or a str specifying one or more Java versions
:param parse_error: if not None, then it must be a callable that will be called if
`spec` is not a valid compliance specification. It will be called with
an error message and it must raise an exception (i.e. it cannot return
normally). If None, then `mx.abort` is called.
:param context: the context argument if `mx.abort` is called
"""
if parse_error is None:
parse_error = lambda m: mx.abort(m, context=context)
self._loom = False
def _error(part, index, msg):
parse_error('JavaCompliance("{}"): Part {} ("{}") {}'.format(spec, index, part, msg))
def _check_value(value, value_desc='value'):
value = int(value)
if value < 2:
_error(value, 0, 'has unsupported {} since it is less than 2'.format(value_desc))
return value
int_spec = spec if isinstance(spec, int) else int(spec) if isinstance(spec, str) and JavaCompliance._int_re.match(spec) else None
if int_spec is not None:
value = _check_value(spec)
self._parts = (JavaCompliance._Range(value, value),)
return
if not isinstance(spec, str):
spec = str(spec)
parts = spec.split(',')
def _parse_part(part, index):
def _part_error(msg):
_error(part, index, msg)
def _check_part_value(prefix, value, value_desc):
value = _check_value(value, value_desc)
if prefix and value > 9:
_part_error('cannot have "1." prefix on {} since {} > 9'.format(value_desc, value_desc))
return value
m = JavaCompliance._version_range_re.match(part)
if m:
low = _check_part_value(m.group(1), m.group(2), 'low bound')
high = _check_part_value(m.group(3), m.group(4), 'high bound')
if low >= high:
_part_error('has low bound ({}) greater or equal to high bound ({})'.format(low, high))
return JavaCompliance._Range(low, high)
m = JavaCompliance._open_range_re.match(part)
if m:
low = _check_part_value(m.group(1), m.group(2), 'bound')
return JavaCompliance._Range(low, None)
m = JavaCompliance._loom_re.match(part)
if m:
self._loom = True
part = m.group(1)
m = JavaCompliance._singleton_range_re.match(part)
if m:
low = _check_part_value(m.group(1), m.group(2), 'bound')
return JavaCompliance._Range(low, low)
_part_error('is not a recognized version range')
self._parts = tuple((_parse_part(parts[i], i) for i in range(len(parts))))
if len(self._parts) > 1:
for i in range(1, len(self._parts)):
first = self._parts[i - 1]
second = self._parts[i]
if first._high is None:
_error(first, i - 1, 'must have a high bound')
if second._low <= first._high:
_error(first, i - 1, 'must have a high bound ({}) less than the low bound ({}) of part {} ("{}")'.format(first._high, second._low, i, second))
@property
def value(self):
return self._parts[0]._low
def __str__(self):
if self.value >= 9:
return str(self.value)
return '1.' + str(self.value)
def __repr__(self):
return ','.join((repr(b) for b in self._parts))
def _high_bound(self):
return self._parts[-1]._high
def __cmp__(self, other):
if isinstance(other, str):
other = JavaCompliance(other)
return mx.compare(self._parts, other._parts)
def __contains__(self, other):
if isinstance(other, (int, str)):
other = JavaCompliance(other)
assert other._high_bound() is not None, "Contains check cannot be done with version ranges"
r = mx.compare(self.value, other.value)
if r == 0:
return True
elif r > 0:
return False
else: # r < 0
if self._high_bound() is None:
return True
else:
return mx.compare(self._high_bound(), other.value) >= 0
def __hash__(self):
return hash((self._parts, self._loom))
def _is_exact_bound(self):
return self.value == self._high_bound()
def _exact_match(self, version):
assert isinstance(version, mx.VersionSpec)
if self._loom and not version._loom:
# only skip those suites who require Loom
return False
if len(version.parts) > 0:
if len(version.parts) > 1 and version.parts[0] == 1:
# First part is a '1', e.g. '1.8.0'.
value = version.parts[1]
else:
# No preceding '1', e.g. '9-ea'. Used for Java 9 early access releases.
value = version.parts[0]
return any((value in b for b in self._parts))
return False
def as_version_check(self):
if self._is_exact_bound():
versionDesc = str(self)
elif self._high_bound() is None:
versionDesc = '>=' + str(self)
else:
versionDesc = 'in ' + repr(self)
versionCheck = self._exact_match
return (versionCheck, versionDesc)
def _values(self, stop=None):
"""
Returns an iterator over all the Java versions that match this compliance object
up to but not including `stop`. If `stop` is None and this is an open ended
compliance, this will generate an infinite sequence.
"""
return itertools.chain(*(p._values(stop=stop) for p in self._parts))
def highest_specified_value(self):
"""
Gets the highest explicitly specified value of this Java compliance.
Examples:
8+ --> 8
8,13+ --> 13
8,11,13+ --> 13
8..11,13+ --> 13
"""
highest_part = self._parts[-1]
return highest_part._high or highest_part._low
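# Illustrative usage (a sketch; values chosen to match the class docstring):
#
#   jc = JavaCompliance('8..11')
#   9 in jc                    # True
#   12 in jc                   # False (high bound is 11)
#   str(JavaCompliance(8))     # '1.8' -- versions below 9 keep the '1.' prefix
#
# Note that __contains__ above checks only the overall low/high bounds, so for a
# multi-part spec such as '8..9,13+' the gap values (10..12) also test as contained.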
def _test():
"""
Mx suite specific tests.
"""
# JavaCompliance tests
good_specs = [
(2, True),
(1.2, True),
(11, True),
(200, True),
('2', True),
('1.2', True),
('1.8', True),
('1.5+', False),
('2..4', False),
('1.8..9', False),
('2..3,4+', False),
('2..3,4,7+', False),
('2..3,4..5,7+', False),
('2..3,4..5,7,8,9,10,15..18,120', False),
]
bad_specs = [
1,
'1',
'1.1',
'1.10',
'1.8..1.10',
'1.10+',
'2..1',
'2..2',
'1,,3',
'1..3+',
'1+,4..5',
'13+ignored',
'1..3,7..5',
'4,7,1..3,',
'4..5,1..3',
]
for spec, exact in good_specs:
p = mx.JavaCompliance(spec)
assert p._is_exact_bound() is exact, p
# Just ensure these methods execute without exception
p.as_version_check()
p._values(stop=20)
hash(p)
if mx.get_opts().verbose:
if isinstance(spec, str):
spec = '"' + spec + '"'
mx.log('{}: str="{}", repr="{}", hash={}'.format(spec, str(p), repr(p), hash(p)))
for spec in bad_specs:
class SpecError(Exception):
pass
def _parse_error(msg):
if mx.get_opts().verbose:
mx.log('saw expected SpecError: ' + msg)
raise SpecError(msg)
try:
mx.JavaCompliance(spec, parse_error=_parse_error)
mx.abort('expected SpecError while parsing "{}"'.format(spec))
except SpecError:
pass
| graalvm/mx | mx_javacompliance.py | Python | gpl-2.0 | 12,586 | 0.002225 |
#! /usr/bin/env python
"""
Module with pixel and frame subsampling functions.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez, Valentin Christiaens'
__all__ = ['cube_collapse',
'cube_subsample',
'cube_subsample_trimmean']
import numpy as np
def cube_collapse(cube, mode='median', n=50, w=None):
""" Collapses a cube into a frame (3D array -> 2D array) depending on the
parameter ``mode``. It's possible to perform a trimmed mean combination of
the frames based on description in Brandt+ 2012.
Parameters
----------
cube : numpy ndarray
Cube.
mode : {'median', 'mean', 'sum', 'trimmean', 'max', 'wmean'}, str optional
Sets the way of collapsing the images in the cube.
'wmean' stands for weighted mean and requires weights w to be provided.
n : int, optional
Sets the discarded values at high and low ends. When n = N is the same
as taking the mean, when n = 1 is like taking the median.
w: 1d numpy array or list, optional
Weights to be applied for a weighted mean. Need to be provided if
collapse mode is 'wmean'.
Returns
-------
frame : numpy ndarray
Output array, cube combined.
"""
arr = cube
if arr.ndim != 3:
raise TypeError('The input array is not a cube or 3d array.')
if mode == 'wmean':
if w is None:
raise ValueError("Weights have to be provided for weighted mean mode")
if len(w) != cube.shape[0]:
raise TypeError("Weights need same length as cube")
if isinstance(w,list):
w = np.array(w)
if mode == 'mean':
frame = np.mean(arr, axis=0)
elif mode == 'median':
frame = np.median(arr, axis=0)
elif mode == 'sum':
frame = np.sum(arr, axis=0)
elif mode == 'max':
frame = np.max(arr, axis=0)
elif mode == 'trimmean':
N = arr.shape[0]
if N % 2 == 0:
k = (N - n)//2
else:
k = (N - n)/2
frame = np.empty_like(arr[0])
for index, _ in np.ndenumerate(arr[0]):
sort = np.sort(arr[:, index[0], index[1]])
frame[index] = np.mean(sort[k:N-k])
elif mode == 'wmean':
frame = np.inner(w, np.moveaxis(arr,0,-1))
return frame
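# Minimal usage sketch (illustrative values):
#
#   import numpy as np
#   cube = np.random.rand(10, 64, 64)                     # 10 frames of 64x64 px
#   med = cube_collapse(cube, mode='median')              # -> (64, 64) frame
#   wav = cube_collapse(cube, mode='wmean', w=np.ones(10) / 10.)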
def cube_subsample(array, n, mode="mean", parallactic=None, verbose=True):
"""Mean/Median combines frames in 3d or 4d cube with window ``n``.
Parameters
----------
array : numpy ndarray
Input 3d array, cube.
n : int
Window for mean/median.
mode : {'mean','median'}, optional
Switch for choosing mean or median.
parallactic : numpy ndarray, optional
List of corresponding parallactic angles.
verbose : bool optional
Returns
-------
arr_view : numpy ndarray
Resulting array.
If ``parallactic`` is provided the the new cube and angles are returned.
"""
if array.ndim not in [3, 4]:
raise TypeError('The input array is not a cube or 3d or 4d array')
if mode == 'median':
func = np.median
elif mode == 'mean':
func = np.mean
else:
raise ValueError('`Mode` should be either Mean or Median')
if array.ndim == 3:
m = int(array.shape[0] / n)
resid = array.shape[0] % n
y = array.shape[1]
x = array.shape[2]
arr = np.empty([m, y, x])
if parallactic is not None:
angles = np.zeros(m)
for i in range(m):
arr[i, :, :] = func(array[n * i:n * i + n, :, :], axis=0)
if parallactic is not None:
angles[i] = func(parallactic[n * i:n * i + n])
elif array.ndim == 4:
m = int(array.shape[1] / n)
resid = array.shape[1] % n
w = array.shape[0]
y = array.shape[2]
x = array.shape[3]
arr = np.empty([w, m, y, x])
if parallactic is not None:
angles = np.zeros(m)
for j in range(w):
for i in range(m):
arr[j, i, :, :] = func(array[j, n * i:n * i + n, :, :], axis=0)
if parallactic is not None:
angles[i] = func(parallactic[n * i:n * i + n])
if verbose:
msg = "Cube temporally subsampled by taking the {} of every {} frames"
print(msg.format(mode, n))
if resid > 0:
print("Initial # of frames and window are not multiples ({} "
"frames were dropped)".format(resid))
print("New shape: {}".format(arr.shape))
if parallactic is not None:
return arr, angles
else:
return arr
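# Illustrative call (shapes only; data is random):
#
#   cube = np.random.rand(100, 32, 32)
#   angles = np.linspace(0, 50, 100)
#   sub_cube, sub_angles = cube_subsample(cube, 4, parallactic=angles)
#   # sub_cube.shape -> (25, 32, 32); sub_angles.shape -> (25,)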
def cube_subsample_trimmean(arr, n, m):
"""Performs a trimmed mean combination every m frames in a cube. Based on
description in Brandt+ 2012.
Parameters
----------
arr : numpy ndarray
Cube.
n : int
Sets the discarded values at high and low ends. When n = N is the same
as taking the mean, when n = 1 is like taking the median.
m : int
Window from the trimmed mean.
Returns
-------
arr_view : numpy ndarray
Output array, cube combined.
"""
if arr.ndim != 3:
raise TypeError('The input array is not a cube or 3d array')
num = int(arr.shape[0]/m)
res = int(arr.shape[0]%m)
y = arr.shape[1]
x = arr.shape[2]
arr2 = np.empty([num+2, y, x])
    for i in range(num):
        # arr[m*i:m*i + m] reduces to arr[:m] when i == 0, so one branch covers all blocks
        arr2[i] = cube_collapse(arr[m*i:m*i+m, :, :], 'trimmean', n)
arr2[num] = cube_collapse(arr[-res:, :, :], 'trimmean', n)
    arr_view = arr2[:num+1]  # slice up to num+1 - last index not included
msg = "Cube temporally subsampled by taking the trimmed mean of every {} "
msg += "frames"
print(msg.format(m))
return arr_view | carlgogo/vip_exoplanets | vip_hci/preproc/subsampling.py | Python | bsd-3-clause | 6,167 | 0.005189 |
# Name: tools.py
# Purpose: XRC editor, toolbar
# Author: Roman Rolinsky <rolinsky@mema.ucl.ac.be>
# Created: 19.03.2003
# RCS-ID: $Id: tools.py,v 1.12 2006/05/17 03:57:57 RD Exp $
from xxx import * # xxx imports globals and params
from tree import ID_NEW
# Icons
import images
# Groups of controls
GROUPNUM = 4
GROUP_WINDOWS, GROUP_MENUS, GROUP_SIZERS, GROUP_CONTROLS = range(GROUPNUM)
# States depending on current selection and Control/Shift keys
STATE_ROOT, STATE_MENUBAR, STATE_TOOLBAR, STATE_MENU, STATE_STDDLGBTN, STATE_ELSE = range(6)
# Left toolbar for GUI elements
class Tools(wx.Panel):
TOOL_SIZE = (30, 30)
def __init__(self, parent):
if wx.Platform == '__WXGTK__':
wx.Panel.__init__(self, parent, -1,
style=wx.RAISED_BORDER|wx.WANTS_CHARS)
else:
wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS)
# Create sizer for groups
self.sizer = wx.BoxSizer(wx.VERTICAL)
# Data to create buttons
self.groups = []
self.ctrl = self.shift = False
# Current state (what to enable/disable)
self.state = None
groups = [
["Windows",
(ID_NEW.FRAME, images.getToolFrameBitmap()),
(ID_NEW.DIALOG, images.getToolDialogBitmap()),
(ID_NEW.PANEL, images.getToolPanelBitmap())],
["Menus",
(ID_NEW.TOOL_BAR, images.getToolToolBarBitmap()),
(ID_NEW.MENU_BAR, images.getToolMenuBarBitmap()),
(ID_NEW.MENU, images.getToolMenuBitmap()),
(ID_NEW.TOOL, images.getToolToolBitmap()),
(ID_NEW.MENU_ITEM, images.getToolMenuItemBitmap()),
(ID_NEW.SEPARATOR, images.getToolSeparatorBitmap())],
["Sizers",
(ID_NEW.BOX_SIZER, images.getToolBoxSizerBitmap()),
(ID_NEW.STATIC_BOX_SIZER, images.getToolStaticBoxSizerBitmap()),
(ID_NEW.GRID_SIZER, images.getToolGridSizerBitmap()),
(ID_NEW.FLEX_GRID_SIZER, images.getToolFlexGridSizerBitmap()),
(ID_NEW.GRID_BAG_SIZER, images.getToolGridBagSizerBitmap()),
(ID_NEW.SPACER, images.getToolSpacerBitmap())],
["Controls",
(ID_NEW.STATIC_TEXT, images.getToolStaticTextBitmap()),
(ID_NEW.STATIC_BITMAP, images.getToolStaticBitmapBitmap()),
(ID_NEW.STATIC_LINE, images.getToolStaticLineBitmap()),
(ID_NEW.BUTTON, images.getToolButtonBitmap()),
(ID_NEW.BITMAP_BUTTON, images.getToolBitmapButtonBitmap()),
(ID_NEW.STATIC_BOX, images.getToolStaticBoxBitmap()),
(ID_NEW.TEXT_CTRL, images.getToolTextCtrlBitmap()),
(ID_NEW.COMBO_BOX, images.getToolComboBoxBitmap()),
(ID_NEW.CHOICE, images.getToolChoiceBitmap()),
(ID_NEW.RADIO_BUTTON, images.getToolRadioButtonBitmap()),
(ID_NEW.CHECK_BOX, images.getToolCheckBoxBitmap()),
(ID_NEW.RADIO_BOX, images.getToolRadioBoxBitmap()),
(ID_NEW.SPIN_CTRL, images.getToolSpinCtrlBitmap()),
(ID_NEW.SPIN_BUTTON, images.getToolSpinButtonBitmap()),
(ID_NEW.SCROLL_BAR, images.getToolScrollBarBitmap()),
(ID_NEW.SLIDER, images.getToolSliderBitmap()),
(ID_NEW.GAUGE, images.getToolGaugeBitmap()),
(ID_NEW.TREE_CTRL, images.getToolTreeCtrlBitmap()),
(ID_NEW.LIST_BOX, images.getToolListBoxBitmap()),
(ID_NEW.CHECK_LIST, images.getToolCheckListBitmap()),
(ID_NEW.LIST_CTRL, images.getToolListCtrlBitmap()),
(ID_NEW.NOTEBOOK, images.getToolNotebookBitmap()),
(ID_NEW.SPLITTER_WINDOW, images.getToolSplitterWindowBitmap()),
(ID_NEW.UNKNOWN, images.getToolUnknownBitmap())]
]
from tree import customCreateMap
if customCreateMap:
customGroup=['Custom']
for id in customCreateMap:
customGroup.append( (id, images.getToolUnknownBitmap()))
groups.append(customGroup)
for grp in groups:
self.AddGroup(grp[0])
for b in grp[1:]:
self.AddButton(b[0], b[1], g.pullDownMenu.createMap[b[0]])
self.SetAutoLayout(True)
self.SetSizerAndFit(self.sizer)
# Allow to be resized in vertical direction only
self.SetSizeHints(self.GetSize()[0], -1)
# Events
wx.EVT_COMMAND_RANGE(self, ID_NEW.PANEL, ID_NEW.LAST,
wx.wxEVT_COMMAND_BUTTON_CLICKED, g.frame.OnCreate)
wx.EVT_KEY_DOWN(self, self.OnKeyDown)
wx.EVT_KEY_UP(self, self.OnKeyUp)
def AddButton(self, id, image, text):
from wx.lib import buttons
button = buttons.GenBitmapButton(self, id, image, size=self.TOOL_SIZE,
style=wx.NO_BORDER|wx.WANTS_CHARS)
button.SetBezelWidth(0)
wx.EVT_KEY_DOWN(button, self.OnKeyDown)
wx.EVT_KEY_UP(button, self.OnKeyUp)
button.SetToolTipString(text)
self.curSizer.Add(button)
self.groups[-1][1][id] = button
def AddGroup(self, name):
# Each group is inside box
box = wx.StaticBox(self, -1, name, style=wx.WANTS_CHARS)
box.SetFont(g.smallerFont())
boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)
boxSizer.Add((0, 4))
self.curSizer = wx.GridSizer(0, 3)
boxSizer.Add(self.curSizer)
self.sizer.Add(boxSizer, 0, wx.TOP | wx.LEFT | wx.RIGHT, 4)
self.groups.append((box,{}))
# Enable/disable group
def EnableGroup(self, gnum, enable = True):
grp = self.groups[gnum]
grp[0].Enable(enable)
for b in grp[1].values(): b.Enable(enable)
# Enable/disable group item
def EnableGroupItem(self, gnum, id, enable = True):
grp = self.groups[gnum]
grp[1][id].Enable(enable)
# Enable/disable group items
def EnableGroupItems(self, gnum, ids, enable = True):
grp = self.groups[gnum]
for id in ids:
grp[1][id].Enable(enable)
# Process key events
def OnKeyDown(self, evt):
if evt.GetKeyCode() == wx.WXK_CONTROL:
g.tree.ctrl = True
elif evt.GetKeyCode() == wx.WXK_SHIFT:
g.tree.shift = True
self.UpdateIfNeeded()
evt.Skip()
def OnKeyUp(self, evt):
if evt.GetKeyCode() == wx.WXK_CONTROL:
g.tree.ctrl = False
elif evt.GetKeyCode() == wx.WXK_SHIFT:
g.tree.shift = False
self.UpdateIfNeeded()
evt.Skip()
def OnMouse(self, evt):
# Update control and shift states
g.tree.ctrl = evt.ControlDown()
g.tree.shift = evt.ShiftDown()
self.UpdateIfNeeded()
evt.Skip()
# Update UI after key presses, if necessary
def UpdateIfNeeded(self):
tree = g.tree
if self.ctrl != tree.ctrl or self.shift != tree.shift:
# Enabling is needed only for ctrl
if self.ctrl != tree.ctrl: self.UpdateUI()
self.ctrl = tree.ctrl
self.shift = tree.shift
if tree.ctrl:
status = 'SBL'
elif tree.shift:
status = 'INS'
else:
status = ''
g.frame.SetStatusText(status, 1)
# Update interface
def UpdateUI(self):
if not self.IsShown(): return
# Update status bar
tree = g.tree
item = tree.selection
# If nothing selected, disable everything and return
if not item:
# Disable everything
for grp in range(GROUPNUM):
self.EnableGroup(grp, False)
self.state = None
return
if tree.ctrl: needInsert = True
else: needInsert = tree.NeedInsert(item)
# Enable depending on selection
if item == tree.root or needInsert and tree.GetItemParent(item) == tree.root:
state = STATE_ROOT
else:
xxx = tree.GetPyData(item).treeObject()
# Check parent for possible child nodes if inserting sibling
if needInsert: xxx = xxx.parent
if xxx.__class__ == xxxMenuBar:
state = STATE_MENUBAR
elif xxx.__class__ in [xxxToolBar, xxxTool] or \
xxx.__class__ == xxxSeparator and xxx.parent.__class__ == xxxToolBar:
state = STATE_TOOLBAR
elif xxx.__class__ in [xxxMenu, xxxMenuItem]:
state = STATE_MENU
elif xxx.__class__ == xxxStdDialogButtonSizer:
state = STATE_STDDLGBTN
else:
state = STATE_ELSE
# Enable depending on selection
if state != self.state:
# Disable everything
for grp in range(GROUPNUM):
self.EnableGroup(grp, False)
# Enable some
if state == STATE_ROOT:
self.EnableGroup(GROUP_WINDOWS, True)
self.EnableGroup(GROUP_MENUS, True)
# But disable items
self.EnableGroupItems(GROUP_MENUS,
[ ID_NEW.TOOL,
ID_NEW.MENU_ITEM,
ID_NEW.SEPARATOR ],
False)
elif state == STATE_STDDLGBTN:
pass # nothing can be added from toolbar
elif state == STATE_MENUBAR:
self.EnableGroup(GROUP_MENUS)
self.EnableGroupItems(GROUP_MENUS,
[ ID_NEW.TOOL_BAR,
ID_NEW.MENU_BAR,
ID_NEW.TOOL ],
False)
elif state == STATE_TOOLBAR:
self.EnableGroup(GROUP_MENUS)
self.EnableGroupItems(GROUP_MENUS,
[ ID_NEW.TOOL_BAR,
ID_NEW.MENU,
ID_NEW.MENU_BAR,
ID_NEW.MENU_ITEM ],
False)
self.EnableGroup(GROUP_CONTROLS)
self.EnableGroupItems(GROUP_CONTROLS,
[ ID_NEW.TREE_CTRL,
ID_NEW.NOTEBOOK,
ID_NEW.SPLITTER_WINDOW ],
False)
elif state == STATE_MENU:
self.EnableGroup(GROUP_MENUS)
self.EnableGroupItems(GROUP_MENUS,
[ ID_NEW.TOOL_BAR,
ID_NEW.MENU_BAR,
ID_NEW.TOOL ],
False)
else:
self.EnableGroup(GROUP_WINDOWS)
self.EnableGroupItems(GROUP_WINDOWS,
[ ID_NEW.FRAME,
ID_NEW.DIALOG ],
False)
self.EnableGroup(GROUP_MENUS)
self.EnableGroupItems(GROUP_MENUS,
[ ID_NEW.MENU_BAR,
ID_NEW.MENU_BAR,
ID_NEW.MENU,
ID_NEW.MENU_ITEM,
ID_NEW.TOOL,
ID_NEW.SEPARATOR ],
False)
self.EnableGroup(GROUP_SIZERS)
self.EnableGroup(GROUP_CONTROLS)
# Special case for *book (always executed)
if state == STATE_ELSE:
if xxx.__class__ in [xxxNotebook, xxxChoicebook, xxxListbook]:
self.EnableGroup(GROUP_SIZERS, False)
else:
self.EnableGroup(GROUP_SIZERS)
if not (xxx.isSizer or xxx.parent and xxx.parent.isSizer):
self.EnableGroupItem(GROUP_SIZERS, ID_NEW.SPACER, False)
if xxx.__class__ == xxxFrame:
self.EnableGroupItem(GROUP_MENUS, ID_NEW.MENU_BAR)
# Save state
self.state = state
| andreas-p/admin4 | xrced/tools.py | Python | apache-2.0 | 12,508 | 0.003997 |
import util, pexpect, time, math
from pymavlink import mavwp
# a list of pexpect objects to read while waiting for
# messages. This keeps the output to stdout flowing
expect_list = []
def expect_list_clear():
'''clear the expect list'''
global expect_list
for p in expect_list[:]:
expect_list.remove(p)
def expect_list_extend(list):
'''extend the expect list'''
global expect_list
expect_list.extend(list)
def idle_hook(mav):
'''called when waiting for a mavlink message'''
global expect_list
for p in expect_list:
util.pexpect_drain(p)
def message_hook(mav, msg):
'''called as each mavlink msg is received'''
idle_hook(mav)
def expect_callback(e):
'''called when waiting for a expect pattern'''
global expect_list
for p in expect_list:
if p == e:
continue
util.pexpect_drain(p)
def get_distance(loc1, loc2):
'''get ground distance between two locations'''
dlat = loc2.lat - loc1.lat
dlong = loc2.lng - loc1.lng
return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5
def get_bearing(loc1, loc2):
'''get bearing from loc1 to loc2'''
off_x = loc2.lng - loc1.lng
off_y = loc2.lat - loc1.lat
bearing = 90.00 + math.atan2(-off_y, off_x) * 57.2957795
if bearing < 0:
bearing += 360.00
    return bearing
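# Quick sanity checks (illustrative; any object with .lat and .lng attributes works):
#
#   from collections import namedtuple
#   Loc = namedtuple('Loc', 'lat lng')
#   get_distance(Loc(0.0, 0.0), Loc(0.001, 0.0))   # ~111.3 meters
#   get_bearing(Loc(0.0, 0.0), Loc(0.0, 1.0))      # ~90.0 (due east)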
def wait_altitude(mav, alt_min, alt_max, timeout=30):
    '''wait for a given altitude range'''
    climb_rate = 0
    previous_alt = 0
tstart = time.time()
print("Waiting for altitude between %u and %u" % (alt_min, alt_max))
while time.time() < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
climb_rate = m.alt - previous_alt
previous_alt = m.alt
print("Wait Altitude: Cur:%u, min_alt:%u, climb_rate: %u" % (m.alt, alt_min , climb_rate))
if abs(climb_rate) > 0:
tstart = time.time();
if m.alt >= alt_min and m.alt <= alt_max:
print("Altitude OK")
return True
print("Failed to attain altitude range")
return False
def wait_groundspeed(mav, gs_min, gs_max, timeout=30):
'''wait for a given ground speed range'''
tstart = time.time()
print("Waiting for groundspeed between %.1f and %.1f" % (gs_min, gs_max))
while time.time() < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
print("Wait groundspeed %.1f, target:%.1f" % (m.groundspeed, gs_min))
if m.groundspeed >= gs_min and m.groundspeed <= gs_max:
return True
print("Failed to attain groundspeed range")
return False
def wait_roll(mav, roll, accuracy, timeout=30):
'''wait for a given roll in degrees'''
tstart = time.time()
print("Waiting for roll of %u" % roll)
while time.time() < tstart + timeout:
m = mav.recv_match(type='ATTITUDE', blocking=True)
r = math.degrees(m.roll)
print("Roll %u" % r)
if math.fabs(r - roll) <= accuracy:
print("Attained roll %u" % roll)
return True
print("Failed to attain roll %u" % roll)
return False
def wait_pitch(mav, pitch, accuracy, timeout=30):
'''wait for a given pitch in degrees'''
tstart = time.time()
print("Waiting for pitch of %u" % pitch)
while time.time() < tstart + timeout:
m = mav.recv_match(type='ATTITUDE', blocking=True)
r = math.degrees(m.pitch)
print("Pitch %u" % r)
if math.fabs(r - pitch) <= accuracy:
print("Attained pitch %u" % pitch)
return True
print("Failed to attain pitch %u" % pitch)
return False
def wait_heading(mav, heading, accuracy=5, timeout=30):
'''wait for a given heading'''
tstart = time.time()
print("Waiting for heading %u with accuracy %u" % (heading, accuracy))
while time.time() < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
print("Heading %u" % m.heading)
if math.fabs(m.heading - heading) <= accuracy:
print("Attained heading %u" % heading)
return True
print("Failed to attain heading %u" % heading)
return False
def wait_distance(mav, distance, accuracy=5, timeout=30):
'''wait for flight of a given distance'''
tstart = time.time()
start = mav.location()
while time.time() < tstart + timeout:
pos = mav.location()
delta = get_distance(start, pos)
print("Distance %.2f meters" % delta)
if math.fabs(delta - distance) <= accuracy:
print("Attained distance %.2f meters OK" % delta)
return True
if delta > (distance + accuracy):
print("Failed distance - overshoot delta=%f distance=%f" % (delta, distance))
return False
print("Failed to attain distance %u" % distance)
return False
def wait_location(mav, loc, accuracy=5, timeout=30, target_altitude=None, height_accuracy=-1):
'''wait for arrival at a location'''
tstart = time.time()
if target_altitude is None:
target_altitude = loc.alt
print("Waiting for location %.4f,%.4f at altitude %.1f height_accuracy=%.1f" % (
loc.lat, loc.lng, target_altitude, height_accuracy))
while time.time() < tstart + timeout:
pos = mav.location()
delta = get_distance(loc, pos)
print("Distance %.2f meters alt %.1f" % (delta, pos.alt))
if delta <= accuracy:
if height_accuracy != -1 and math.fabs(pos.alt - target_altitude) > height_accuracy:
continue
print("Reached location (%.2f meters)" % delta)
return True
print("Failed to attain location")
return False
def wait_waypoint(mav, wpnum_start, wpnum_end, allow_skip=True, max_dist=2, timeout=400, mode=None):
'''wait for waypoint ranges'''
tstart = time.time()
# this message arrives after we set the current WP
start_wp = mav.waypoint_current()
current_wp = start_wp
print("\ntest: wait for waypoint ranges start=%u end=%u\n\n" % (wpnum_start, wpnum_end))
# if start_wp != wpnum_start:
# print("test: Expected start waypoint %u but got %u" % (wpnum_start, start_wp))
# return False
while time.time() < tstart + timeout:
seq = mav.waypoint_current()
m = mav.recv_match(type='NAV_CONTROLLER_OUTPUT', blocking=True)
wp_dist = m.wp_dist
m = mav.recv_match(type='VFR_HUD', blocking=True)
# if we exited the required mode, finish
if mode is not None and mav.flightmode != mode:
print('Exited %s mode' % mode)
return True
print("test: WP %u (wp_dist=%u Alt=%d), current_wp: %u, wpnum_end: %u" % (seq, wp_dist, m.alt, current_wp, wpnum_end))
if seq == current_wp+1 or (seq > current_wp+1 and allow_skip):
print("test: Starting new waypoint %u" % seq)
tstart = time.time()
current_wp = seq
# the wp_dist check is a hack until we can sort out the right seqnum
# for end of mission
#if current_wp == wpnum_end or (current_wp == wpnum_end-1 and wp_dist < 2):
if (current_wp == wpnum_end and wp_dist < max_dist):
print("Reached final waypoint %u" % seq)
return True
if (seq >= 255):
print("Reached final waypoint %u" % seq)
return True
if seq > current_wp+1:
print("Failed: Skipped waypoint! Got wp %u expected %u" % (seq, current_wp+1))
return False
print("Failed: Timed out waiting for waypoint %u of %u" % (wpnum_end, wpnum_end))
return False
def save_wp(mavproxy, mav):
mavproxy.send('rc 7 2000\n')
mav.recv_match(condition='RC_CHANNELS_RAW.chan7_raw==2000', blocking=True)
mavproxy.send('rc 7 1000\n')
mav.recv_match(condition='RC_CHANNELS_RAW.chan7_raw==1000', blocking=True)
def wait_mode(mav, mode):
'''wait for a flight mode to be engaged'''
print("Waiting for mode %s" % mode)
mav.recv_match(condition='MAV.flightmode.upper()=="%s".upper()' % mode, blocking=True)
print("Got mode %s" % mode)
def mission_count(filename):
'''load a mission from a file and return number of waypoints'''
wploader = mavwp.MAVWPLoader()
wploader.load(filename)
num_wp = wploader.count()
return num_wp
| owenson/ardupilot | Tools/autotest/common.py | Python | gpl-3.0 | 8,336 | 0.004439 |
# Generated by Django 1.11.20 on 2019-03-08 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_duration_limits', '0005_auto_20190306_1546'),
]
operations = [
migrations.AddIndex(
model_name='coursedurationlimitconfig',
index=models.Index(fields=['site', 'org', 'course'], name='course_dura_site_id_424016_idx'),
),
]
| edx/edx-platform | openedx/features/course_duration_limits/migrations/0006_auto_20190308_1447.py | Python | agpl-3.0 | 448 | 0.002232 |
import traceback
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
from django.core.exceptions import ValidationError
from django.utils.html import format_html
from agir.lib.form_fields import AdminRichEditorWidget, AdminJsonWidget
from agir.lib.forms import CoordinatesFormMixin
from agir.people.models import Person
from agir.people.person_forms.actions import (
validate_custom_fields,
get_people_form_class,
)
class PersonAdminForm(CoordinatesFormMixin, forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["primary_email"] = forms.ModelChoiceField(
self.instance.emails.all(),
initial=self.instance.primary_email,
required=True,
label="Email principal",
)
def _save_m2m(self):
super()._save_m2m()
if self.cleaned_data["primary_email"] != self.instance.primary_email:
self.instance.set_primary_email(self.cleaned_data["primary_email"])
class Meta:
fields = "__all__"
class PersonFormForm(forms.ModelForm):
class Meta:
fields = "__all__"
widgets = {
"description": AdminRichEditorWidget(),
"confirmation_note": AdminRichEditorWidget(),
"custom_fields": AdminJsonWidget(),
"config": AdminJsonWidget(),
}
def clean_custom_fields(self):
value = self.cleaned_data["custom_fields"]
validate_custom_fields(value)
return value
def _post_clean(self):
super()._post_clean()
try:
klass = get_people_form_class(self.instance)
klass()
except Exception:
self.add_error(
None,
ValidationError(
format_html(
"<p>{message}</p><pre>{stacktrace}</pre>",
message="Problème de création du formulaire. L'exception suivante a été rencontrée :",
stacktrace=traceback.format_exc(),
)
),
)
class AddPersonEmailForm(forms.Form):
email = forms.EmailField(label="Adresse email à ajouter", required=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit("ajouter", "Ajouter"))
class ChoosePrimaryAccount(forms.Form):
primary_account = forms.ModelChoiceField(
label="Compte principal", required=True, queryset=Person.objects.all()
)
def __init__(self, *args, persons, **kwargs):
super().__init__(*args, **kwargs)
self.fields["primary_account"].choices = [
("", "Choisir le compte principal")
] + [(p.id, p.email) for p in persons]
self.helper = FormHelper()
self.helper.add_input(Submit("fusionner", "Fusionner les comptes"))
| lafranceinsoumise/api-django | agir/people/admin/forms.py | Python | agpl-3.0 | 2,983 | 0.000336 |
#!/usr/bin/python2.7
from boto.glacier.layer1 import Layer1
from boto.glacier.concurrent import ConcurrentUploader
import sys
import os.path
from time import gmtime, strftime
access_key_id = "xxx"
secret_key = "xxx"
target_vault_name = "xxx"
inventory = "xxx"
# the file to be uploaded into the vault as an archive
fname = sys.argv[1]
# a description you give to the file
fdes = os.path.basename(sys.argv[1])
if not os.path.isfile(fname):
    print("Can't find the file to upload")
    sys.exit(-1)
# glacier uploader
glacier_layer1 = Layer1(aws_access_key_id=access_key_id, aws_secret_access_key=secret_key, is_secure=True)
uploader = ConcurrentUploader(glacier_layer1, target_vault_name, part_size=128*1024*1024, num_threads=1)
archive_id = uploader.upload(fname, fdes)
# write an inventory file
f = open(inventory, 'a+')
f.write(archive_id+'\t'+fdes+'\n')
f.close()
sys.exit(0)
| kickino/aws-scripts | glacier/glacier_push.py | Python | gpl-3.0 | 896 | 0.007813 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os
from calibre import prints as prints_, preferred_encoding, isbytestring
from calibre.utils.config import Config, ConfigProxy, JSONConfig
from calibre.utils.ipc.launch import Worker
from calibre.constants import __appname__, __version__, iswindows
from calibre.gui2 import error_dialog
# Time to wait for communication to/from the interpreter process
POLL_TIMEOUT = 0.01 # seconds
preferred_encoding, isbytestring, __appname__, __version__, error_dialog, \
iswindows
def console_config():
desc='Settings to control the calibre console'
c = Config('console', desc)
c.add_opt('theme', default='native', help='The color theme')
c.add_opt('scrollback', default=10000,
help='Max number of lines to keep in the scrollback buffer')
return c
prefs = ConfigProxy(console_config())
dynamic = JSONConfig('console')
def prints(*args, **kwargs):
kwargs['file'] = sys.__stdout__
prints_(*args, **kwargs)
class Process(Worker):
@property
def env(self):
env = dict(os.environ)
env.update(self._env)
return env
| alexston/calibre-webserver | src/calibre/utils/pyconsole/__init__.py | Python | gpl-3.0 | 1,289 | 0.013189 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.stat import Statistics
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="HypothesisTestingKolmogorovSmirnovTestExample")
# $example on$
parallelData = sc.parallelize([0.1, 0.15, 0.2, 0.3, 0.25])
# run a KS test for the sample versus a standard normal distribution
testResult = Statistics.kolmogorovSmirnovTest(parallelData, "norm", 0, 1)
# summary of the test including the p-value, test statistic, and null hypothesis
# if our p-value indicates significance, we can reject the null hypothesis
# Note that the Scala functionality of calling Statistics.kolmogorovSmirnovTest with
# a lambda to calculate the CDF is not made available in the Python API
print(testResult)
# $example off$
sc.stop()
| fharenheit/template-spark-app | src/main/python/mllib/hypothesis_testing_kolmogorov_smirnov_test_example.py | Python | apache-2.0 | 1,658 | 0.001206 |
#!/usr/bin/env python3
from setuptools import setup
setup(
name='SecFS',
version='0.1.0',
description='6.858 final project --- an encrypted and authenticated file system',
long_description= open('README.md', 'r').read(),
author='Jon Gjengset',
author_email='jon@thesquareplanet.com',
maintainer='MIT PDOS',
maintainer_email='pdos@csail.mit.edu',
url='https://github.com/mit-pdos/6.858-secfs',
packages=['secfs', 'secfs.store'],
install_requires=['llfuse', 'Pyro4', 'serpent', 'cryptography'],
scripts=['bin/secfs-server', 'bin/secfs-fuse'],
license='MIT',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Education",
"Topic :: Security",
"Topic :: System :: Filesystems",
]
)
| mit-pdos/secfs-skeleton | setup.py | Python | mit | 883 | 0.002265 |
#!/usr/bin/python
import time
import serial
import sys
import json
def ftdi232h_cmd(thePort, theBaud, theCommand):
ser = serial.Serial(
port=thePort, # /dev/ttyUSB0
baudrate=theBaud, # 9600
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
xonxoff=0, rtscts=0, dsrdtr=0,
)
ser.isOpen()
ser.write(theCommand+"\n")
    endtime = time.time() + 1  # wait up to 1 second for a reply
result = ""
while time.time() < endtime:
while ser.inWaiting() > 0:
#result=result+ser.read(1)
result=result+ser.read()
#result=ser.read()
ser.close()
#return result
return 'callback: ['+result.rstrip('\n')+']'
if len(sys.argv)!=4:
print "USAGE: ft232h__PythonCompanion_WIP_TESTER.py "+'"'+"<port> <baud> <serial message>"+'"'
exit(1)
print ftdi232h_cmd(sys.argv[1], sys.argv[2], sys.argv[3]).strip()
'''
import time
import serial
import sys
import json
ser = serial.Serial(
port='/dev/ttyUSB0', # or /dev/ttyAMA0 for serial on the Raspberry Pi
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
xonxoff=0, rtscts=0, dsrdtr=0,
)
ser.isOpen()
result = ""
ser.write("1"+"\n")
while ser.inWaiting() > 0:
result=result+ser.read()
result
'''
| stephaneAG/FT232H | ft232h__PythonCompanion_WIP_TESTER.py | Python | mit | 1,231 | 0.023558 |
# $Id: version.py 148 2006-09-22 01:30:23Z quarl $
import subprocess
def pipefrom(cmd):
return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
# TODO: get this from config file or vice versa, but don't hard-code both.
version = '1.1.0'
| prokoudine/gimp-deskew-plugin | admin/version.py | Python | gpl-2.0 | 258 | 0.007752 |
from decimal import Decimal
import mock
from unittest2 import TestCase
from restorm.clients.jsonclient import JSONClient, JSONClientMixin
class JSONClientTests(TestCase):
def setUp(self):
self.client = JSONClient()
@mock.patch('httplib2.Http.request')
def test_get(self, request):
request.return_value = ({'Status': 200, 'Content-Type': 'application/json'}, '{"foo": "bar"}')
response = self.client.get(uri='http://localhost/api')
data = response.content
self.assertIsInstance(data, dict)
self.assertTrue('foo' in data)
self.assertEqual(data['foo'], 'bar')
@mock.patch('httplib2.Http.request')
def test_incorrect_content_type(self, request):
request.return_value = ({'Status': 200, 'Content-Type': 'foobar'}, '{"foo": "bar"}')
response = self.client.get(uri='http://localhost/api')
data = response.content
self.assertIsInstance(data, basestring)
self.assertEqual(data, '{"foo": "bar"}')
class JSONClientMixinTests(TestCase):
def setUp(self):
self.mixin = JSONClientMixin()
def test_empty(self):
original_data = None
serialized_data = self.mixin.serialize(original_data)
self.assertEqual(serialized_data, '')
deserialized_data = self.mixin.deserialize(serialized_data)
self.assertEqual(original_data, deserialized_data)
def test_empty_string(self):
original_data = ''
serialized_data = self.mixin.serialize(original_data)
self.assertEqual(serialized_data, '""')
deserialized_data = self.mixin.deserialize(serialized_data)
self.assertEqual(original_data, deserialized_data)
def test_complex_data(self):
original_data = {'a': ['b', 'c', 1, Decimal('2.3')]}
serialized_data = self.mixin.serialize(original_data)
self.assertEqual(serialized_data, '{"a": ["b", "c", 1, 2.3]}')
deserialized_data = self.mixin.deserialize(serialized_data)
self.assertEqual(original_data, deserialized_data)
| joeribekker/restorm | restorm/clients/tests/test_jsonclient.py | Python | mit | 2,145 | 0.006527 |
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User, Group
from .endpoint import Endpoint
DEFAULT_STORE_SLUG = getattr(settings, 'DEFAULT_STORE_SLUG', 'public')
class Store(models.Model):
slug = models.SlugField(primary_key=True)
name = models.CharField(max_length=128)
query_endpoint = models.URLField()
update_endpoint = models.URLField(null=True, blank=True)
graph_store_endpoint = models.URLField(null=True, blank=True)
def __unicode__(self):
return self.name
def query(self, *args, **kwargs):
return Endpoint(self.query_endpoint).query(*args, **kwargs)
class Meta:
permissions = (('administer_store', 'can administer'),
('query_store', 'can query'),
('update_store', 'can update'))
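# Example query against a configured store (illustrative; assumes a matching row exists):
#
#   store = Store.objects.get(slug=DEFAULT_STORE_SLUG)
#   results = store.query('SELECT * WHERE { ?s ?p ?o } LIMIT 10')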
class UserPrivileges(models.Model):
user = models.ForeignKey(User, null=True, blank=True)
group = models.ForeignKey(Group, null=True, blank=True)
allow_concurrent_queries = models.BooleanField()
disable_throttle = models.BooleanField()
throttle_threshold = models.FloatField(null=True, blank=True)
deny_threshold = models.FloatField(null=True, blank=True)
intensity_decay = models.FloatField(null=True, blank=True)
disable_timeout = models.BooleanField()
maximum_timeout = models.IntegerField(null=True)
| ox-it/humfrey | humfrey/sparql/models.py | Python | bsd-3-clause | 1,393 | 0.001436 |
#!/usr/bin/env python3
# This file is part of OpenSoccerManager-Editor.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
import re
import unicodedata
import data
import uigtk.dialogs
import uigtk.interface
import uigtk.search
import uigtk.widgets
class Nations(uigtk.widgets.Grid):
name = "Nations"
def __init__(self):
uigtk.widgets.Grid.__init__(self)
self.set_border_width(5)
Nations.search = uigtk.search.Search(data.nations.get_nations)
Nations.search.treeview.connect("row-activated", self.on_row_activated)
Nations.search.treeselection.connect("changed", self.on_treeselection_changed)
self.attach(Nations.search, 0, 0, 1, 1)
self.nationedit = NationEdit()
self.nationedit.set_sensitive(False)
self.attach(self.nationedit, 1, 0, 1, 1)
self.populate_data()
def add_item(self):
'''
Add item into model and load attributes for editing.
'''
nation = data.nations.add_nation()
treeiter = Nations.search.liststore.insert(0, [nation.nationid, ""])
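        # The search list is wrapped in a Gtk.TreeModelFilter and a
        # Gtk.TreeModelSort, so the raw ListStore iter must be converted
        # through both layers before a visible path can be derived and
        # activated.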
treeiter1 = Nations.search.treemodelfilter.convert_child_iter_to_iter(treeiter)
treeiter2 = Nations.search.treemodelsort.convert_child_iter_to_iter(treeiter1[1])
treepath = Nations.search.treemodelsort.get_path(treeiter2[1])
Nations.search.activate_row(treepath)
self.nationedit.clear_details()
self.nationedit.nation = nation
self.nationedit.entryName.grab_focus()
def remove_item(self, *args):
'''
Query removal of selected nation if dialog enabled.
'''
model, treeiter = Nations.search.treeselection.get_selected()
if treeiter:
nationid = model[treeiter][0]
if data.preferences.confirm_remove:
nation = data.nations.get_nation_by_id(nationid)
dialog = uigtk.dialogs.RemoveItem("Nation", nation.name)
if dialog.show():
self.delete_nation(nationid)
else:
self.delete_nation(nationid)
def delete_nation(self, nationid):
'''
Remove nation from working data and repopulate list.
'''
data.nations.remove_nation(nationid)
self.populate_data()
def on_row_activated(self, treeview, treepath, treeviewcolumn):
'''
Get nation selected and initiate details loading.
'''
treeselection = treeview.get_selection()
model, treeiter = treeselection.get_selected()
if treeiter:
nationid = model[treeiter][0]
self.nationedit.set_details(nationid)
self.nationedit.set_sensitive(True)
data.window.toolbar.toolbuttonRemove.set_sensitive(True)
else:
self.nationedit.clear_details()
self.nationedit.set_sensitive(False)
data.window.toolbar.toolbuttonRemove.set_sensitive(False)
def on_treeselection_changed(self, treeselection):
'''
Update visible details when selection is changed.
'''
model, treeiter = treeselection.get_selected()
if treeiter:
data.window.menu.menuitemRemove.set_sensitive(True)
data.window.toolbar.toolbuttonRemove.set_sensitive(True)
else:
data.window.menu.menuitemRemove.set_sensitive(False)
data.window.toolbar.toolbuttonRemove.set_sensitive(False)
self.nationedit.clear_details()
self.nationedit.set_sensitive(False)
def populate_data(self):
Nations.search.liststore.clear()
for nationid, nation in data.nations.get_nations():
Nations.search.liststore.append([nationid, nation.name])
Nations.search.activate_first_item()
class NationEdit(uigtk.widgets.Grid):
def __init__(self):
uigtk.widgets.Grid.__init__(self)
grid = uigtk.widgets.Grid()
grid.set_hexpand(True)
grid.set_vexpand(True)
self.attach(grid, 0, 0, 1, 1)
label = uigtk.widgets.Label("_Name", leftalign=True)
grid.attach(label, 0, 0, 1, 1)
self.entryName = Gtk.Entry()
label.set_mnemonic_widget(self.entryName)
grid.attach(self.entryName, 1, 0, 1, 1)
label = uigtk.widgets.Label("_Denonym", leftalign=True)
grid.attach(label, 0, 1, 1, 1)
self.entryDenonym = Gtk.Entry()
label.set_mnemonic_widget(self.entryDenonym)
grid.attach(self.entryDenonym, 1, 1, 1, 1)
self.actionbuttons = uigtk.interface.ActionButtons()
self.actionbuttons.buttonUpdate.connect("clicked", self.on_update_clicked)
self.attach(self.actionbuttons, 0, 1, 1, 1)
def on_update_clicked(self, *args):
'''
Update current values into working data.
'''
nation = data.nations.get_nation_by_id(self.nationid)
nation.name = self.entryName.get_text()
nation.denonym = self.entryDenonym.get_text()
model, treeiter = Nations.search.treeselection.get_selected()
child_treeiter = model.convert_iter_to_child_iter(treeiter)
liststore = model.get_model()
liststore[child_treeiter][1] = nation.name
model, treeiter = Nations.search.treeselection.get_selected()
treepath = model.get_path(treeiter)
Nations.search.treeview.scroll_to_cell(treepath)
data.unsaved = True
def set_details(self, nationid):
'''
Load initial data when selection has changed.
'''
self.clear_details()
self.nationid = nationid
nation = data.nations.get_nation_by_id(nationid)
self.entryName.set_text(nation.name)
self.entryDenonym.set_text(nation.denonym)
def clear_details(self):
'''
Clear nation fields to empty.
'''
self.entryName.set_text("")
self.entryDenonym.set_text("")
| OpenSoccerManager/opensoccermanager-editor | uigtk/nations.py | Python | gpl-3.0 | 6,581 | 0.000608 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {
    'sensorParams': {
        'encoders': {
            u'c0_timeOfDay': None,
            u'c0_dayOfWeek': None,
            u'c1': {'name': 'c1', 'clipInput': True, 'n': 275,
                    'fieldname': 'c1', 'w': 21,
                    'type': 'AdaptiveScalarEncoder'},
            u'c0_weekend': None}},
    'spParams': {'synPermInactiveDec': 0.052500000000000005},
    'tmParams': {'minThreshold': 11, 'activationThreshold': 14,
                 'pamLength': 3},
    'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 500,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
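# Editor's note (sketch, not part of the generated file): importBaseDescription
# loads ../base.py, overlays the keys from `config` above onto the base
# experiment's parameters, and returns the merged module; locals().update()
# then re-exports those merged names from this module.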
| ywcui1990/nupic | tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/a_plus_b/description.py | Python | agpl-3.0 | 2,100 | 0.001905 |
import unittest
import logging
#from datetime import datetime, timedelta
#from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext import testbed
import model
class TestConfig(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
class TestSecrets(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
class TestUser(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
def test_createOrUpdate(self):
'''Create or Update a User'''
fake_email = 'john@smith.com'
fake_stripe_id = 'should there be a test ID for proper testing?'
fake_occupation = 'Lobbyist'
fake_employer = 'Acme'
fake_phone = '800-555-1212'
fake_target = None
logging.info('Testing updating a user that does not exist...')
        user0 = model.User.createOrUpdate(
            email=fake_email,
            occupation=fake_occupation,
            employer=fake_employer,
            phone=fake_phone)
self.assertEqual(user0.email, fake_email)
self.assertEqual(user0.occupation, fake_occupation)
self.assertEqual(user0.employer, fake_employer)
self.assertEqual(user0.phone, fake_phone)
logging.info('Test updating that user we just created...')
user1 = model.User.createOrUpdate(email=fake_email, occupation='Regulator')
self.assertEqual(user1.occupation, 'Regulator')
#TODO: confirm storage in datastore.
#TODO: see if we can store bad emails and other data
class TestPledge(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
class TestWpPledge(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
class TestShardedCounter(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
class TestFunctions(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
| Rio517/pledgeservice | unittests/test_Model.py | Python | apache-2.0 | 4,051 | 0.007652 |
# Copyright 2012 Dave Hughes.
#
# This file is part of dbsuite.
#
# dbsuite is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# dbsuite is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# dbsuite. If not, see <http://www.gnu.org/licenses/>.
| waveform80/dbsuite | dbsuite/plugins/xml/__init__.py | Python | gpl-3.0 | 683 | 0 |
"""
WSGI config for octo_nemesis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "octo_nemesis.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
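# Editor's sketch (hypothetical command, not part of the original file): any
# WSGI server can serve this callable, e.g. `gunicorn octo_nemesis.wsgi:application`.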
| monkeywidget/massive-octo-nemesis | octo_nemesis/octo_nemesis/wsgi.py | Python | gpl-2.0 | 399 | 0.002506 |
import unittest
from flask import Flask
from simple_flask_blueprint.rest.blueprint_rest import bp
class SimpleBlueprintRestTest(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.register_blueprint(bp, url_prefix='/test')
self.tester = self.app.test_client(self)
def test_say_hallo(self):
response = self.tester.get('/test/', content_type='application/json')
self.assertEquals(response.status_code, 200)
self.assertEquals(response.data, 'Hallo!')
response = self.tester.get('/test/Kalimaha/', content_type='application/json')
self.assertEquals(response.status_code, 200)
self.assertEquals(response.data, 'Hallo Kalimaha!')
| Kalimaha/simple_flask_blueprint | simple_flask_blueprint_test/rest/blueprint_rest_test.py | Python | gpl-2.0 | 729 | 0.001372 |
# -*- coding: utf-8 -*-
""" Python KNX framework
License
=======
- B{PyKNyX} (U{https://github.com/knxd/pyknyx}) is Copyright:
 - © 2016-2017 Matthias Urlichs
 - PyKNyX is a fork of pKNyX
 - © 2013-2015 Frédéric Mantegazza
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see:
- U{http://www.gnu.org/licenses/gpl.html}
Module purpose
==============
cEMI frame management
Implements
==========
- B{CEMIFactory}
- B{CEMIFactoryValueError}
Documentation
=============
Usage
=====
@author: Frédéric Mantegazza
@author: B. Malinowsky
@copyright: (C) 2013-2015 Frédéric Mantegazza
@copyright: (C) 2006, 2011 B. Malinowsky
@license: GPL
"""
from pyknyx.common.exception import PyKNyXValueError
from pyknyx.services.logger import logging; logger = logging.getLogger(__name__)
from pyknyx.stack.cemi.cemi import CEMIValueError
class CEMIFactoryValueError(PyKNyXValueError):
"""
"""
class CEMIFactory(object):
""" cEMI frame creation handling class
"""
def __init__(self):
"""
"""
super(CEMIFactory, self).__init__()
| knxd/pKNyX | pyknyx/stack/cemi/cemiFactory.py | Python | gpl-3.0 | 1,718 | 0.001754 |
__version__ = '2.0.post6'
| Outernet-Project/librarian-netinterfaces | librarian_netinterfaces/__init__.py | Python | gpl-3.0 | 26 | 0 |
import os
import re
from setuptools import setup
base_path = os.path.dirname(__file__)
def get_long_description():
readme_md = os.path.join(base_path, "README.md")
with open(readme_md) as f:
return f.read()
with open(os.path.join(base_path, "cfscrape", "__init__.py")) as f:
VERSION = re.compile(r'.*__version__ = "(.*?)"', re.S).match(f.read()).group(1)
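# Editor's note: the regex above extracts __version__ without importing the
# package; e.g. a cfscrape/__init__.py containing `__version__ = "2.1.1"`
# yields VERSION == "2.1.1".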
setup(
name="cfscrape",
packages=["cfscrape"],
version=VERSION,
description='A simple Python module to bypass Cloudflare\'s anti-bot page. See https://github.com/Anorov/cloudflare-scrape for more information.',
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Anorov",
author_email="anorov.vorona@gmail.com",
url="https://github.com/Anorov/cloudflare-scrape",
keywords=["cloudflare", "scraping"],
include_package_data=True,
install_requires=["requests >= 2.23.0"],
)
| Anorov/cloudflare-scrape | setup.py | Python | mit | 943 | 0.002121 |
#!/usr/bin/env python
import json
import os
import requests
import urllib
import ConfigParser
import time
import sys
from bson.json_util import dumps
import pyinotify
from multiprocessing import Process
config = ConfigParser.ConfigParser()
config.read('config.ini')
url = config.get("repl", "api-url")
base = config.get("agent", "base")
group = config.get("agent", "group")
source = config.get("agent", "source")
header={'Authorization':''}
wm = pyinotify.WatchManager() # Watch Manager
mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE | pyinotify.IN_CLOSE_WRITE # watched events
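# Editor's note: the mask ORs three inotify event types -- IN_CLOSE_WRITE fires
# when a file opened for writing is closed (i.e. the write is complete),
# IN_CREATE covers new files and directories, and IN_DELETE covers removals.
# EventHandler below maps each event to the matching REST call.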
def transfer(file):
try:
st=os.stat(base+file)
r = requests.post(url + 'transfer?file=%s&source=%s&group=%s&size=%s&ctime=%s' % (file,source,group,st.st_size,st.st_ctime), headers=header)
print dumps(r.json(),indent=2)
except Exception as e:
print str(e)
def rmkdir(file):
try:
st=os.stat(base+file)
r = requests.get(url + 'mkdir?file=%s&source=%s&group=%s&ctime=%s' % (file,source,group,st.st_ctime), headers=header)
print dumps(r.json(),indent=2)
except Exception as e:
print str(e)
def delete(file):
try:
r = requests.get(url + 'delete?file=%s&source=%s&group=%s' % (file,source,group), headers=header)
print dumps(r.json(),indent=2)
except Exception as e:
print str(e)
def status():
r = requests.get(url + 'status' , headers=header)
print dumps(r.json(),indent=2)
# inotify handler
#
class EventHandler(pyinotify.ProcessEvent):
def process_IN_CLOSE_WRITE(self, event):
file=event.pathname.replace(base,'')
print "Close Write:", file
transfer(file)
def process_IN_CREATE(self, event):
if event.dir == True:
wm.add_watch(event.pathname, mask, rec=True)
dir=event.pathname.replace(base,'')
print "Mkdir:", dir
rmkdir(dir)
def process_IN_DELETE(self, event):
file=event.pathname.replace(base,'')
print "Removing:", file
delete(file)
def update():
while True:
status()
time.sleep(60)
if __name__ == "__main__":
# args=sys.argv[1:]
# if len(args)>1 and args[0]=='transfer':
# transfer(args[1])
# if len(args)>1 and args[0]=='delete':
# delete(args[1])
# if len(args)>0 and args[0]=='status':
# status()
    tok = open(os.getenv("HOME") + "/.repl_token")
    token = tok.read().strip()  # strip the trailing newline so the header value stays valid
    tok.close()
    header['Authorization'] = 'Globus-Goauthtoken ' + token
p = Process(target=update)
p.start()
handler = EventHandler()
notifier = pyinotify.Notifier(wm, handler)
for root, dirnames, filenames in os.walk(base):
print root
wdd = wm.add_watch(root, mask, rec=True)
notifier.loop()
| sdfdemo/replication | agent/agent.py | Python | mit | 2,792 | 0.027937 |
# _ _ _
# | | | | | |
# | | _ _ | |_ | |__ ___ _ __
# | || | | || __|| '_ \ / _ \| '__|
# | || |_| || |_ | | | || __/| |
# |_| \__,_| \__||_| |_| \___||_|
#
"""
.. module:: luther
:synopsis: lightweight DDNS service with REST API and JS frontend.
.. moduleauthor:: Roland Shoemaker <rolandshoemaker@gmail.com>
"""
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_envvar('LUTHER_SETTINGS')
if app.config.get('OVERRIDE_HTTPS') and app.config['OVERRIDE_HTTPS']:
app.config['ROOT_HTTP'] = 'http://'+app.config['ROOT_DOMAIN']
else:
app.config['ROOT_HTTP'] = 'https://'+app.config['ROOT_DOMAIN']
app.config['SUB_MAX_LENGTH'] = 255-len('.'+app.config['DNS_ROOT_DOMAIN'])
db = SQLAlchemy(app)
from luther.apiv1 import api_v1, run_stats
app.register_blueprint(api_v1, url_prefix='/api/v1')
if app.config['ENABLE_FRONTEND']:
from luther.frontend import frontend
app.register_blueprint(frontend)
from luther.models import init_db
init_db()
if app.config['PROXIED']:
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
timer = run_stats()
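# Editor's sketch (assumed usage, not part of the original module): with the
# LUTHER_SETTINGS environment variable pointing at a config file, the app can
# be served for development via Flask's built-in server:
#
#     LUTHER_SETTINGS=/path/to/luther.cfg python -c "from luther import app; app.run()"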
| rolandshoemaker/luther | luther/__init__.py | Python | gpl-2.0 | 1,181 | 0.001693 |
# coding: utf-8
{
'name': 'Saudi Arabia - Accounting',
'version': '1.1',
'author': 'DVIT.ME',
'category': 'Localization',
'description': """
Odoo Arabic localization for most Arabic countries and Saudi Arabia.
This initially includes the chart of accounts of the USA translated to Arabic.
In the future this module will include some payroll rules for the Middle East.
""",
'website': 'http://www.dvit.me',
'depends': ['account', 'l10n_multilang'],
'data': [
'account.chart.template.xml',
'account.account.template.csv',
'account_chart_template_after.xml',
'account_chart_template.yml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
'post_init_hook': 'load_translations',
}
| vileopratama/vitech | src/addons/l10n_sa/__openerp__.py | Python | mit | 765 | 0 |
from hamcrest.core.assert_that import assert_that
from hamcrest.core.core.isequal import equal_to
from src.business.TemplateManager import TemplateManager
from src.data.template.TemplateReaderFactory import TemplateReaderFactory
__author__ = 'DWI'
import unittest
class TemplateManagerTest(unittest.TestCase):
def test_get_template(self):
reader_factory = TemplateReaderFactory()
manager = TemplateManager(reader_factory)
directory = "./templates"
manager.load_templates(directory)
assert_that(manager.get_template("test").render(), equal_to("123"))
assert_that(manager.get_template("test2").render(), equal_to("123"))
if __name__ == '__main__':
unittest.main()
| DanielWieczorek/FancyReadmeBuilder | test/business/TemplateManagerTest.py | Python | mit | 727 | 0 |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import os
import mimetypes
import urllib
from tweepy.binder import bind_api
from tweepy.error import TweepError
from tweepy.parsers import ModelParser, Parser
from tweepy.utils import list_to_csv
class API(object):
"""Twitter API"""
def __init__(self, auth_handler=None,
host='api.twitter.com', search_host='search.twitter.com',
cache=None, api_root='/1.1', search_root='',
retry_count=0, retry_delay=0, retry_errors=None, timeout=60,
parser=None, compression=False, wait_on_rate_limit=False,
wait_on_rate_limit_notify=False, proxy=''):
""" Api instance Constructor
:param auth_handler:
:param host: url of the server of the rest api, default:'api.twitter.com'
:param search_host: url of the search server, default:'search.twitter.com'
:param cache: Cache to query if a GET method is used, default:None
:param api_root: suffix of the api version, default:'/1.1'
:param search_root: suffix of the search version, default:''
:param retry_count: number of allowed retries, default:0
:param retry_delay: delay in second between retries, default:0
:param retry_errors: default:None
:param timeout: delay before to consider the request as timed out in seconds, default:60
:param parser: ModelParser instance to parse the responses, default:None
:param compression: If the response is compressed, default:False
:param wait_on_rate_limit: If the api wait when it hits the rate limit, default:False
:param wait_on_rate_limit_notify: If the api print a notification when the rate limit is hit, default:False
:param proxy: Url to use as proxy during the HTTP request, default:''
:raise TypeError: If the given parser is not a ModelParser instance.
"""
self.auth = auth_handler
self.host = host
self.search_host = search_host
self.api_root = api_root
self.search_root = search_root
self.cache = cache
self.compression = compression
self.retry_count = retry_count
self.retry_delay = retry_delay
self.retry_errors = retry_errors
self.timeout = timeout
self.wait_on_rate_limit = wait_on_rate_limit
self.wait_on_rate_limit_notify = wait_on_rate_limit_notify
self.parser = parser or ModelParser()
self.proxy = {}
if proxy:
self.proxy['https'] = proxy
# Attempt to explain more clearly the parser argument requirements
# https://github.com/tweepy/tweepy/issues/421
#
parser_type = Parser
if not isinstance(self.parser, parser_type):
raise TypeError(
'"parser" argument has to be an instance of "{}". It is currently a {}.'.format(
parser_type.__name__, type(self.parser)
)
)
@property
def home_timeline(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/statuses/home_timeline
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/home_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
def statuses_lookup(self, id_, include_entities=None, trim_user=None, map_=None):
return self._statuses_lookup(list_to_csv(id_), include_entities,
trim_user, map_)
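    # Editor's note (illustrative): list_to_csv joins the ids into one
    # comma-separated string, so api.statuses_lookup([123, 456]) requests
    # /statuses/lookup.json with id=123,456.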
@property
def _statuses_lookup(self):
""" :reference: https://dev.twitter.com/docs/api/1.5/get/statuses/lookup
:allowed_param:'id', 'include_entities', 'trim_user', 'map'
"""
return bind_api(
api=self,
path='/statuses/lookup.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'include_entities', 'trim_user', 'map'],
require_auth=True
)
@property
def user_timeline(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/statuses/user_timeline
        :allowed_param:'id', 'user_id', 'screen_name', 'since_id', 'max_id', 'count', 'include_rts'
"""
return bind_api(
api=self,
path='/statuses/user_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'since_id',
'max_id', 'count', 'include_rts']
)
@property
def mentions_timeline(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/statuses/mentions_timeline
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/mentions_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def related_results(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/related_results/show/%3id.format
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/related_results/show/{id}.json',
payload_type='relation', payload_list=True,
allowed_param=['id'],
require_auth=False
)
@property
def retweets_of_me(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/statuses/retweets_of_me
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/retweets_of_me.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def get_status(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/statuses/show
        :allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/show.json',
payload_type='status',
allowed_param=['id']
)
@property
def update_status(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/statuses/update
:allowed_param:'status', 'in_reply_to_status_id', 'lat', 'long', 'source', 'place_id', 'display_coordinates'
"""
return bind_api(
api=self,
path='/statuses/update.json',
method='POST',
payload_type='status',
allowed_param=['status', 'in_reply_to_status_id', 'lat', 'long', 'source', 'place_id', 'display_coordinates'],
require_auth=True
)
def update_with_media(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/statuses/update_with_media """
f = kwargs.pop('file', None)
headers, post_data = API._pack_image(filename, 3072, form_field='media[]', f=f)
kwargs.update({'headers': headers, 'post_data': post_data})
return bind_api(
api=self,
path='/statuses/update_with_media.json',
method='POST',
payload_type='status',
allowed_param=[
'status', 'possibly_sensitive', 'in_reply_to_status_id', 'lat', 'long',
'place_id', 'display_coordinates'
],
require_auth=True
)(*args, **kwargs)
@property
def destroy_status(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/statuses/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/destroy/{id}.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def retweet(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/statuses/retweet
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/retweet/{id}.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def retweets(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/statuses/retweets
:allowed_param:'id', 'count'
"""
return bind_api(
api=self,
path='/statuses/retweets/{id}.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'count'],
require_auth=True
)
@property
def retweeters(self):
"""
        :allowed_param:'id', 'cursor', 'stringify_ids'
"""
return bind_api(
api=self,
path='/statuses/retweeters/ids.json',
payload_type='ids',
allowed_param=['id', 'cursor', 'stringify_ids']
)
@property
def get_user(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/users/show
        :allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/users/show.json',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name']
)
@property
def get_oembed(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/statuses/oembed
        :allowed_param:'id', 'url', 'maxwidth', 'hide_media', 'omit_script', 'align', 'related', 'lang'
"""
return bind_api(
api=self,
path='/statuses/oembed.json',
payload_type='json',
allowed_param=['id', 'url', 'maxwidth', 'hide_media', 'omit_script', 'align', 'related', 'lang']
)
def lookup_users(self, user_ids=None, screen_names=None):
""" Perform bulk look up of users from user ID or screenname """
return self._lookup_users(list_to_csv(user_ids), list_to_csv(screen_names))
@property
def _lookup_users(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/users/lookup.json
allowed_param=['user_id', 'screen_name'],
"""
return bind_api(
api=self,
path='/users/lookup.json',
payload_type='user', payload_list=True,
allowed_param=['user_id', 'screen_name'],
)
def me(self):
""" Get the authenticated user """
return self.get_user(screen_name=self.auth.get_username())
@property
def search_users(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/users/search
:allowed_param:'q', 'count', 'page'
"""
return bind_api(
api=self,
path='/users/search.json',
payload_type='user', payload_list=True,
require_auth=True,
allowed_param=['q', 'count', 'page']
)
@property
def suggested_users(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/users/suggestions/%3slug
        :allowed_param:'slug', 'lang'
"""
return bind_api(
api=self,
path='/users/suggestions/{slug}.json',
payload_type='user', payload_list=True,
require_auth=True,
allowed_param=['slug', 'lang']
)
@property
def suggested_categories(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/users/suggestions
:allowed_param:'lang'
"""
return bind_api(
api=self,
path='/users/suggestions.json',
payload_type='category', payload_list=True,
allowed_param=['lang'],
require_auth=True
)
@property
def suggested_users_tweets(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/users/suggestions/%3slug/members
:allowed_param:'slug'
"""
return bind_api(
api=self,
path='/users/suggestions/{slug}/members.json',
payload_type='status', payload_list=True,
allowed_param=['slug'],
require_auth=True
)
@property
def direct_messages(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/direct_messages
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/direct_messages.json',
payload_type='direct_message', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def get_direct_message(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/direct_messages/show
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/direct_messages/show/{id}.json',
payload_type='direct_message',
allowed_param=['id'],
require_auth=True
)
@property
def sent_direct_messages(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/direct_messages/sent
:allowed_param:'since_id', 'max_id', 'count', 'page'
"""
return bind_api(
api=self,
path='/direct_messages/sent.json',
payload_type='direct_message', payload_list=True,
allowed_param=['since_id', 'max_id', 'count', 'page'],
require_auth=True
)
@property
def send_direct_message(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/direct_messages/new
:allowed_param:'user', 'screen_name', 'user_id', 'text'
"""
return bind_api(
api=self,
path='/direct_messages/new.json',
method='POST',
payload_type='direct_message',
allowed_param=['user', 'screen_name', 'user_id', 'text'],
require_auth=True
)
@property
def destroy_direct_message(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/delete/direct_messages/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/direct_messages/destroy.json',
method='POST',
payload_type='direct_message',
allowed_param=['id'],
require_auth=True
)
@property
def create_friendship(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/friendships/create
:allowed_param:'id', 'user_id', 'screen_name', 'follow'
"""
return bind_api(
api=self,
path='/friendships/create.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name', 'follow'],
require_auth=True
)
@property
def destroy_friendship(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/delete/friendships/destroy
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/friendships/destroy.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def show_friendship(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/friendships/show
        :allowed_param:'source_id', 'source_screen_name', 'target_id', 'target_screen_name'
"""
return bind_api(
api=self,
path='/friendships/show.json',
payload_type='friendship',
allowed_param=['source_id', 'source_screen_name',
'target_id', 'target_screen_name']
)
def lookup_friendships(self, user_ids=None, screen_names=None):
""" Perform bulk look up of friendships from user ID or screenname """
return self._lookup_friendships(list_to_csv(user_ids), list_to_csv(screen_names))
@property
def _lookup_friendships(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/friendships/lookup
:allowed_param:'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/friendships/lookup.json',
payload_type='relationship', payload_list=True,
allowed_param=['user_id', 'screen_name'],
require_auth=True
)
@property
def friends_ids(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/friends/ids
        :allowed_param:'id', 'user_id', 'screen_name', 'cursor'
"""
return bind_api(
api=self,
path='/friends/ids.json',
payload_type='ids',
allowed_param=['id', 'user_id', 'screen_name', 'cursor']
)
@property
def friends(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/friends/list
        :allowed_param:'id', 'user_id', 'screen_name', 'cursor'
"""
return bind_api(
api=self,
path='/friends/list.json',
payload_type='user', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'cursor']
)
@property
def friendships_incoming(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/friendships/incoming
        :allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/friendships/incoming.json',
payload_type='ids',
allowed_param=['cursor']
)
@property
def friendships_outgoing(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/friendships/outgoing
        :allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/friendships/outgoing.json',
payload_type='ids',
allowed_param=['cursor']
)
@property
def followers_ids(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/followers/ids
        :allowed_param:'id', 'user_id', 'screen_name', 'cursor', 'count'
"""
return bind_api(
api=self,
path='/followers/ids.json',
payload_type='ids',
allowed_param=['id', 'user_id', 'screen_name', 'cursor', 'count']
)
@property
def followers(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/followers/list
:allowed_param:'id', 'user_id', 'screen_name', 'cursor', 'count', 'skip_status', 'include_user_entities'
"""
return bind_api(
api=self,
path='/followers/list.json',
payload_type='user', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'cursor', 'count',
'skip_status', 'include_user_entities']
)
def verify_credentials(self, **kargs):
""" account/verify_credentials """
try:
return bind_api(
api=self,
path='/account/verify_credentials.json',
payload_type='user',
require_auth=True,
allowed_param=['include_entities', 'skip_status'],
)(**kargs)
except TweepError as e:
if e.response and e.response.status == 401:
return False
raise
@property
def rate_limit_status(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/account/rate_limit_status
:allowed_param:'resources'
"""
return bind_api(
api=self,
path='/application/rate_limit_status.json',
payload_type='json',
allowed_param=['resources'],
use_cache=False
)
@property
def set_delivery_device(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/account/update_delivery_device
:allowed_param:'device'
"""
return bind_api(
api=self,
path='/account/update_delivery_device.json',
method='POST',
allowed_param=['device'],
payload_type='user',
require_auth=True
)
@property
def update_profile_colors(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/account/update_profile_colors
        :allowed_param:'profile_background_color', 'profile_text_color', 'profile_link_color', 'profile_sidebar_fill_color', 'profile_sidebar_border_color'
"""
return bind_api(
api=self,
path='/account/update_profile_colors.json',
method='POST',
payload_type='user',
allowed_param=['profile_background_color', 'profile_text_color',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_sidebar_border_color'],
require_auth=True
)
def update_profile_image(self, filename, file_=None):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/account/update_profile_image """
headers, post_data = API._pack_image(filename, 700, f=file_)
return bind_api(
api=self,
path='/account/update_profile_image.json',
method='POST',
payload_type='user',
allowed_param=['include_entities', 'skip_status'],
require_auth=True
)(self, post_data=post_data, headers=headers)
def update_profile_background_image(self, filename, **kargs):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/account/update_profile_background_image """
f = kargs.pop('file', None)
headers, post_data = API._pack_image(filename, 800, f=f)
bind_api(
api=self,
path='/account/update_profile_background_image.json',
method='POST',
payload_type='user',
allowed_param=['tile', 'include_entities', 'skip_status', 'use'],
require_auth=True
)(self, post_data=post_data, headers=headers)
def update_profile_banner(self, filename, **kargs):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/account/update_profile_banner """
f = kargs.pop('file', None)
headers, post_data = API._pack_image(filename, 700, form_field="banner", f=f)
bind_api(
api=self,
path='/account/update_profile_banner.json',
method='POST',
allowed_param=['width', 'height', 'offset_left', 'offset_right'],
require_auth=True
)(self, post_data=post_data, headers=headers)
@property
def update_profile(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/account/update_profile
:allowed_param:'name', 'url', 'location', 'description'
"""
return bind_api(
api=self,
path='/account/update_profile.json',
method='POST',
payload_type='user',
allowed_param=['name', 'url', 'location', 'description'],
require_auth=True
)
@property
def favorites(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/favorites
        :allowed_param:'screen_name', 'user_id', 'max_id', 'count', 'since_id'
"""
return bind_api(
api=self,
path='/favorites/list.json',
payload_type='status', payload_list=True,
            allowed_param=['screen_name', 'user_id', 'max_id', 'count', 'since_id']
)
@property
def create_favorite(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/favorites/create
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/favorites/create.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def destroy_favorite(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/favorites/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/favorites/destroy.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def create_block(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/blocks/create
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/blocks/create.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def destroy_block(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/delete/blocks/destroy
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/blocks/destroy.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def blocks(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/blocks/blocking
:allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/blocks/list.json',
payload_type='user', payload_list=True,
allowed_param=['cursor'],
require_auth=True
)
@property
def blocks_ids(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/blocks/ids """
return bind_api(
api=self,
path='/blocks/ids.json',
payload_type='json',
require_auth=True
)
@property
def report_spam(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/report_spam
:allowed_param:'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/users/report_spam.json',
method='POST',
payload_type='user',
allowed_param=['user_id', 'screen_name'],
require_auth=True
)
@property
def saved_searches(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/saved_searches/show/%3Aid """
return bind_api(
api=self,
path='/saved_searches/list.json',
payload_type='saved_search', payload_list=True,
require_auth=True
)
@property
def get_saved_search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/saved_searches/show/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/saved_searches/show/{id}.json',
payload_type='saved_search',
allowed_param=['id'],
require_auth=True
)
@property
def create_saved_search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/saved_searches/create
:allowed_param:'query'
"""
return bind_api(
api=self,
path='/saved_searches/create.json',
method='POST',
payload_type='saved_search',
allowed_param=['query'],
require_auth=True
)
@property
def destroy_saved_search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/saved_searches/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/saved_searches/destroy/{id}.json',
method='POST',
payload_type='saved_search',
allowed_param=['id'],
require_auth=True
)
@property
def create_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/create
:allowed_param:'name', 'mode', 'description'
"""
return bind_api(
api=self,
path='/lists/create.json',
method='POST',
payload_type='list',
allowed_param=['name', 'mode', 'description'],
require_auth=True
)
@property
def destroy_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/destroy
:allowed_param:'owner_screen_name', 'owner_id', 'list_id', 'slug'
"""
return bind_api(
api=self,
path='/lists/destroy.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'owner_id', 'list_id', 'slug'],
require_auth=True
)
@property
def update_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/update
        :allowed_param:'list_id', 'slug', 'name', 'mode', 'description', 'owner_screen_name', 'owner_id'
"""
return bind_api(
api=self,
path='/lists/update.json',
method='POST',
payload_type='list',
allowed_param=['list_id', 'slug', 'name', 'mode', 'description', 'owner_screen_name', 'owner_id'],
require_auth=True
)
@property
def lists_all(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/list
:allowed_param:'screen_name', 'user_id'
"""
return bind_api(
api=self,
path='/lists/list.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id'],
require_auth=True
)
@property
def lists_memberships(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/memberships
:allowed_param:'screen_name', 'user_id', 'filter_to_owned_lists', 'cursor'
"""
return bind_api(
api=self,
path='/lists/memberships.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id', 'filter_to_owned_lists', 'cursor'],
require_auth=True
)
@property
def lists_subscriptions(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/subscriptions
:allowed_param:'screen_name', 'user_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/subscriptions.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id', 'cursor'],
require_auth=True
)
@property
def list_timeline(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/statuses
        :allowed_param:'owner_screen_name', 'slug', 'owner_id', 'list_id', 'since_id', 'max_id', 'count', 'include_rts'
"""
return bind_api(
api=self,
path='/lists/statuses.json',
payload_type='status', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'owner_id', 'list_id', 'since_id', 'max_id', 'count', 'include_rts']
)
@property
def get_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/show
        :allowed_param:'owner_screen_name', 'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/show.json',
payload_type='list',
allowed_param=['owner_screen_name', 'owner_id', 'slug', 'list_id']
)
@property
def add_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/create
:allowed_param:'screen_name', 'user_id', 'owner_screen_name', 'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/members/create.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'owner_screen_name', 'owner_id', 'slug', 'list_id'],
require_auth=True
)
@property
def remove_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/destroy
:allowed_param:'screen_name', 'user_id', 'owner_screen_name', 'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/members/destroy.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'owner_screen_name', 'owner_id', 'slug', 'list_id'],
require_auth=True
)
def add_list_members(self, screen_name=None, user_id=None, slug=None,
list_id=None, owner_id=None, owner_screen_name=None):
""" Perform bulk add of list members from user ID or screenname """
return self._add_list_members(list_to_csv(screen_name),
list_to_csv(user_id),
slug, list_id, owner_id,
owner_screen_name)
@property
def _add_list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/create_all
:allowed_param:'screen_name', 'user_id', 'slug', 'list_id', 'owner_id', 'owner_screen_name'
"""
return bind_api(
api=self,
path='/lists/members/create_all.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'slug', 'list_id', 'owner_id', 'owner_screen_name'],
require_auth=True
)
def remove_list_members(self, screen_name=None, user_id=None, slug=None,
list_id=None, owner_id=None, owner_screen_name=None):
""" Perform bulk remove of list members from user ID or screenname """
return self._remove_list_members(list_to_csv(screen_name),
list_to_csv(user_id),
slug, list_id, owner_id,
owner_screen_name)
@property
def _remove_list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/destroy_all
:allowed_param:'screen_name', 'user_id', 'slug', 'list_id', 'owner_id', 'owner_screen_name'
"""
return bind_api(
api=self,
path='/lists/members/destroy_all.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'slug', 'list_id', 'owner_id', 'owner_screen_name'],
require_auth=True
)
@property
def list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/members
        :allowed_param:'owner_screen_name', 'slug', 'list_id', 'owner_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/members.json',
payload_type='user', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'list_id', 'owner_id', 'cursor']
)
@property
def show_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/members/show
        :allowed_param:'list_id', 'slug', 'user_id', 'screen_name', 'owner_screen_name', 'owner_id'
"""
return bind_api(
api=self,
path='/lists/members/show.json',
payload_type='user',
allowed_param=['list_id', 'slug', 'user_id', 'screen_name', 'owner_screen_name', 'owner_id']
)
@property
def subscribe_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/subscribers/create
:allowed_param:'owner_screen_name', 'slug', 'owner_id', 'list_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/create.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'slug', 'owner_id', 'list_id'],
require_auth=True
)
@property
def unsubscribe_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/subscribers/destroy
:allowed_param:'owner_screen_name', 'slug', 'owner_id', 'list_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/destroy.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'slug', 'owner_id', 'list_id'],
require_auth=True
)
@property
def list_subscribers(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/subscribers
        :allowed_param:'owner_screen_name', 'slug', 'owner_id', 'list_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/subscribers.json',
payload_type='user', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'owner_id', 'list_id', 'cursor']
)
@property
def show_list_subscriber(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/subscribers/show
        :allowed_param:'owner_screen_name', 'slug', 'screen_name', 'owner_id', 'list_id', 'user_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/show.json',
payload_type='user',
allowed_param=['owner_screen_name', 'slug', 'screen_name', 'owner_id', 'list_id', 'user_id']
)
@property
def trends_available(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/trends/available """
return bind_api(
api=self,
path='/trends/available.json',
payload_type='json'
)
@property
def trends_place(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/trends/place
        :allowed_param:'id', 'exclude'
"""
return bind_api(
api=self,
path='/trends/place.json',
payload_type='json',
allowed_param=['id', 'exclude']
)
@property
def trends_closest(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/trends/closest
        :allowed_param:'lat', 'long'
"""
return bind_api(
api=self,
path='/trends/closest.json',
payload_type='json',
allowed_param=['lat', 'long']
)
@property
def search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/search
:allowed_param:'q', 'lang', 'locale', 'since_id', 'geocode', 'max_id', 'since', 'until', 'result_type', 'count', 'include_entities', 'from', 'to', 'source']
"""
return bind_api(
api=self,
path='/search/tweets.json',
payload_type='search_results',
allowed_param=['q', 'lang', 'locale', 'since_id', 'geocode', 'max_id', 'since', 'until', 'result_type',
'count', 'include_entities', 'from', 'to', 'source']
)
@property
def trends_daily(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/trends/daily
        :allowed_param:'date', 'exclude'
"""
return bind_api(
api=self,
path='/trends/daily.json',
payload_type='json',
allowed_param=['date', 'exclude']
)
@property
def trends_weekly(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/trends/weekly
        :allowed_param:'date', 'exclude'
"""
return bind_api(
api=self,
path='/trends/weekly.json',
payload_type='json',
allowed_param=['date', 'exclude']
)
@property
def reverse_geocode(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/geo/reverse_geocode
        :allowed_param:'lat', 'long', 'accuracy', 'granularity', 'max_results'
"""
return bind_api(
api=self,
path='/geo/reverse_geocode.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'accuracy', 'granularity', 'max_results']
)
@property
def geo_id(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/geo/id
        :allowed_param:'id'
"""
return bind_api(
api=self,
path='/geo/id/{id}.json',
payload_type='place',
allowed_param=['id']
)
@property
def geo_search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/geo/search
        :allowed_param:'lat', 'long', 'query', 'ip', 'granularity', 'accuracy', 'max_results', 'contained_within'
"""
return bind_api(
api=self,
path='/geo/search.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'query', 'ip', 'granularity', 'accuracy', 'max_results', 'contained_within']
)
@property
def geo_similar_places(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/geo/similar_places
        :allowed_param:'lat', 'long', 'name', 'contained_within'
"""
return bind_api(
api=self,
path='/geo/similar_places.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'name', 'contained_within']
)
@property
def supported_languages(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/help/languages """
return bind_api(
api=self,
path='/help/languages.json',
payload_type='json',
require_auth=True
)
@property
def configuration(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/help/configuration """
return bind_api(
api=self,
path='/help/configuration.json',
payload_type='json',
require_auth=True
)
""" Internal use only """
@staticmethod
def _pack_image(filename, max_size, form_field="image", f=None):
"""Pack image from file into multipart-formdata post body"""
        # image must be smaller than max_size KB
if f is None:
try:
if os.path.getsize(filename) > (max_size * 1024):
                    raise TweepError('File is too big, must be less than %d KB.' % max_size)
except os.error:
raise TweepError('Unable to access file')
# build the mulitpart-formdata body
fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
if f.tell() > (max_size * 1024):
                raise TweepError('File is too big, must be less than %d KB.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
        # encode once, and only if the name is still a unicode object
        if isinstance(filename, unicode):
            filename = filename.encode("utf-8")
BOUNDARY = 'Tw3ePy'
body = []
body.append('--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (form_field, filename))
body.append('Content-Type: %s' % file_type)
body.append('')
body.append(fp.read())
body.append('--' + BOUNDARY + '--')
body.append('')
fp.close()
body = '\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': str(len(body))
}
return headers, body
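# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal usage example
# for the API class above. The credentials are placeholders, and the import
# path for OAuthHandler assumes tweepy's usual layout.
if __name__ == '__main__':
    from tweepy.auth import OAuthHandler
    auth = OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
    auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
    api = API(auth, wait_on_rate_limit=True)
    # Fetch the five most recent tweets from a public timeline.
    for status in api.user_timeline(screen_name='twitter', count=5):
        print status.text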
| svven/tweepy | tweepy/api.py | Python | mit | 44,828 | 0.002521 |
import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("arandr"),
("atom"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cowsay"),
("cron"),
("curl"),
("deluge"),
("diod"),
("docker-ce"),
("dropbox"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("gnupg2"),
("gnupg-agent"),
("hardinfo"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("ipython"),
("jq"),
("language-pack-en-base"),
("laptop-mode-tools"),
("meld"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("pavucontrol"),
("pinta"),
("pulseaudio"),
("pulseaudio-module-x11"),
("pulseaudio-utils"),
("python"),
("python-pip"),
("scrot"),
("sl"),
("slack-desktop"),
("software-properties-common"),
("suckless-tools"),
("sysdig"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("virtualbox"),
("vlc"),
("wget"),
("wireshark"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(host, name):
pkg = host.package(name)
assert pkg.is_installed
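# Editor's note (assumed tooling): the `host` fixture suggests testinfra, so a
# hypothetical invocation would be `py.test --hosts=ssh://user@target test_packages.py`.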
| wicksy/laptop-build | test/test_packages.py | Python | mit | 1,214 | 0.003295 |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import views as user_views
from openstack_dashboard.utils import filters
from openstack_dashboard.dashboards.admin.networks.agents \
import tables as agents_tables
from openstack_dashboard.dashboards.admin.networks \
import forms as project_forms
from openstack_dashboard.dashboards.admin.networks.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.admin.networks.subnets \
import tables as subnets_tables
from openstack_dashboard.dashboards.admin.networks \
import tables as networks_tables
class IndexView(tables.DataTableView):
table_class = networks_tables.NetworksTable
template_name = 'admin/networks/index.html'
page_title = _("Networks")
@memoized.memoized_method
def _get_tenant_list(self):
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _("Unable to retrieve information about the "
"networks' projects.")
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t) for t in tenants])
return tenant_dict
def _get_agents_data(self, network):
agents = []
data = _("Unknown")
try:
if api.neutron.is_extension_supported(self.request,
'dhcp_agent_scheduler'):
# This method is called for each network. If agent-list cannot
# be retrieved, we will see many pop-ups. So the error message
# will be popup-ed in get_data() below.
agents = api.neutron.list_dhcp_agent_hosting_networks(
self.request, network)
data = len(agents)
except Exception:
self.exception = True
return data
def get_data(self):
try:
networks = api.neutron.network_list(self.request)
except Exception:
networks = []
msg = _('Network list can not be retrieved.')
exceptions.handle(self.request, msg)
if networks:
self.exception = False
tenant_dict = self._get_tenant_list()
for n in networks:
# Set tenant name
tenant = tenant_dict.get(n.tenant_id, None)
n.tenant_name = getattr(tenant, 'name', None)
n.num_agents = self._get_agents_data(n.id)
if self.exception:
msg = _('Unable to list dhcp agents hosting network.')
exceptions.handle(self.request, msg)
return networks
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateNetwork
template_name = 'admin/networks/create.html'
success_url = reverse_lazy('horizon:admin:networks:index')
page_title = _("Create Network")
class DetailView(tables.MultiTableView):
table_classes = (subnets_tables.SubnetsTable,
ports_tables.PortsTable,
agents_tables.DHCPAgentsTable)
template_name = 'project/networks/detail.html'
page_title = _("Network Details: {{ network.name }}")
def get_subnets_data(self):
try:
network_id = self.kwargs['network_id']
subnets = api.neutron.subnet_list(self.request,
network_id=network_id)
except Exception:
subnets = []
msg = _('Subnet list can not be retrieved.')
exceptions.handle(self.request, msg)
return subnets
def get_ports_data(self):
try:
network_id = self.kwargs['network_id']
ports = api.neutron.port_list(self.request, network_id=network_id)
except Exception:
ports = []
msg = _('Port list can not be retrieved.')
exceptions.handle(self.request, msg)
return ports
def get_agents_data(self):
agents = []
try:
network_id = self.kwargs['network_id']
agents = api.neutron.list_dhcp_agent_hosting_networks(self.request,
network_id)
except Exception:
msg = _('Unable to list dhcp agents hosting network.')
exceptions.handle(self.request, msg)
return agents
@memoized.memoized_method
def _get_data(self):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve details for '
'network "%s".') % network_id,
redirect=self.get_redirect_url())
return network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
network = self._get_data()
# Needs to exclude agents table if dhcp-agent-scheduler extension
# is not supported.
try:
dhcp_agent_support = api.neutron.is_extension_supported(
self.request, 'dhcp_agent_scheduler')
context['dhcp_agent_support'] = dhcp_agent_support
except Exception:
context['dhcp_agent_support'] = False
table = networks_tables.NetworksTable(self.request)
context["network"] = network
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(network)
choices = networks_tables.project_tables.STATUS_DISPLAY_CHOICES
network.status_label = (
filters.get_display_label(choices, network.status))
choices = networks_tables.DISPLAY_CHOICES
network.admin_state_label = (
filters.get_display_label(choices, network.admin_state))
return context
@staticmethod
def get_redirect_url():
return reverse_lazy('horizon:admin:networks:index')
class UpdateView(user_views.UpdateView):
form_class = project_forms.UpdateNetwork
template_name = 'admin/networks/update.html'
success_url = reverse_lazy('horizon:admin:networks:index')
submit_url = "horizon:admin:networks:update"
def get_initial(self):
network = self._get_object()
return {'network_id': network['id'],
'tenant_id': network['tenant_id'],
'name': network['name'],
'admin_state': network['admin_state_up'],
'shared': network['shared'],
'external': network['router__external']}
| redhat-cip/horizon | openstack_dashboard/dashboards/admin/networks/views.py | Python | apache-2.0 | 7,648 | 0 |
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import sys
import logging
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_babel import Babel
LOGGER = logging.getLogger(__name__)
def to_list(obj):
obj_type = type(obj)
if obj_type is str:
return obj.replace(' ', '').split(',')
elif obj_type is list:
return obj
elif obj_type is set:
return list(obj)
else:
        raise TypeError('unknown type for Plugin: %s' % str(obj_type))
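# Illustrative behaviour of to_list (the values are made up, not real plugins):
#   to_list('Plugin1, Plugin2')  -> ['Plugin1', 'Plugin2']  (spaces stripped first)
#   to_list(['Plugin1'])         -> ['Plugin1']
#   to_list({'Plugin1'})         -> ['Plugin1']  (set order is not guaranteed)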
class App:
"""
Singleton: sole static instance of Flask App
"""
app_instance = None
db_instance = None
babel_instance = None
plugins_instance = None
home_dir = None
count = 0
@staticmethod
def init():
# Do init once
app = Flask(__name__)
# Read and override configs
app.config.from_pyfile('config_main.py')
app.config.from_pyfile('../instance/config_site.py')
# Global Logging config
logging.basicConfig(level=int(app.config['GHC_LOG_LEVEL']),
format=app.config['GHC_LOG_FORMAT'])
app.config['GHC_SITE_URL'] = \
app.config['GHC_SITE_URL'].rstrip('/')
app.secret_key = app.config['SECRET_KEY']
App.db_instance = SQLAlchemy(app)
App.babel_instance = Babel(app)
# Plugins (via Docker ENV) must be list, but may have been
# specified as comma-separated string, or older set notation
app.config['GHC_PLUGINS'] = to_list(app.config['GHC_PLUGINS'])
app.config['GHC_USER_PLUGINS'] = \
to_list(app.config['GHC_USER_PLUGINS'])
# Concatenate core- and user-Plugins
App.plugins_instance = \
app.config['GHC_PLUGINS'] + app.config['GHC_USER_PLUGINS']
# Needed to find Plugins
        home_dir = os.path.dirname(os.path.abspath(__file__))
        sys.path.append('%s/..' % home_dir)
        App.home_dir = home_dir
# Finally assign app-instance
App.app_instance = app
App.count += 1
LOGGER.info("created GHC App instance #%d" % App.count)
@staticmethod
def get_app():
return App.app_instance
@staticmethod
def get_babel():
return App.babel_instance
@staticmethod
def get_config():
return App.app_instance.config
@staticmethod
def get_db():
return App.db_instance
@staticmethod
def get_home_dir():
return App.home_dir
@staticmethod
def get_plugins():
return App.plugins_instance
App.init()
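# Typical use from an entry point (a sketch; the host/port values are
# assumptions, not taken from this project's configuration):
#   app = App.get_app()
#   app.run(host='0.0.0.0', port=8000)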
| tomkralidis/GeoHealthCheck | GeoHealthCheck/init.py | Python | mit | 3,809 | 0 |
'''
The MIT License (MIT)
Copyright (c) 2016 Vasileios Kagklis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import glob, nltk, time, os, re, subprocess, math, urllib2, socket, itertools
from nltk.corpus import wordnet
from bs4 import BeautifulSoup,SoupStrainer
from operator import itemgetter
from xml.etree.ElementTree import ElementTree
from itertools import groupby
def most_common_func(L):
return max(groupby(sorted(L)), key=lambda(x, v):(len(list(v)),-L.index(x)))[0]
#folder with txt, pdf & html files - must be located in the same dir with the script
path = os.getcwd()
tkn_path = path + "\\tokenize\\"
tag_path = path + "\\tagged\\"
types = ['*.txt', '*.html', '*.htm']
while(1):
x = raw_input("Press:\n1. Run profile analysis \
\n2. Enter index mode\n3. Exit Program\nYour option: ")
accepted_inputs = ['1','2','3']
while(x not in accepted_inputs):
x = raw_input("Invalid input!!! Press:\n1. Run profile analysis \
\n2. Enter index mode\n3. Exit Program\nYour option: ")
#----------------------------------- READ BOOKMARK FILE -------------------------------------#
    if(x == '1'):
if not(os.path.exists(path+"\profile")):
print "\nThere is no profile folder. Create a folder with the name profile and put your files in it. Then re-try...\n"
continue
inverted_index = {}
#noun_dictionary -> {noun1:{doc_id1:weight1, doc_id2:weight2, ...}, noun2:... }
noun_dictionary = {}
t0=time.time()
try:
fh = open('bookmarks.html', 'r')
except:
print "\nThere is no file named bookmarks.html! Export bookmarks in HTML and re-try...\n"
continue
page = fh.read()
fh.close()
#------------------------- EXTRACT LINKS AND PUT THEM ON A TXT/LIST -------------------------#
with open('url_links.txt', 'w') as fh:
for link in BeautifulSoup(page, 'html.parser', parse_only=SoupStrainer('a')):
if link.has_key('href') and re.match('^http',link['href']):
fh.write(link['href']+"\n")
#---------------------------- DOWNLOAD WEB PAGES - .HTML FILES ------------------------------#
i=1
fh = open('url_links.txt', 'r')
for link in fh.readlines():
request = urllib2.Request(link)
opener = urllib2.build_opener()
try:
filehandle = opener.open(request, timeout=5)
myFile = open(path+'\\profile\\'+'bookmark%04d.html'%i,'w')
myFile.write(link)
myFile.write(filehandle.read())
except urllib2.URLError:
filehandle.close()
continue
except socket.timeout:
filehandle.close()
continue
except:
myFile.close()
filehandle.close()
continue
i += 1
myFile.close()
filehandle.close()
print ("\nBookmarked web pages have been succesfuly downloaded!\nProceeding...")
try:
os.mkdir("tokenize")
os.mkdir("tagged")
except WindowsError:
pass
tokens_list=[]
squares = []
open_class_cat =['JJ','JJR','JJS','NN','NNS','NP','NPS','NNP','NNPS','RB','RBR','RBS','VV','VVD','VVG','VVN','VVP','VVZ','FW']
#-------------------------------- START TO LOOP INTO FILES ----------------------------------#
#dictionary with id - path/url correspondence
dic = {}
i = 1
N = 0
for file_type in types:
N += len(glob.glob(path+"\\profile\\"+file_type))
for file_type in types:
for doc in glob.glob(path+"\\profile\\"+file_type):
if not(re.match('.+bookmark\d{4}\.html$', doc)):
                    dic[i] = doc  # glob already yields the full path
else:
with open(doc, 'r') as fh:
link = fh.readline()
dic[i] = re.compile('\n').sub('',link)
#-------------------------------------- TOKENIZATION ----------------------------------------#
#exclude files that contain no latin characters
                try:
                    with open(doc, 'r') as fh:
                        s = fh.read()
                except IOError:
                    continue
                if not(re.match('((.|\n)*[a-zA-Z]+(.|\n)*)+', s)):
                    continue
s = BeautifulSoup(s, 'html.parser')
tokens_list = nltk.word_tokenize(s.get_text())
tkn = "tokenized_text_%04d.txt"%i
with open(tkn_path+tkn,'w') as fh:
for each_token in tokens_list:
if not(re.search('&',each_token)) and not(each_token.isspace()):
fh.write(each_token.encode('utf-8'))
fh.write("\n")
#------------------------------------------ TAGGING -----------------------------------------#
tag = "tagged_output_%04d.txt"%i
subprocess.call('.\\TreeTagger\\bin\\tree-tagger.exe -token -lemma .\\TreeTagger\\lib\\english.par "'+tkn_path+tkn+'">"'+tag_path+tag+'"',shell=True)
#-------------------------------------- REMOVE STOP WORDS -----------------------------------#
with open(tag_path+tag, 'r') as fh:
lemmas = []
for line in fh.readlines():
op = line.split()
if ((op[1] in open_class_cat) and (op[2] != '<unknown>') and (op[2] != '@card@')and (op[2] != '@ord@')and (op[2] != '%')):
p = re.compile('(^[\w]{1}$)|(^[\w]+[\.]$)|(^[\w]-[0-9]+)|(^[\w]-[\w])|(^[\w]-)|(-[\w]-[\w]-[\w])|([0-9]+-[0-9]+)|(^[0-9]+$)|((^[\w])([\d]$))')
op[2] = p.sub('', op[2])
#------------------------------- START CREATING THE INVERTED INDEX --------------------------#
if (op[2] != ''):
if op[2].lower() not in inverted_index:
inverted_index[op[2].lower()] = {}
lemmas.append(op[2].lower())
if(op[2].lower() not in noun_dictionary and (op[1] == 'NN' or op[1] == 'NNS') and op[2] != '<unknown>'):
noun_dictionary[op[2].lower()] = {}
if ((op[1] == 'NN' or op[1] == 'NNS') and op[2] != '<unknown>'):
noun_dictionary[op[2].lower()][i] = 0
u_lemmas = list(set(lemmas))
#--------------------------------- CALCULATING SUM OF (tf*idf)^2 ----------------------------#
squares.append(0)
for lemma in u_lemmas:
inverted_index[lemma][i] = int(lemmas.count(lemma))
tf = float(lemmas.count(lemma))
if lemma in noun_dictionary.keys():
noun_dictionary[lemma][i] = tf
                    idf = float(math.log10(float(N) / len(inverted_index[lemma])))
squares[i-1] += float(pow(tf*idf,2))
i += 1
#------------------------ CREATING INVERTED INDEX AND SAVING IT IN XML FILE -----------------#
del u_lemmas, lemmas
top20 = []
with open("inverted_index.xml", 'w') as fh_index:
fh_index.write('<?xml version=\"1.0\" ?>\n')
fh_index.write('<inverted_index>\n')
for lemma in inverted_index:
fh_index.write("\t<lemma name=\""+lemma+"\">\n")
for doc_id,term_frequency in inverted_index[lemma].items():
tf = float(term_frequency)
#idf=log10(total documents/number of documents that contain lemma)
                    idf = float(math.log10(float(N) / len(inverted_index[lemma])))
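                    # cosine-style length normalization: divide by the document's
                    # tf-idf norm; the +1 guards against a zero denominator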
weight=float(float(tf*idf)/float(math.sqrt(squares[doc_id-1])+1))
inverted_index[lemma][doc_id] = weight
fh_index.write("\t\t<document id=\"%d\" weight=\"%f\"/>\n"%(doc_id,weight))
fh_index.write('\t</lemma>\n')
fh_index.write('</inverted_index>\n')
fh_index.write('<doc_index>\n')
for i in dic:
                fh_index.write('\t<matching id="%d" path="' % i + dic[i] + '"/>\n')
fh_index.write('</doc_index>\n')
#------------------------------- FIND TOP 20 POPULAR NOUNS ----------------------------------#
noun_list = []
noun_freq_list = []
for lemma in noun_dictionary:
sum_w = 0
for freq in noun_dictionary[lemma].values():
sum_w += freq
noun_list.append(lemma)
noun_freq_list.append(float(sum_w/N))
for j in range(len(noun_list)):
top20.append((noun_list[j],noun_freq_list[j]))
top20 = sorted(top20, key=itemgetter(1),reverse=True)
top20 = top20[:20]
#--------------------------------- DESTROY REDUNDANT ITEMS ----------------------------------#
del tokens_list, noun_dictionary, noun_list, noun_freq_list, squares
#---------------------------------- RUN PROFILE ANALYSIS ------------------------------------#
step = 4
const_step = step
top20=top20+top20[:step]
WSD = {}
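        # Word-sense disambiguation over the top-20 nouns: slide a window of
        # const_step nouns (top20 + top20[:step] makes the window wrap around),
        # take the cartesian product of their WordNet synsets, and keep the
        # combination with the highest mean pairwise Wu-Palmer similarity.
        # Each noun's final sense is the most common winner over its windows.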
while(step<=len(top20)):
#print step
syns = []
pointer = []
if step<=20:
pointer=range(step-const_step,step)
else:
pointer=range(step-const_step,20)
pointer +=range(0,step-20)
for j in pointer:
if(wordnet.synsets(top20[j][0], pos=wordnet.NOUN)):
syns.append(wordnet.synsets(top20[j][0], pos=wordnet.NOUN))
else:
syns.append((1,1))
confs = [()]
for x in syns:
confs = [i + (y,) for y in x for i in confs]
max_conf=0
max_sim=0
for conf in confs:
combinations = list(itertools.combinations(conf,2))
sim = 0
for pair in combinations:
                    if pair[0] != 1 and pair[1] != 1:
                        # wup_similarity can return None for some pairs; count that as 0
                        sim += wordnet.wup_similarity(pair[0], pair[1]) or 0
sim = float(sim)/float(len(combinations))
if(sim >= max_sim):
max_sim = sim
max_conf = confs.index(conf)
j=0
for element in confs[max_conf]:
if pointer[j] not in WSD:
WSD[pointer[j]] = []
WSD[pointer[j]].append(element)
j += 1
step += 1
        t1 = time.time()
        # use a separate name so the `time` module is not shadowed
        elapsed = t1 - t0
        minutes = elapsed / 60
        sec = elapsed % 60
        print ("Profile Analysis completed in %d minutes and %d seconds" % (minutes, sec))
print ("Your interests are represented from the following nouns and their definitions: \n")
j=0
for element in WSD:
            if most_common_func(WSD[j]) != 1:
print (most_common_func(WSD[j]).name()+": "+most_common_func(WSD[j]).definition())
j+=1
#------------------------- LOADING INVERTED INDEX FROM XML FILE -----------------------------#
    elif(x == '2'):
flag = 0
try:
len(dic)
except NameError:
dic = {}
flag = 1
else:
pass
try:
len(inverted_index)
except NameError:
print "No index was created recently! Checking for a saved copy... "
try:
with open('inverted_index.xml') as f: pass
except IOError as e:
print 'There was no saved index found!\n\n'
else:
print "A saved index was found. Loading...!"
inverted_index = {}
fh = open("./inverted_index.xml", 'r')
for line in fh.readlines():
if(re.match('(.|\n)*<lemma', line)):
lemma = re.search('"(.*)"', line).group(1)
inverted_index[lemma] = {}
elif(re.match('(.|\n)*<document', line)):
op = line.split('"')
inverted_index[lemma][int(op[1])] = float(op[3])
elif(re.match('(.|\n)*<matching', line) and flag):
op = line.split('"')
dic[int(op[1])] = op[3]
else:
continue
#------------------------------ SEARCH QUERY IN INVERTED INDEX ------------------------------#
try:
len(inverted_index)
except NameError:
print "\nIndex hasn't been created or loaded!\n"
else:
while(True):
text_ID_list = []
weight_list = []
index_result = []
query = raw_input('Please insert queries or -1 to exit: \n')
if query == '-1':
print "Exiting Index...\n\n"
break
t0 = time.time()
query_list = query.split()
for each_query in query_list:
if each_query in inverted_index.keys():
for text_id, weight in inverted_index[each_query].items():
if text_id not in text_ID_list:
text_ID_list.append(text_id)
weight_list.append(weight)
else:
                                text_pointer = text_ID_list.index(text_id)
                                weight_list[text_pointer] += weight
else:
print("\nCouldn't be found in index!!\n")
break
for j in range(len(text_ID_list)):
if weight_list[j] > 0:
index_result.append((text_ID_list[j],weight_list[j]))
query_xml = sorted(index_result, key=itemgetter(1),reverse=True)
                t1 = time.time()
                elapsed = t1 - t0
if len(query_xml)>0:
for doc_id,weight in query_xml:
print ("ID: %d Path/URL: "%doc_id+dic[doc_id]+"\nCosine Similarity: %f\n"%weight)
else:
print("Query appears in every file or doesn't appear at all")
print("Respond time = %f\n"%time)
#-------------------------------------- EXIT PROGRAM ----------------------------------------#
    elif(x == '3'):
break
| kagklis/profile-analysis | profile_analysis.py | Python | mit | 16,905 | 0.017569 |
#!/usr/bin/env python
#export CC=mpicc
#export CXX=mpic++
from distutils.core import setup, Extension
from distutils import sysconfig
import sys
print "Remember to set your preferred MPI C++ compiler in the CC and CXX environment variables. For example, in Bash:"
print "export CC=mpicxx"
print "export CXX=mpicxx"
print ""
def see_if_compiles(program, include_dirs, define_macros):
""" Try to compile the passed in program and report if it compiles successfully or not. """
from distutils.ccompiler import new_compiler, CompileError
from shutil import rmtree
import tempfile
import os
try:
tmpdir = tempfile.mkdtemp()
except AttributeError:
# Python 2.2 doesn't have mkdtemp().
tmpdir = "compile_check_tempdir"
try:
os.mkdir(tmpdir)
except OSError:
print "Can't create temporary directory. Aborting."
sys.exit()
old = os.getcwd()
os.chdir(tmpdir)
# Try to include the header
f = open('compiletest.cpp', 'w')
f.write(program)
f.close()
try:
c = new_compiler()
for macro in define_macros:
c.define_macro(name=macro[0], value=macro[1])
c.compile([f.name], include_dirs=include_dirs)
success = True
except CompileError:
success = False
os.chdir(old)
rmtree(tmpdir)
return success
def check_for_header(header, include_dirs, define_macros):
"""Check for the existence of a header file by creating a small program which includes it and see if it compiles."""
program = "#include <%s>\n" % header
sys.stdout.write("Checking for <%s>... " % header)
success = see_if_compiles(program, include_dirs, define_macros)
if (success):
sys.stdout.write("OK\n");
else:
sys.stdout.write("Not found\n");
return success
def check_for_MPI_IN_PLACE(include_dirs, define_macros):
""" Check for the existence of the MPI_IN_PLACE constant. """
program = """
#include <mpi.h>
int main(int argc, const char** argv) {
void* buf = NULL;
MPI_Allreduce(MPI_IN_PLACE, buf, 10, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
return 0;
}
"""
sys.stdout.write("Checking for MPI_IN_PLACE... ")
sys.stdout.flush()
success = see_if_compiles(program, include_dirs, define_macros)
if (success):
sys.stdout.write("OK\n");
else:
sys.stdout.write("Not found\n");
return success
def check_for_C99_CONSTANTS(include_dirs, define_macros):
""" See if C99 constants for integers are defined. """
program = """
#include <iostream>
#include <stdint.h>
int main()
{
    uint64_t v = 0, val0 = 0;
    v *= (val0 | UINT64_C(0x4519840211493211));
    uint32_t l = (uint32_t)(v & UINT32_MAX);
    return (int)(v + l);
}
"""
sys.stdout.write("Checking for C99 constants... ")
success = see_if_compiles(program, include_dirs, define_macros)
if (success):
sys.stdout.write("OK\n");
else:
sys.stdout.write("Not found, will use __STDC_CONSTANT_MACROS and __STDC_LIMIT_MACROS\n");
return success
# parse out additional include dirs from the command line
include_dirs = []
define_macros = [("MPICH_IGNORE_CXX_SEEK", None)]
copy_args=sys.argv[1:]
for a in copy_args:
if a.startswith('-I'):
include_dirs.append(a[2:])
copy_args.remove(a)
if a.startswith('-D'):
# macros can be a single value or a constant=value pair
macro = tuple(a[2:].split("="))
if (len(macro) == 1):
macro = (macro[0], None)
define_macros.append(macro)
copy_args.remove(a)
# see if the compiler has TR1
hasCpp0x = False
hasTR1 = False
hasBoost = False
headerDefs = []
print "Checking for C++0x:"
if check_for_header("memory", include_dirs, define_macros):
hasCpp0x = True
else:
print "No C++0x. Checking for TR1:"
if check_for_header("tr1/memory", include_dirs, define_macros):
hasTR1 = True
headerDefs = [('COMBBLAS_TR1', None)]
else:
# nope, see if boost is available
print "No TR1. Checking for Boost:"
if check_for_header("boost/tr1/memory.hpp", include_dirs, define_macros):
hasBoost = True
headerDefs = [('COMBBLAS_BOOST', None)]
else:
# nope, then sorry
print "KDT uses features from C++0x (TR1). These are available from some compilers or through the Boost C++ library (www.boost.org)."
print "Please make sure Boost is in your system include path or append the include path with the -I switch."
print "For example, if you have Boost installed in /home/username/include/boost:"
print "$ python setup.py build -I/home/username/include"
sys.exit();
#if (not check_for_MPI_IN_PLACE(include_dirs, define_macros)):
# print "Please use a more recent MPI implementation."
# print "If you system has multiple MPI implementations you can set your preferred MPI C++ compiler in the CC and CXX environment variables. For example, in Bash:"
# print "export CC=mpicxx"
# print "export CXX=mpicxx"
# sys.exit();
if (not check_for_C99_CONSTANTS(include_dirs, define_macros)):
define_macros.append(("__STDC_CONSTANT_MACROS", None))
define_macros.append(("__STDC_LIMIT_MACROS", None))
COMBBLAS = "CombBLAS/"
PCB = "kdt/pyCombBLAS/"
GENERATOR = "CombBLAS/graph500-1.2/generator/"
#files for the graph500 graph generator.
generator_files = [GENERATOR+"btrd_binomial_distribution.c", GENERATOR+"splittable_mrg.c", GENERATOR+"mrg_transitions.c", GENERATOR+"graph_generator.c", GENERATOR+"permutation_gen.c", GENERATOR+"make_graph.c", GENERATOR+"utils.c", GENERATOR+"scramble_edges.c"]
#pyCombBLAS extension which wraps the templated C++ Combinatorial BLAS library.
pyCombBLAS_ext = Extension('kdt._pyCombBLAS',
[PCB+"pyCombBLAS.cpp", PCB+"pyCombBLAS_wrap.cpp", PCB+"pyDenseParVec.cpp", PCB+"pyObjDenseParVec.cpp", PCB+"pySpParVec.cpp", PCB+"pySpParMat.cpp", PCB+"pySpParMatBool.cpp", PCB+"pyOperations.cpp", COMBBLAS+"CommGrid.cpp", COMBBLAS+"MPIType.cpp", COMBBLAS+"MemoryPool.cpp"] + generator_files,
include_dirs=include_dirs,
define_macros=[('NDEBUG', '1'),('restrict', '__restrict__'),('GRAPH_GENERATOR_SEQ', '1')] + headerDefs + define_macros)
setup(name='kdt',
version='0.1',
description='Knowledge Discovery Toolbox',
author='Aydin Buluc, John Gilbert, Adam Lugowski, Steve Reinhardt',
url='http://kdt.sourceforge.net',
# packages=['kdt', 'kdt'],
ext_modules=[pyCombBLAS_ext],
py_modules = ['kdt.pyCombBLAS', 'kdt.Graph', 'kdt.DiGraph', 'kdt.HyGraph', 'kdt.feedback', 'kdt.UFget'],
script_args=copy_args
)
| harperj/KDTSpecializer | setup.py | Python | bsd-3-clause | 6,192 | 0.028262 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class AccountInvoiceSend(models.TransientModel):
_name = 'account.invoice.send'
_inherit = 'account.invoice.send'
_description = 'Account Invoice Send'
partner_id = fields.Many2one('res.partner', compute='_get_partner', string='Partner')
    snailmail_is_letter = fields.Boolean('Send by Post', help='Allows sending the document by Snailmail (conventional postal delivery service)', default=lambda self: self.env.company.invoice_is_snailmail)
snailmail_cost = fields.Float(string='Stamp(s)', compute='_compute_snailmail_cost', readonly=True)
invalid_addresses = fields.Integer('Invalid Addresses Count', compute='_compute_invalid_addresses')
invalid_invoice_ids = fields.Many2many('account.move', string='Invalid Addresses', compute='_compute_invalid_addresses')
@api.depends('invoice_ids')
def _compute_invalid_addresses(self):
for wizard in self:
invalid_invoices = wizard.invoice_ids.filtered(lambda i: not self.env['snailmail.letter']._is_valid_address(i.partner_id))
wizard.invalid_invoice_ids = invalid_invoices
wizard.invalid_addresses = len(invalid_invoices)
@api.depends('invoice_ids')
def _get_partner(self):
self.partner_id = self.env['res.partner']
for wizard in self:
if wizard.invoice_ids and len(wizard.invoice_ids) == 1:
wizard.partner_id = wizard.invoice_ids.partner_id.id
@api.depends('snailmail_is_letter')
def _compute_snailmail_cost(self):
for wizard in self:
wizard.snailmail_cost = len(wizard.invoice_ids.ids)
def snailmail_print_action(self):
self.ensure_one()
letters = self.env['snailmail.letter']
for invoice in self.invoice_ids:
letter = self.env['snailmail.letter'].create({
'partner_id': invoice.partner_id.id,
'model': 'account.move',
'res_id': invoice.id,
'user_id': self.env.user.id,
'company_id': invoice.company_id.id,
'report_template': self.env.ref('account.account_invoices').id
})
letters |= letter
self.invoice_ids.filtered(lambda inv: not inv.is_move_sent).write({'is_move_sent': True})
if len(self.invoice_ids) == 1:
letters._snailmail_print()
else:
letters._snailmail_print(immediate=False)
def send_and_print_action(self):
if self.snailmail_is_letter:
if self.env['snailmail.confirm.invoice'].show_warning():
wizard = self.env['snailmail.confirm.invoice'].create({'model_name': _('Invoice'), 'invoice_send_id': self.id})
return wizard.action_open()
self._print_action()
return self.send_and_print()
def _print_action(self):
if not self.snailmail_is_letter:
return
if self.invalid_addresses and self.composition_mode == "mass_mail":
self.notify_invalid_addresses()
self.snailmail_print_action()
def send_and_print(self):
res = super(AccountInvoiceSend, self).send_and_print_action()
return res
def notify_invalid_addresses(self):
self.ensure_one()
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', self.env.user.partner_id.id),
{'type': 'snailmail_invalid_address', 'title': _("Invalid Addresses"),
'message': _("%s of the selected invoice(s) had an invalid address and were not sent", self.invalid_addresses)}
)
def invalid_addresses_action(self):
return {
'name': _('Invalid Addresses'),
'type': 'ir.actions.act_window',
'view_mode': 'kanban,tree,form',
'res_model': 'account.move',
'domain': [('id', 'in', self.mapped('invalid_invoice_ids').ids)],
}
| ddico/odoo | addons/snailmail_account/wizard/account_invoice_send.py | Python | agpl-3.0 | 4,064 | 0.002953 |
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import ssl
import sys
import pymongo
sys.path[0:0] = [""] # noqa
from mongo_connector import config, errors, connector
from mongo_connector.connector import get_config_options, setup_logging
from mongo_connector.doc_managers import doc_manager_simulator
from tests import unittest
def from_here(*paths):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)
class TestConfig(unittest.TestCase):
"""Test parsing a JSON config file into a Config object."""
def setUp(self):
self.reset_config()
def reset_config(self):
self.options = get_config_options()
self.conf = config.Config(self.options)
def load_json(self, d, validate=True, reset_config=True):
if reset_config:
self.reset_config()
# Serialize a python dictionary to json, then load it
text = json.dumps(d)
self.conf.load_json(text)
if validate:
self.load_options(reset_config=False)
def load_options(self, d={}, reset_config=True):
if reset_config:
self.reset_config()
argv = []
for k, v in d.items():
argv.append(str(k))
if v is not None:
argv.append(str(v))
self.conf.parse_args(argv)
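    # e.g. self.load_options({'-m': 'localhost:27017', '-v': None}) parses the
    # equivalent of the command line: mongo-connector -m localhost:27017 -v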
def test_default(self):
# Make sure default configuration doesn't raise any exceptions
self.load_options()
def test_parse_json(self):
# Test for basic json parsing correctness
test_config = {
"mainAddress": u"testMainAddress",
"oplogFile": u"testOplogFile",
"noDump": True,
"batchSize": 69,
"verbosity": 3,
"logging": {
"type": u"file",
"filename": u"testFilename",
"format": u"%(asctime)s [%(levelname)s] %(name)s:%(lineno)d"
u" - %(message)s",
"rotationWhen": u"midnight",
"rotationInterval": 1,
"rotationBackups": 7,
"host": u"testHost",
"facility": u"testFacility",
},
"authentication": {
"adminUsername": u"testAdminUsername",
"password": u"testPassword",
"passwordFile": u"testPasswordFile",
},
"fields": [u"testFields1", u"testField2"],
"namespaces": {
"include": [u"testNamespaceSet"],
"exclude": [u"testExcludeNamespaceSet"],
"mapping": {"testMapKey": u"testMapValue"},
"gridfs": [u"testGridfsSet"],
},
}
self.load_json(test_config, validate=False)
for test_key in test_config:
self.assertEqual(self.conf[test_key], test_config[test_key])
# Test for partial dict updates
test_config = {
"logging": {"type": "syslog", "host": "testHost2"},
"authentication": {
"adminUsername": "testAdminUsername2",
"passwordFile": "testPasswordFile2",
},
"namespaces": {"exclude": [], "mapping": {}},
}
self.load_json(test_config, validate=False, reset_config=False)
self.assertEqual(
self.conf["logging"],
{
"type": u"syslog",
"filename": u"testFilename",
"format": u"%(asctime)s [%(levelname)s] %(name)s:%(lineno)d"
u" - %(message)s",
"rotationWhen": u"midnight",
"rotationInterval": 1,
"rotationBackups": 7,
"host": u"testHost2",
"facility": u"testFacility",
},
)
self.assertEqual(
self.conf["authentication"],
{
"adminUsername": u"testAdminUsername2",
"password": u"testPassword",
"passwordFile": u"testPasswordFile2",
},
)
self.assertEqual(
self.conf["namespaces"],
{
"include": [u"testNamespaceSet"],
"exclude": [],
"mapping": {},
"gridfs": [u"testGridfsSet"],
},
)
def test_basic_options(self):
# Test the assignment of individual options
def test_option(arg_name, json_key, value, append_cli=True):
self.load_options({arg_name: value if append_cli else None})
self.assertEqual(self.conf[json_key], value)
test_option("-m", "mainAddress", "testMainAddress")
test_option("-o", "oplogFile", "testOplogFileShort")
test_option("--batch-size", "batchSize", 69)
test_option("--continue-on-error", "continueOnError", True, append_cli=False)
test_option("-v", "verbosity", 3, append_cli=False)
self.load_options({"-w": "logFile"})
self.assertEqual(self.conf["logging.type"], "file")
self.assertEqual(self.conf["logging.filename"], os.path.abspath("logFile"))
self.load_options(
{
"-s": None,
"--syslog-host": "testHost",
"--syslog-facility": "testFacility",
}
)
self.assertEqual(self.conf["logging.type"], "syslog")
self.assertEqual(self.conf["logging.host"], "testHost")
self.assertEqual(self.conf["logging.facility"], "testFacility")
self.load_options({"-i": "a,b,c"})
self.assertEqual(self.conf["fields"], ["a", "b", "c"])
def test_extraneous_command_line_options(self):
self.assertRaises(errors.InvalidConfiguration, self.load_options, {"-v": 3})
# No error.
self.load_options({"-v": None})
def test_namespace_set(self):
# test namespace_set and dest_namespace_set
self.load_options(
{
"-n": "source_db_1.col,source_db_2.col,source_db_3.col",
"-g": "dest_db_1.col,dest_db_2.col,dest_db_3.col",
}
)
self.assertEqual(
self.conf["namespaces.include"],
["source_db_1.col", "source_db_2.col", "source_db_3.col"],
)
self.assertEqual(
self.conf["namespaces.mapping"],
{
"source_db_1.col": "dest_db_1.col",
"source_db_2.col": "dest_db_2.col",
"source_db_3.col": "dest_db_3.col",
},
)
# test exclude_namespace_set
self.load_options({"-x": "source_db_1.col,source_db_2.col,source_db_3.col"})
self.assertEqual(
self.conf["namespaces.exclude"],
["source_db_1.col", "source_db_2.col", "source_db_3.col"],
)
def test_namespace_set_validation(self):
# duplicate ns_set
args = {"-n": "a.x,a.x,b.y", "-g": "1.0,2.0,3.0"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
d = {"namespaces": {"include": ["a.x", "a.x", "b.y"]}}
self.assertRaises(errors.InvalidConfiguration, self.load_json, d)
# duplicate ex_ns_set
args = {"-x": "a.x,a.x,b.y"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
d = {"namespaces": {"exclude": ["a.x", "a.x", "b.y"]}}
self.assertRaises(errors.InvalidConfiguration, self.load_json, d)
# duplicate gridfs_set
args = {"--gridfs-set": "a.x,a.x,b.y"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
d = {"namespaces": {"gridfs": ["a.x", "a.x", "b.y"]}}
self.assertRaises(errors.InvalidConfiguration, self.load_json, d)
# duplicate dest_ns_set
args = {"-n": "a.x,b.y,c.z", "--dest-namespace-set": "1.0,3.0,3.0"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
d = {"namespaces": {"mapping": {"a.x": "c.z", "b.y": "c.z"}}}
self.assertRaises(errors.InvalidConfiguration, self.load_json, d)
# len(ns_set) < len(dest_ns_set)
args = {"--namespace-set": "a.x,b.y,c.z", "-g": "1.0,2.0,3.0,4.0"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
# len(ns_set) > len(dest_ns_set)
args = {
"--namespace-set": "a.x,b.y,c.z,d.j",
"--dest-namespace-set": "1.0,2.0,3.0",
}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
# validate ns_set format
args = {"-n": "a*.x*"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
d = {"namespaces": {"include": ["a*.x*"]}}
self.assertRaises(errors.InvalidConfiguration, self.load_json, d)
# validate dest_ns_set format
args = {"-n": "a.x*", "-g": "1*.0*"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
d = {"namespaces": {"mapping": {"a*.x*": "1.0"}}}
self.assertRaises(errors.InvalidConfiguration, self.load_json, d)
d = {"namespaces": {"mapping": {"a.x*": "1*.0*"}}}
self.assertRaises(errors.InvalidConfiguration, self.load_json, d)
def test_validate_mixed_namespace_format(self):
# It is invalid to combine new and old namespace formats
mix_namespaces = [
{"mapping": {"old.format": "old.format2"}, "new.format": True},
{"gridfs": ["old.format"], "new.format": True},
{"include": ["old.format"], "new.format": True},
{"exclude": ["old.format"], "new.format": True},
]
for namespaces in mix_namespaces:
with self.assertRaises(errors.InvalidConfiguration):
self.load_json({"namespaces": namespaces})
def test_doc_managers_from_args(self):
# Test basic docmanager construction from args
args = {
"-d": "doc_manager_simulator",
"-t": "test_target_url",
"-u": "id",
"--auto-commit-interval": 10,
}
self.load_options(args)
self.assertEqual(len(self.conf["docManagers"]), 1)
dm = self.conf["docManagers"][0]
self.assertTrue(isinstance(dm, doc_manager_simulator.DocManager))
self.assertEqual(dm.url, "test_target_url")
self.assertEqual(dm.unique_key, "id")
self.assertEqual(dm.auto_commit_interval, 10)
# no doc_manager but target_url
args = {"-t": "1,2"}
self.assertRaises(errors.InvalidConfiguration, self.load_options, args)
def test_config_validation(self):
# can't log both to syslog and to logfile
self.assertRaises(
errors.InvalidConfiguration,
self.load_options,
{"-w": "logFile", "-s": "true"},
)
# Can't specify --stdout and logfile
self.assertRaises(
errors.InvalidConfiguration,
self.load_options,
{"--stdout": None, "-w": "logFile"},
)
# can't specify a username without a password
self.assertRaises(
errors.InvalidConfiguration, self.load_options, {"-a": "username"}
)
# can't specify password and password file
self.assertRaises(
errors.InvalidConfiguration,
self.load_options,
{"-a": "username", "-p": "password", "-f": "password_file"},
)
# docManagers must be a list
test_config = {"docManagers": "hello"}
self.assertRaises(errors.InvalidConfiguration, self.load_json, test_config)
# every element of docManagers must contain a 'docManager' property
test_config = {"docManagers": [{"targetURL": "testTargetURL"}]}
self.assertRaises(errors.InvalidConfiguration, self.load_json, test_config)
# auto commit interval can't be negative
test_config = {
"docManagers": [{"docManager": "testDocManager", "autoCommitInterval": -1}]
}
self.assertRaises(errors.InvalidConfiguration, self.load_json, test_config)
def test_ssl_validation(self):
"""Test setting sslCertificatePolicy."""
# Setting sslCertificatePolicy to not 'ignored' without a CA file
# PyMongo will attempt to load system provided CA certificates.
for ssl_cert_req in ["required", "optional"]:
no_ca_config = {"ssl": {"sslCertificatePolicy": ssl_cert_req}}
if pymongo.version_tuple < (3, 0):
self.assertRaises(
errors.InvalidConfiguration, self.load_json, no_ca_config
)
else:
self.load_json(no_ca_config)
# Setting sslCertificatePolicy to an invalid option
self.assertRaises(
errors.InvalidConfiguration,
self.load_json,
{"ssl": {"sslCACerts": "ca.pem", "sslCertificatePolicy": "invalid"}},
)
class TestConnectorConfig(unittest.TestCase):
"""Test creating a Connector from a Config."""
# Configuration where every option is set to a non-default value.
set_everything_config = {
"mainAddress": "localhost:12345",
"oplogFile": from_here("lib", "dummy.timestamp"),
"noDump": True,
"batchSize": 3,
"verbosity": 1,
"continueOnError": True,
"timezoneAware": True,
"logging": {
"type": "file",
"filename": from_here("lib", "dummy-connector.log"),
"rotationWhen": "H",
"rotationInterval": 3,
"rotationBackups": 10,
},
"authentication": {
"adminUsername": "elmo",
"passwordFile": from_here("lib", "dummy.pwd"),
},
"ssl": {
"sslCertfile": "certfile.pem",
"sslKeyfile": "certfile.key",
"sslCACerts": "ca.pem",
"sslCertificatePolicy": "optional",
},
"fields": ["field1", "field2", "field3"],
"namespaces": {
"include": ["db.source1", "db.source2"],
"mapping": {"db.source1": "db.dest1", "db.source2": "db.dest2"},
"gridfs": ["db.fs"],
},
"docManagers": [
{
"docManager": "doc_manager_simulator",
"targetURL": "localhost:12345",
"bulkSize": 500,
"uniqueKey": "id",
"autoCommitInterval": 10,
"args": {"key": "value", "clientOptions": {"foo": "bar"}},
}
],
}
# Argv that sets all possible options to a different value from the
# config JSON above. Some options cannot be reset, since they conflict
# with the JSON config and will cause an Exception to be raised.
# Conflicted options are already tested in the TestConfig TestCase.
set_everything_differently_argv = [
"-m",
"localhost:1000",
"-o",
from_here("lib", "bar.timestamp"),
"--batch-size",
"100",
"--verbose",
"--logfile-when",
"D",
"--logfile-interval",
"5",
"--logfile-backups",
"10",
"--fields",
"fieldA,fieldB",
"--gridfs-set",
"db.gridfs",
"--unique-key",
"customer_id",
"--auto-commit-interval",
"100",
"--continue-on-error",
"-t",
"localhost:54321",
"-d",
"doc_manager_simulator",
"-n",
"foo.bar,fiz.biz",
"-g",
"foo2.bar2,fiz2.biz2",
"--ssl-certfile",
"certfile2.pem",
"--ssl-ca-certs",
"ca2.pem",
"--ssl-certificate-policy",
"ignored",
]
# Set of files to keep in the 'lib' directory after each run.
# The Connector and OplogThread create their own files in this directory
# that should be cleaned out between tests.
files_to_keep = set(("dummy.pwd",))
def setUp(self):
self.config = config.Config(get_config_options())
# Remove all logging Handlers, since tests may create Handlers.
logger = logging.getLogger()
for handler in logger.handlers:
logger.removeHandler(handler)
def tearDown(self):
for filename in os.listdir(from_here("lib")):
if filename not in self.files_to_keep:
try:
os.remove(from_here("lib", filename))
except OSError:
pass # File may no longer exist.
def assertConnectorState(self):
"""Assert that a Connector is constructed from a Config properly."""
mc = connector.Connector.from_config(self.config)
# Test Connector options.
self.assertEqual(mc.address, self.config["mainAddress"])
self.assertIsInstance(mc.doc_managers[0], doc_manager_simulator.DocManager)
pwfile = self.config["authentication.passwordFile"]
if pwfile:
with open(pwfile, "r") as fd:
test_password = re.sub(r"\s", "", fd.read())
self.assertEqual(mc.auth_key, test_password)
self.assertEqual(mc.auth_username, self.config["authentication.adminUsername"])
self.assertEqual(mc.oplog_checkpoint, self.config["oplogFile"])
self.assertEqual(mc.tz_aware, self.config["timezoneAware"])
self.assertEqual(
mc.ssl_kwargs.get("ssl_certfile"), self.config["ssl.sslCertfile"]
)
self.assertEqual(
mc.ssl_kwargs.get("ssl_ca_certs"), self.config["ssl.sslCACerts"]
)
self.assertEqual(
mc.ssl_kwargs.get("ssl_keyfile"), self.config["ssl.sslKeyfile"]
)
self.assertEqual(
mc.ssl_kwargs.get("ssl_cert_reqs"), self.config["ssl.sslCertificatePolicy"]
)
command_helper = mc.doc_managers[0].command_helper
for name in self.config["namespaces.mapping"]:
self.assertTrue(command_helper.namespace_config.map_namespace(name))
# Test Logger options.
log_levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
test_logger = setup_logging(self.config)
self.assertEqual(log_levels[self.config["verbosity"]], test_logger.level)
test_handlers = [
h
for h in test_logger.handlers
if isinstance(h, logging.handlers.TimedRotatingFileHandler)
]
self.assertEqual(len(test_handlers), 1)
test_handler = test_handlers[0]
expected_handler = logging.handlers.TimedRotatingFileHandler(
"test-dummy.log",
when=self.config["logging.rotationWhen"],
interval=self.config["logging.rotationInterval"],
backupCount=self.config["logging.rotationBackups"],
)
self.assertEqual(test_handler.when, expected_handler.when)
self.assertEqual(test_handler.backupCount, expected_handler.backupCount)
self.assertEqual(test_handler.interval, expected_handler.interval)
# Test keyword arguments passed to OplogThread.
ot_kwargs = mc.kwargs
self.assertEqual(ot_kwargs["ns_set"], self.config["namespaces.include"])
self.assertEqual(ot_kwargs["collection_dump"], not self.config["noDump"])
self.assertEqual(ot_kwargs["gridfs_set"], self.config["namespaces.gridfs"])
self.assertEqual(ot_kwargs["continue_on_error"], self.config["continueOnError"])
self.assertEqual(ot_kwargs["fields"], self.config["fields"])
self.assertEqual(ot_kwargs["batch_size"], self.config["batchSize"])
# Test DocManager options.
for dm, dm_expected in zip(mc.doc_managers, self.config["docManagers"]):
self.assertEqual(dm.kwargs, dm_expected.kwargs)
self.assertEqual(dm.auto_commit_interval, dm_expected.auto_commit_interval)
self.assertEqual(dm.url, dm_expected.url)
self.assertEqual(dm.chunk_size, dm_expected.chunk_size)
def test_connector_config_file_options(self):
# Test Config with only a configuration file.
self.config.load_json(json.dumps(self.set_everything_config))
self.config.parse_args(argv=[])
self.assertConnectorState()
def test_connector_with_argv(self):
# Test Config with arguments given on the command-line.
self.config.parse_args(self.set_everything_differently_argv)
self.assertConnectorState()
def test_override_config_with_argv(self):
# Override some options in the config file with given command-line
# options.
self.config.load_json(json.dumps(TestConnectorConfig.set_everything_config))
self.config.parse_args(self.set_everything_differently_argv)
first_dm = self.config["docManagers"][0]
first_dm_config = self.set_everything_config["docManagers"][0]
self.assertEqual(first_dm.url, "localhost:54321")
self.assertEqual(first_dm.chunk_size, first_dm_config["bulkSize"])
self.assertEqual(
first_dm.kwargs.get("clientOptions"),
first_dm_config["args"]["clientOptions"],
)
self.assertConnectorState()
def test_client_options(self):
config_def = {
"mainAddress": "localhost:27017",
"oplogFile": from_here("lib", "dummy.timestamp"),
"docManagers": [
{
"docManager": "mongo_doc_manager",
"targetURL": "dummyhost:27017",
"args": {"clientOptions": {"maxPoolSize": 50, "connect": False}},
}
],
}
config_obj = config.Config(get_config_options())
config_obj.load_json(json.dumps(config_def))
config_obj.parse_args(argv=[])
conn = connector.Connector.from_config(config_obj)
self.assertEqual(50, conn.doc_managers[0].mongo.max_pool_size)
def test_ssl_options(self):
config_def = {
"mainAddress": "localhost:27017",
"oplogFile": from_here("lib", "dummy.timestamp"),
"ssl": {
"sslCertfile": "certfile.pem",
"sslKeyfile": "certfile.key",
"sslCACerts": "ca.pem",
},
}
for cert_policy, expected_ssl_cert_req in [
("ignored", ssl.CERT_NONE),
("optional", ssl.CERT_OPTIONAL),
("required", ssl.CERT_REQUIRED),
(None, None),
]:
config_def["ssl"]["sslCertificatePolicy"] = cert_policy
config_obj = config.Config(get_config_options())
config_obj.load_json(json.dumps(config_def))
config_obj.parse_args(argv=[])
mc = connector.Connector.from_config(config_obj)
self.assertEqual("certfile.pem", mc.ssl_kwargs.get("ssl_certfile"))
self.assertEqual("ca.pem", mc.ssl_kwargs.get("ssl_ca_certs"))
self.assertEqual("certfile.key", mc.ssl_kwargs.get("ssl_keyfile"))
self.assertEqual(expected_ssl_cert_req, mc.ssl_kwargs.get("ssl_cert_reqs"))
if __name__ == "__main__":
unittest.main()
| americanstone/mongo-connector | tests/test_config.py | Python | apache-2.0 | 23,616 | 0.001059 |
"""
Class which stores coupled collection of Kuramoto oscillators
"""
import numpy as np
import pandas as pd
from scipy.integrate import odeint
import seaborn as sns
import matplotlib.pylab as plt
class System(object):
"""
Represent system of oscillators
"""
def __init__(self, A, B, omega, OMEGA):
"""
Create a system of Kuramoto oscillators.
Arguments:
A
Interal coupling matrix of the system
B
Coupling of the external force to each oscillator
omega
Internal oscillator frequency
OMEGA
External driving force frequency
"""
self._A = A
self._B = B
self._OMEGA = OMEGA
self._omegas = omega * np.ones(A.shape[0])
@property
def A(self):
return self._A
@property
def B(self):
return self._B
@property
def OMEGA(self):
return self._OMEGA
@property
def omegas(self):
return self._omegas
@property
def Phi(self):
return lambda t: self.OMEGA * t
def _get_equation(self):
"""
Generate ODE system
"""
def func(theta, t=0):
ode = []
for i, omega in enumerate(self.omegas):
ode.append(
omega \
+ np.sum([self.A[i,j] * np.sin(theta[j] - theta[i])
for j in range(len(self.omegas))]) \
+ self.B[i] * np.sin(self.Phi(t) - theta[i])
)
return np.array(ode)
return func
def solve(self, dt, T, init=None):
"""
Solve system of ODEs.
Arguments:
dt
Step size of the simulation
T
Maximal time to run the simulation to
init
Initial condition of the system
"""
ts = np.arange(0, T, dt)
if init is None:
init = np.random.uniform(0, 2*np.pi, size=self.omegas.shape)
sol = odeint(self._get_equation(), init, ts).T
return sol, ts
@staticmethod
def plot_solution(driver_sol, sols, ts):
"""
Plot solution of oscillator system.
Arguments:
driver_sol
Solution of external driver
sols
List of system solutions
ts
List of time points of the simulation
"""
# confine to circular region
sols %= 2*np.pi
driver_sol %= 2*np.pi
# convert to DataFrame
df = pd.DataFrame.from_dict([
{
'theta': theta,
'time': ts[i],
'oscillator': osci+1,
'source': 'raw'
}
for osci, sol in enumerate(sols)
for i, theta in enumerate(sol)
])
df = df.append(pd.DataFrame.from_dict([
{
'theta': theta,
'time': ts[i],
'oscillator': 'driver',
'source': 'raw'
}
for i, theta in enumerate(driver_sol)
]))
# plot result
plt.figure()
sns.tsplot(
time='time', value='theta',
condition='oscillator', unit='source',
data=df)
plt.show()
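# Minimal usage sketch (the coupling strengths and frequencies below are assumed
# values for illustration, not taken from this project):
#   A = 0.5 * (np.ones((3, 3)) - np.eye(3))   # all-to-all internal coupling
#   B = 0.8 * np.ones(3)                      # uniform coupling to the driver
#   syst = System(A, B, omega=1.0, OMEGA=1.2)
#   sols, ts = syst.solve(dt=0.01, T=50)
#   System.plot_solution(syst.Phi(ts), sols, ts)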
| kpj/OsciPy | system.py | Python | mit | 3,392 | 0.001474 |
# Generated by Django 3.2.4 on 2021-06-11 13:24
import calaccess_processed.proxies
from django.db import migrations
class Migration(migrations.Migration):
initial = True
dependencies = [
('elections', '0008_auto_20181029_1527'),
]
operations = [
migrations.CreateModel(
name='OCDFlatBallotMeasureContestProxy',
fields=[
],
options={
'verbose_name_plural': 'ballot measures',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('elections.ballotmeasurecontest', calaccess_processed.proxies.OCDProxyModelMixin),
),
migrations.CreateModel(
name='OCDFlatCandidacyProxy',
fields=[
],
options={
'verbose_name_plural': 'candidates',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('elections.candidacy', calaccess_processed.proxies.OCDProxyModelMixin),
),
migrations.CreateModel(
name='OCDFlatRetentionContestProxy',
fields=[
],
options={
'verbose_name_plural': 'recall measures',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('elections.retentioncontest', calaccess_processed.proxies.OCDProxyModelMixin),
),
]
| california-civic-data-coalition/django-calaccess-processed-data | calaccess_processed_flatfiles/migrations/0001_initial.py | Python | mit | 1,525 | 0.001967 |
"""Affine measure"""
import numpy as np
from py_stringmatching import utils
from six.moves import xrange
from py_stringmatching.similarity_measure.sequence_similarity_measure import \
SequenceSimilarityMeasure
def sim_ident(char1, char2):
return int(char1 == char2)
class Affine(SequenceSimilarityMeasure):
"""Computes the affine gap score between two strings.
The affine gap measure is an extension of the Needleman-Wunsch measure that handles the longer gaps more gracefully. For more information refer to the string matching chapter
in the DI book ("Principles of Data Integration").
Parameters:
gap_start (float): Cost for the gap at the start (defaults to 1)
gap_continuation (float): Cost for the gap continuation (defaults to 0.5)
sim_func (function): Function computing similarity score between two chars,
represented as strings (defaults to identity).
"""
def __init__(self, gap_start=1, gap_continuation=0.5, sim_func=sim_ident):
self.gap_start = gap_start
self.gap_continuation = gap_continuation
self.sim_func = sim_func
super(Affine, self).__init__()
def get_raw_score(self, string1, string2):
"""
Args:
string1,string2 (str) : Input strings
Returns:
Affine gap score (float)
Raises:
TypeError : If the inputs are not strings or if one of the inputs is None.
Examples:
>>> aff = Affine()
>>> aff.get_raw_score('dva', 'deeva')
1.5
>>> aff = Affine(gap_start=2, gap_continuation=0.5)
>>> aff.get_raw_score('dva', 'deeve')
-0.5
>>> aff = Affine(gap_continuation=0.2, sim_func=lambda s1, s2: (int(1 if s1 == s2 else 0)))
>>> aff.get_raw_score('AAAGAATTCA', 'AAATCA')
4.4
"""
# input validations
utils.sim_check_for_none(string1, string2)
utils.tok_check_for_string_input(string1, string2)
# if one of the strings is empty return 0
if utils.sim_check_for_empty(string1, string2):
return 0
gap_start = -self.gap_start
gap_continuation = -self.gap_continuation
m = np.zeros((len(string1) + 1, len(string2) + 1), dtype=np.float)
x = np.zeros((len(string1) + 1, len(string2) + 1), dtype=np.float)
y = np.zeros((len(string1) + 1, len(string2) + 1), dtype=np.float)
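        # The three matrices implement the classic affine-gap DP (Gotoh-style):
        #   m[i][j]: best score of x_1..x_i vs y_1..y_j with x_i aligned to y_j
        #   x[i][j]: best score with x_i aligned to a gap
        #   y[i][j]: best score with y_j aligned to a gap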
# DP initialization
for i in xrange(1, len(string1) + 1):
m[i][0] = -float("inf")
x[i][0] = gap_start + (i - 1) * gap_continuation
y[i][0] = -float("inf")
# DP initialization
for j in xrange(1, len(string2) + 1):
m[0][j] = -float("inf")
x[0][j] = -float("inf")
y[0][j] = gap_start + (j - 1) * gap_continuation
# affine gap calculation using DP
for i in xrange(1, len(string1) + 1):
for j in xrange(1, len(string2) + 1):
# best score between x_1....x_i and y_1....y_j
# given that x_i is aligned to y_j
m[i][j] = (self.sim_func(string1[i - 1], string2[j - 1]) +
max(m[i - 1][j - 1], x[i - 1][j - 1],
y[i - 1][j - 1]))
# the best score given that x_i is aligned to a gap
x[i][j] = max(gap_start + m[i - 1][j],
gap_continuation + x[i - 1][j])
# the best score given that y_j is aligned to a gap
y[i][j] = max(gap_start + m[i][j - 1],
gap_continuation + y[i][j - 1])
return max(m[len(string1)][len(string2)], x[len(string1)][len(string2)],
y[len(string1)][len(string2)])
def get_gap_start(self):
"""
Get gap start cost
Returns:
gap start cost (float)
"""
return self.gap_start
def get_gap_continuation(self):
"""
Get gap continuation cost
Returns:
gap continuation cost (float)
"""
return self.gap_continuation
def get_sim_func(self):
"""
Get similarity function
Returns:
similarity function (function)
"""
return self.sim_func
def set_gap_start(self, gap_start):
"""
Set gap start cost
Args:
gap_start (float): Cost for the gap at the start
"""
self.gap_start = gap_start
return True
def set_gap_continuation(self, gap_continuation):
"""
Set gap continuation cost
Args:
gap_continuation (float): Cost for the gap continuation
"""
self.gap_continuation = gap_continuation
return True
def set_sim_func(self, sim_func):
"""
Set similarity function
Args:
sim_func (function): Function computing similarity score between two chars, represented as strings.
"""
self.sim_func = sim_func
return True
| Anson-Doan/py_stringmatching | py_stringmatching/similarity_measure/affine.py | Python | bsd-3-clause | 5,192 | 0.002119 |
import doctest
from insights.parsers import keystone
from insights.tests import context_wrap
KEYSTONE_CONF = """
[DEFAULT]
#
# From keystone
#
admin_token = ADMIN
compute_port = 8774
[identity]
# From keystone
default_domain_id = default
#domain_specific_drivers_enabled = false
domain_configurations_from_database = false
[identity_mapping]
driver = keystone.identity.mapping_backends.sql.Mapping
generator = keystone.identity.id_generators.sha256.Generator
#backward_compatible_ids = true
""".strip()
def test_doc_examples():
failed_count, tests = doctest.testmod(
keystone,
globs={'conf': keystone.KeystoneConf(context_wrap(KEYSTONE_CONF))}
)
assert failed_count == 0
def test_keystone():
kconf = keystone.KeystoneConf(context_wrap(KEYSTONE_CONF))
assert kconf is not None
assert kconf.defaults() == {'admin_token': 'ADMIN',
'compute_port': '8774'}
assert 'identity' in kconf
assert 'identity_mapping' in kconf
assert kconf.has_option('identity', 'default_domain_id')
assert kconf.has_option('identity_mapping', 'driver')
assert not kconf.has_option('identity', 'domain_specific_drivers_enabled')
assert kconf.get('identity', 'default_domain_id') == 'default'
assert kconf.items('DEFAULT') == {'admin_token': 'ADMIN',
'compute_port': '8774'}
| RedHatInsights/insights-core | insights/parsers/tests/test_keystone.py | Python | apache-2.0 | 1,394 | 0 |
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib
from resources.lib.libraries import client
def resolve(url):
try:
headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://zstream.to/embed-%s.html' % url
result = client.request(url)
url = re.compile('file *: *"(http.+?)"').findall(result)
url = [i for i in url if not i.endswith('.srt')][-1]
url += headers
return url
except:
return
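# Hypothetical usage sketch (the embed id below is illustrative, not a real
# link):
#   stream = resolve('http://zstream.to/embed-abc123.html')
#   # -> direct file URL with '|User-Agent=...&Referer=...' appended, as
#   #    built from the `headers` string above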
| AMOboxTV/AMOBox.LegoBuild | plugin.video.titan/resources/lib/resolvers/zstream.py | Python | gpl-2.0 | 1,309 | 0.004584 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
SUFFIX_NOT_ACRONYMS = set([
'esq',
'esquire',
'jr',
'jnr',
'sr',
'snr',
'2',
'i',
'ii',
'iii',
'iv',
'v',
])
SUFFIX_ACRONYMS = set([
'ae',
'afc',
'afm',
'arrc',
'bart',
'bem',
'bt',
'cb',
'cbe',
'cfp',
'cgc',
'cgm',
'ch',
'chfc',
'cie',
'clu',
'cmg',
'cpa',
'cpm',
'csi',
'csm',
'cvo',
'dbe',
'dcb',
'dcm',
'dcmg',
'dcvo',
'dds',
'dfc',
'dfm',
'dmd',
'do',
'dpm',
'dsc',
'dsm',
'dso',
'dvm',
'ed',
'erd',
'gbe',
'gc',
'gcb',
'gcie',
'gcmg',
'gcsi',
'gcvo',
'gm',
'idsm',
'iom',
'iso',
'kbe',
'kcb',
'kcie',
'kcmg',
'kcsi',
'kcvo',
'kg',
'kp',
'kt',
'lg',
'lt',
'lvo',
'ma',
'mba',
'mbe',
'mc',
'md',
'mm',
'mp',
'msm',
'mvo',
'obe',
'obi',
'om',
'phd',
'phr',
'pmp',
'qam',
'qc',
'qfsm',
'qgm',
'qpm',
'rd',
'rrc',
'rvm',
'sgm',
'td',
'ud',
'vc',
'vd',
'vrd',
])
SUFFIXES = SUFFIX_ACRONYMS | SUFFIX_NOT_ACRONYMS
"""
Pieces that come at the end of the name but are not last names. These potentially
conflict with initials that might be at the end of the name.
These may be updated in the future because some of them are actually titles that just
come at the end of the name, so semantically this is wrong. Positionally, it's correct.
""" | konieboy/Seng_403 | Gender Computer/nameparser/config/suffixes.py | Python | gpl-3.0 | 1,616 | 0.003094 |
from collections import OrderedDict
from plenum.common.messages.fields import LimitedLengthStringField
from plenum.common.messages.client_request import ClientMessageValidator
from plenum.common.messages.node_messages import Propagate
EXPECTED_ORDERED_FIELDS = OrderedDict([
("request", ClientMessageValidator),
("senderClient", LimitedLengthStringField),
])
def test_has_expected_type():
assert Propagate.typename == "PROPAGATE"
def test_has_expected_fields():
actual_field_names = OrderedDict(Propagate.schema).keys()
assert list(actual_field_names) == list(EXPECTED_ORDERED_FIELDS.keys())
def test_has_expected_validators():
schema = dict(Propagate.schema)
for field, validator in EXPECTED_ORDERED_FIELDS.items():
assert isinstance(schema[field], validator)
| evernym/zeno | plenum/test/input_validation/message_validation/test_propagate_message.py | Python | apache-2.0 | 805 | 0 |
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import sys
import time
import syndicate.ag.curation.specfile as AG_specfile
import syndicate.ag.curation.crawl as AG_crawl
DRIVER_NAME = "disk"
# list a directory
def disk_listdir( root_dir, dirpath ):
return os.listdir( "/" + os.path.join( root_dir.strip("/"), dirpath.strip("/") ) )
# is this a directory?
def disk_isdir( root_dir, dirpath ):
return os.path.isdir( "/" + os.path.join( root_dir.strip("/"), dirpath.strip("/") ) )
# build a hierarchy, using sensible default callbacks
def build_hierarchy( root_dir, include_cb, disk_specfile_cbs, max_retries=1, num_threads=2, allow_partial_failure=False ):
disk_crawler_cbs = AG_crawl.crawler_callbacks( include_cb=include_cb,
listdir_cb=disk_listdir,
isdir_cb=disk_isdir )
hierarchy = AG_crawl.build_hierarchy( [root_dir] * num_threads, "/", DRIVER_NAME, disk_crawler_cbs, disk_specfile_cbs, allow_partial_failure=allow_partial_failure, max_retries=max_retries )
return hierarchy | iychoi/syndicate-core | python/syndicate/ag/datasets/disk.py | Python | apache-2.0 | 1,718 | 0.020955 |
import pyglet
from gamewindow import GameWindow
from menu import MainMenuState
from pyglet.gl import *
window = GameWindow(width=800, height=600)
pyglet.gl.glClearColor(0.1, 0.1, 1.0, 1.0)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
pyglet.resource.path = ['assets']
pyglet.resource.reindex()
window.push_state(MainMenuState)
pyglet.app.run()
| chris-statzer/survivebynine | test_states.py | Python | apache-2.0 | 383 | 0.005222 |
# coding: utf-8
# In[1]:
import sys
import pandas as pd
import matplotlib.pyplot as plt
filename1 = sys.argv[1]
filename2 = sys.argv[2]
#filename1 = "pagecounts-20160802-150000.txt"
#filename2 = "pagecounts-20160803-150000.txt"
# In[2]:
dataframe1 = pd.read_table(filename1, sep=' ', header=None, index_col=1,
names=['lang', 'page', 'views', 'bytes'])
dataframe2 = pd.read_table(filename2, sep=' ', header=None, index_col=1,
names=['lang', 'page', 'views', 'bytes'])
# In[3]:
dataframe1 = dataframe1.sort_values(["views"],ascending=False)
# In[4]:
combo = pd.concat([dataframe1, dataframe2], axis=1, join_axes=[dataframe1.index])
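# Note: join_axes was removed in pandas 1.0; on newer pandas the equivalent is
# pd.concat([dataframe1, dataframe2], axis=1).reindex(dataframe1.index)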
new_columns = combo.columns.values
new_columns[4] = "views2"
combo.columns = new_columns
#combo
# In[5]:
plt.figure(figsize=(10, 5)) # change the size to something sensible
plt.subplot(1, 2, 1) # subplots in 1 row, 2 columns, select the first
plt.title('Popularity Distribution')
plt.xlabel("Rank")
plt.ylabel("Views")
plt.plot(dataframe1['views'].values)
plt.subplot(1, 2, 2) # ... and then select the second
plt.title('Daily Correlation')
plt.xlabel("Day 2 views")
plt.ylabel("Day 1 views")
plt.plot(combo['views'].values,combo['views2'].values,'b.')
plt.xscale('log')
plt.yscale('log')
#plt.show()
plt.savefig('wikipedia.png')
| MockyJoke/numbers | ex2/code/create_plots.py | Python | mit | 1,292 | 0.01161 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that CacheDir() works when using 'timestamp-newer' decisions.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write(['SConstruct'], """\
Decider('timestamp-newer')
CacheDir('cache')
Command('file.out', 'file.in', Copy('$TARGET', '$SOURCE'))
""")
test.write('file.in', "file.in\n")
test.run(arguments = '--cache-show --debug=explain .')
test.must_match('file.out', "file.in\n")
test.up_to_date(options = '--cache-show --debug=explain', arguments = '.')
test.sleep()
test.touch('file.in')
test.not_up_to_date(options = '--cache-show --debug=explain', arguments = '.')
test.up_to_date(options = '--cache-show --debug=explain', arguments = '.')
test.up_to_date(options = '--cache-show --debug=explain', arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| timj/scons | test/CacheDir/timestamp-newer.py | Python | mit | 2,038 | 0.008832 |
from ..base import WLElement
class WWW(WLElement):
pass
| fnp/librarian | src/librarian/elements/styles/www.py | Python | agpl-3.0 | 62 | 0 |
#
# test_tracking_events.py
#
# Copyright (C) 2017 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Unit tests for functions related to tracking events:
# `kano_profile.tracker.tracking_events`
#
import os
import json
import time
import pytest
from kano_profile.paths import tracker_events_file
import kano_profile.tracker.tracking_events as tracking_events
from kano_profile.tracker.tracker_token import TOKEN
@pytest.mark.parametrize('event_name, event_type, event_data', [
('low-battery', 'battery', '{"status": "low-charge"}'),
('auto-poweroff', 'battery', '{"status": "automatic-poweroff"}')
])
def test_generate_low_battery_event(event_name, event_type, event_data):
if os.path.exists(tracker_events_file):
os.remove(tracker_events_file)
tracking_events.generate_event(event_name)
assert os.path.exists(tracker_events_file)
events = []
with open(tracker_events_file, 'r') as events_f:
events.append(json.loads(events_f.readline()))
assert len(events) == 1
event = events[0]
expected_keys = [
'name',
'language',
'type',
'timezone_offset',
'cpu_id',
'os_version',
'token',
'time',
'data'
]
for key in expected_keys:
assert key in event
assert event['name'] == event_type
# language: en_GB,
assert event['type'] == 'data'
# timezone_offset: 3600,
# cpu_id: None,
# os_version: None,
assert event['token'] == TOKEN
# Allow some margin for time passing
assert abs(time.time() - event['time']) < 5
assert event['data'] == json.loads(event_data)
| KanoComputing/kano-profile | tests/profile/tracking/test_tracking_events.py | Python | gpl-2.0 | 1,689 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Datacratic Inc. All rights reserved.
# @Author: Alexis Tremblay
# @Email: atremblay@datacratic.com
# @Date: 2015-03-06 14:53:37
# @Last Modified by: Alexis Tremblay
# @Last Modified time: 2015-04-09 16:54:58
# @File Name: query.py
import json
import requests
import traceback
import copy
from collections import Counter
import logging
logging.basicConfig(level=logging.DEBUG)
class Query(object):
"""docstring for Query"""
def __init__(self, dataset_url):
self.dataset_url = dataset_url
self.SELECT = Counter()
self.WHERE = None # list()
self.GROUPBY = list()
self.OFFSET = None
self.LIMIT = None
self.ORDERBY = list()
def addSELECT(self, obj):
logging.debug("Adding SELECT {}".format(obj))
self.SELECT[obj] += 1
logging.debug(self.SELECT)
def removeSELECT(self, obj):
logging.debug("Removing SELECT {}".format(obj))
if obj not in self.SELECT:
return
self.SELECT[obj] -= 1
if self.SELECT[obj] == 0:
del self.SELECT[obj]
logging.debug(self.SELECT)
def mergeSELECT(self, query):
self.SELECT = self.SELECT + query.SELECT
def addWHERE(self, where, boolean=None):
if where is None:
return
if self.WHERE is None:
self.WHERE = where
else:
# if boolean is None:
# raise RuntimeError("Must provide boolean instruction to WHERE")
if boolean != "OR" and boolean != "AND":
raise RuntimeError("Boolean instruction must OR or AND")
self.WHERE = "({} {} {})".format(self.WHERE, boolean, where)
# self.WHERE.append(where)
def mergeWHERE(self, query, how):
self.addWHERE(query.WHERE, how)
# self.WHERE.extend(query.WHERE)
def addGROUPBY(self, value):
self.GROUPBY.append(str(value))
def mergeGROUPBY(self, query):
self.GROUPBY.extend(query.GROUPBY)
def setOFFSET(self, value):
# Basically the start of slicing. This can normally be a negative
# number in python. For now (and probably forever), not supported.
# i.e. my_list[-10:] is not supported
if type(value) != int:
raise RuntimeError("Can only slice with integer")
if value < 0:
raise RuntimeError("Slicing with negative index is not allowed")
if self.OFFSET is None:
self.OFFSET = value
if self.OFFSET < value:
self.OFFSET = value
def setLIMIT(self, value):
# Basically the stop of slicing. This can normally be a negative
# number in python. For now (and probably forever), not supported.
# i.e. my_list[:-1] is not supported
if type(value) != int:
raise RuntimeError("Can only slice with integer")
if value < 0:
raise RuntimeError("Slicing with negative index is not allowed")
if self.LIMIT is None:
self.LIMIT = value
if self.LIMIT > value:
self.LIMIT = value
def addORDERBY(self, value):
self.ORDERBY.append(value)
def mergeORDERBY(self, query):
self.ORDERBY.extend(query.ORDERBY)
def mergeQuery(self, query, how=None):
self.mergeSELECT(query)
self.mergeWHERE(query, how)
self.mergeGROUPBY(query)
self.mergeORDERBY(query)
if self.OFFSET is not None and query.OFFSET is not None:
raise RuntimeError("Multiple slicing asked")
if self.OFFSET is None:
self.OFFSET = query.OFFSET
if self.LIMIT is not None and query.LIMIT is not None:
raise RuntimeError("Multiple slicing asked")
if self.LIMIT is None:
self.LIMIT = query.LIMIT
def buildQuery(self):
data = {}
# print("Building query")
# print(self.SELECT, len(self.SELECT))
if len(self.SELECT) == 0:
# print("Replacing SELECT with *")
data["select"] = '*'
else:
data["select"] = ",".join(self.SELECT.keys())
if self.WHERE is not None:
# data["where"] = " ".join(self.WHERE)
data["where"] = self.WHERE
if len(self.GROUPBY) > 0:
data["groupBy"] = ",".join(self.GROUPBY)
if self.OFFSET is not None:
data["offset"] = self.OFFSET
if self.LIMIT is not None:
data["limit"] = self.LIMIT
if len(self.ORDERBY) > 0:
data["orderBy"] = ",".join(self.ORDERBY)
return data
def executeQuery(self, format):
query = self.buildQuery()
query["format"] = format
logging.debug("REST params\n{}".format(json.dumps(query)))
select_url = self.dataset_url + "/query"
try:
# logging.info(select_url)
response = requests.get(select_url, params=query)
logging.info("URL poked {}".format(response.url))
        except requests.HTTPError as e:
            # HTTPError carries the response on e.response; the local
            # `response` may be unbound if requests.get() itself raised.
            logging.error("Code: {}\nReason: {}".format(
                e.response.status_code, e.response.reason))
            logging.error("Content: {}".format(e.response.content))
            logging.error(traceback.format_exc())
if response.status_code != 200:
logging.error("Code: {}\nReason: {}".format(
response.status_code, response.reason))
logging.error("Content: {}".format(response.content))
logging.error(traceback.format_exc())
try:
return response.json()
except:
return {}
def __or__(self, value):
if isinstance(value, Query):
query = self.copy()
# self.addWHERE('OR')
query.mergeQuery(value, "OR")
return query
def __and__(self, value):
if isinstance(value, Query):
query = self.copy()
# self.addWHERE('AND')
query.mergeQuery(value, "AND")
return query
def __rand__(self, value):
raise NotImplementedError()
def __ror__(self, value):
raise NotImplementedError()
def copy(self):
query = Query(self.dataset_url)
query.SELECT = copy.deepcopy(self.SELECT)
query.WHERE = copy.deepcopy(self.WHERE)
query.ORDERBY = copy.deepcopy(self.ORDERBY)
query.GROUPBY = copy.deepcopy(self.GROUPBY)
query.OFFSET = copy.deepcopy(self.OFFSET)
query.LIMIT = copy.deepcopy(self.LIMIT)
return query
def __repr__(self):
return json.dumps(self.buildQuery(), indent=4)
def __str__(self):
return json.dumps(self.buildQuery(), indent=4)
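# Minimal usage sketch (hypothetical dataset URL and format value; the
# combination semantics come from the operator overloads above):
#   q1 = Query('http://localhost/v1/datasets/example')
#   q1.addWHERE('x > 1')
#   q2 = Query('http://localhost/v1/datasets/example')
#   q2.addWHERE('y < 2')
#   combined = q1 & q2  # WHERE becomes "(x > 1 AND y < 2)" via mergeWHERE
#   rows = combined.executeQuery(format='aos')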
| datacratic/pymldb | pymldb/query.py | Python | isc | 6,753 | 0.000592 |
from .decorators import render_to_json
from .helper import HeadFileUploader, ImageFactory, BaseModelManager, get_first_letter, convertjson | xlk521/cloudguantou | utils/__init__.py | Python | bsd-3-clause | 138 | 0.014493 |
"""This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import create_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
create_dynamic = None
from importlib._bootstrap import _ERR_MSG, _exec, _load, _builtin_from_name
from importlib._bootstrap_external import SourcelessFileLoader
from importlib import machinery
from importlib import util
import importlib
import os
import sys
import tokenize
import types
import warnings
warnings.warn("the imp module is deprecated in favour of importlib; "
"see the module's documentation for alternative uses",
PendingDeprecationWarning, stacklevel=2)
# DEPRECATED
SEARCH_ERROR = 0
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PY_RESOURCE = 4
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
PY_CODERESOURCE = 8
IMP_HOOK = 9
def new_module(name):
"""**DEPRECATED**
Create a new module.
The module is not entered into sys.modules.
"""
return types.ModuleType(name)
def get_magic():
"""**DEPRECATED**
Return the magic number for .pyc files.
"""
return util.MAGIC_NUMBER
def get_tag():
"""Return the magic tag for .pyc files."""
return sys.implementation.cache_tag
def cache_from_source(path, debug_override=None):
"""**DEPRECATED**
Given the path to a .py file, return the path to its .pyc file.
The .py file does not need to exist; this simply returns the path to the
.pyc file calculated as if the .py file were imported.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return util.cache_from_source(path, debug_override)
def source_from_cache(path):
"""**DEPRECATED**
Given the path to a .pyc. file, return the path to its .py file.
The .pyc file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
return util.source_from_cache(path)
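# Illustrative example (the cache tag varies with the interpreter): on
# CPython 3.4, cache_from_source('/foo/bar.py') returns
# '/foo/__pycache__/bar.cpython-34.pyc', and source_from_cache() maps that
# path back to '/foo/bar.py'.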
def get_suffixes():
"""**DEPRECATED**"""
extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
return extensions + source + bytecode
class NullImporter:
"""**DEPRECATED**
Null import object.
"""
def __init__(self, path):
if path == '':
raise ImportError('empty pathname', path='')
elif os.path.isdir(path):
raise ImportError('existing directory', path=path)
def find_module(self, fullname):
"""Always returns None."""
return None
class _HackedGetData:
"""Compatibility support for 'file' arguments of various load_*()
functions."""
def __init__(self, fullname, path, file=None):
super().__init__(fullname, path)
self.file = file
def get_data(self, path):
"""Gross hack to contort loader to deal w/ load_*()'s bad API."""
if self.file and path == self.path:
if not self.file.closed:
file = self.file
else:
self.file = file = open(self.path, 'r')
with file:
# Technically should be returning bytes, but
# SourceLoader.get_code() just passed what is returned to
# compile() which can handle str. And converting to bytes would
# require figuring out the encoding to decode to and
# tokenize.detect_encoding() only accepts bytes.
return file.read()
else:
return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader):
"""Compatibility support for implementing load_source()."""
def load_source(name, pathname, file=None):
loader = _LoadSourceCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = machinery.SourceFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader):
"""Compatibility support for implementing load_compiled()."""
def load_compiled(name, pathname, file=None):
"""**DEPRECATED**"""
loader = _LoadCompiledCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = SourcelessFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
def load_package(name, path):
"""**DEPRECATED**"""
if os.path.isdir(path):
extensions = (machinery.SOURCE_SUFFIXES[:] +
machinery.BYTECODE_SUFFIXES[:])
for extension in extensions:
path = os.path.join(path, '__init__'+extension)
if os.path.exists(path):
break
else:
raise ValueError('{!r} is not a package'.format(path))
spec = util.spec_from_file_location(name, path,
submodule_search_locations=[])
if name in sys.modules:
return _exec(spec, sys.modules[name])
else:
return _load(spec)
def load_module(name, file, filename, details):
"""**DEPRECATED**
Load a module, given information returned by find_module().
The module name must include the full package name, if any.
"""
suffix, mode, type_ = details
if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
raise ValueError('invalid file open mode {!r}'.format(mode))
elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
msg = 'file object required for import (type code {})'.format(type_)
raise ValueError(msg)
elif type_ == PY_SOURCE:
return load_source(name, filename, file)
elif type_ == PY_COMPILED:
return load_compiled(name, filename, file)
elif type_ == C_EXTENSION and load_dynamic is not None:
if file is None:
with open(filename, 'rb') as opened_file:
return load_dynamic(name, filename, opened_file)
else:
return load_dynamic(name, filename, file)
elif type_ == PKG_DIRECTORY:
return load_package(name, filename)
elif type_ == C_BUILTIN:
return init_builtin(name)
elif type_ == PY_FROZEN:
return init_frozen(name)
else:
msg = "Don't know how to import {} (type code {})".format(name, type_)
raise ImportError(msg, name=name)
def find_module(name, path=None):
"""**DEPRECATED**
Search for a module.
If path is omitted or None, search for a built-in, frozen or special
module and continue search in sys.path. The module name cannot
contain '.'; to search for a submodule of a package, pass the
submodule name and the package's __path__.
"""
if not isinstance(name, str):
raise TypeError("'name' must be a str, not {}".format(type(name)))
elif not isinstance(path, (type(None), list)):
# Backwards-compatibility
raise RuntimeError("'list' must be None or a list, "
"not {}".format(type(name)))
if path is None:
if is_builtin(name):
return None, None, ('', '', C_BUILTIN)
elif is_frozen(name):
return None, None, ('', '', PY_FROZEN)
else:
path = sys.path
for entry in path:
package_directory = os.path.join(entry, name)
for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
package_file_name = '__init__' + suffix
file_path = os.path.join(package_directory, package_file_name)
if os.path.isfile(file_path):
return None, package_directory, ('', '', PKG_DIRECTORY)
for suffix, mode, type_ in get_suffixes():
file_name = name + suffix
file_path = os.path.join(entry, file_name)
if os.path.isfile(file_path):
break
else:
continue
break # Break out of outer loop when breaking out of inner loop.
else:
raise ImportError(_ERR_MSG.format(name), name=name)
encoding = None
if 'b' not in mode:
with open(file_path, 'rb') as file:
encoding = tokenize.detect_encoding(file.readline)[0]
file = open(file_path, mode, encoding=encoding)
return file, file_path, (suffix, mode, type_)
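# Typical (deprecated) pairing of find_module() with load_module(); note the
# caller is responsible for closing the returned file object:
#   file, pathname, description = find_module('token')
#   try:
#       token = load_module('token', file, pathname, description)
#   finally:
#       if file:
#           file.close()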
def reload(module):
"""**DEPRECATED**
Reload the module and return it.
The module must have been successfully imported before.
"""
return importlib.reload(module)
def init_builtin(name):
"""**DEPRECATED**
    Load and return a built-in module by name, or None if such a module doesn't
exist
"""
try:
return _builtin_from_name(name)
except ImportError:
return None
if create_dynamic:
def load_dynamic(name, path, file=None):
"""**DEPRECATED**
Load an extension module.
"""
import importlib.machinery
loader = importlib.machinery.ExtensionFileLoader(name, path)
return loader.load_module()
else:
load_dynamic = None
| ms-iot/python | cpython/Lib/imp.py | Python | bsd-3-clause | 10,431 | 0.000096 |
import matplotlib
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file,sweeps
from kid_readout.analysis.resonator.legacy_resonator import fit_best_resonator
ri = baseband.RoachBaseband()
ri.initialize()
#ri.set_fft_gain(6)
#f0s = np.load('/home/gjones/workspace/apps/f8_fit_resonances.npy')
#f0s = np.load('/home/gjones/workspace/apps/first_pass_sc3x3_0813f9.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f10_first_pass.npy')#[:4]
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f9_2014-02-11.npy')
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f5_2014-04-15.npy')
f0s = np.load('/home/gjones/readout/kid_readout/apps/2015-05-07-jpl5x4.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f12.npy')
f0s.sort()
nf = len(f0s)
atonce = 8
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
offsets = np.linspace(-4882.8125,4638.671875,20)#[5:15]
offsets = offsets
#offsets = np.concatenate(([-40e3,-20e3],offsets,[20e3,40e3]))/1e6
offsets = np.concatenate(([-40e3],offsets,[40e3]))/1e6
#offsets = offsets*4
nsamp = 2**20
step = 1
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-21,21)*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([-20e-3,],offsets,[20e-3]))
print f0s
print offsets*1e6
print len(f0s)
if False:
from kid_readout.equipment.parse_srs import get_all_temperature_data
while True:
temp = get_all_temperature_data()[1][-1]
print "mk stage at", temp
if temp > 0.348:
break
time.sleep(300)
time.sleep(600)
start = time.time()
first = True
attenlist = [27,24,21,18,15,12]
for atten in attenlist:
print "setting attenuator to",atten
ri.set_dac_attenuator(atten)
if first:
nsamp = 2**20
step = 1
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-21,21)*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([-20e-3,],offsets,[20e-3]))
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded waveforms in", (time.time()-start),"seconds"
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8)
orig_sweep_data = sweep_data
meas_cfs = []
idxs = []
delays = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
res = fit_best_resonator(fr[1:-1],s21[1:-1],errors=errors[1:-1]) #Resonator(fr,s21,errors=errors)
delay = res.delay
delays.append(delay)
s21 = s21*np.exp(2j*np.pi*res.delay*fr)
res = fit_best_resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0, "delay",delay,"resid delay",res.delay
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
delay = np.median(delays)
print "median delay is ",delay
first = False
else:
orig_sweep_data = None
nsamp = 2**22
step = 1
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.array([-8,-4,-2,-1,0,1,2,4])#np.arange(-4,4)*step
offset_bins = np.concatenate(([-40,-20],offset_bins,[20,40]))
offsets = offset_bins*512.0/nsamp
meas_cfs = np.array(meas_cfs)
f0binned = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
f0s = f0binned
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded updated waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
df = data_file.DataFile() #(suffix='led')
df.log_hw_state(ri)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8, sweep_data=orig_sweep_data)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
s21 = s21*np.exp(2j*np.pi*delay*fr)
res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
ri._sync()
time.sleep(0.5)
#raw_input("Turn on source and press enter to begin collecting data")
df.log_hw_state(ri)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.2)
t0 = time.time()
dmod,addr = ri.get_data_seconds(30,demod=True)
print nsets,iset,tsg
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
df.sync()
df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
| ColumbiaCMB/kid_readout | apps/data_taking_scripts/old_scripts/highq_power_sweep_downstairs.py | Python | bsd-2-clause | 6,148 | 0.018705 |
# -*- coding: utf-8 -*-
import smtplib
from django.contrib.auth.models import Permission
from django.test import TestCase
from principal.forms import *
from principal.models import *
from principal.services import DepartmentService, CertificationService, UserService, ImpartSubjectService, \
AdministratorService
from gestionalumnos.settings import *
from django.core import mail
from django.test.utils import override_settings
class CertificationTestCase(TestCase):
def setUp(self):
# Departments
self.department_lsi = Departamento.objects.create(
codigo='1',
nombre='Departamento de Lenguajes y Sistemas Informaticos',
web='http://www.lsi.us.es'
)
self.department_dte = Departamento.objects.create(
codigo='2',
nombre='Departamento de Tecnologia Electronica',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores'
)
self.department_atc = Departamento.objects.create(
codigo='3',
nombre='Departamento de Arquitectura y Tecnologia de Computadores',
web='http://www.atc.us.es/'
)
# Subjects
self.subject_egc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Evolucion y gestion de la configuracion',
curso='4',
codigo='2050032',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=111',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
self.subject_rc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Redes de computadores',
curso='2',
codigo='2050013',
creditos='6',
duracion='C',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores',
tipo_asignatura='OB',
departamento=self.department_dte,
)
self.subject_cm = Asignatura.objects.create(
cuatrimestre='1',
nombre='Computacion Movil',
curso='4',
codigo='2060045',
creditos='6',
duracion='C',
web='http://www.us.es/estudios/grados/plan_206/asignatura_2060045',
tipo_asignatura='OP',
departamento=self.department_atc,
)
self.subject_ispp = Asignatura.objects.create(
cuatrimestre='2',
nombre='Ingenieria del Software y Practica Profesional',
curso='4',
codigo='2050039',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
# Certifications
self.certification_isw = Titulacion.objects.create(
codigo='1',
nombre='Grado en Informatica - Ingenieria del Software',
)
self.certification_isw.asignaturas.add(self.subject_rc, self.subject_ispp, self.subject_egc)
self.certification_isc = Titulacion.objects.create(
codigo='2',
nombre='Grado en Informatica - Ingenieria de Computadores',
)
self.certification_isc.asignaturas.add(self.subject_rc)
self.certification_iti = Titulacion.objects.create(
codigo='3',
nombre='Grado en Informatica - Tecnologias Informaticas',
)
self.certification_iti.asignaturas.add(self.subject_cm, self.subject_rc)
def test_create_and_save_ok_1(self):
data_form = {
'code': '123456',
'name': 'Grado en Informatica - Tecnologias Informaticas'
}
form = CertificationEditForm(data=data_form)
self.assertEqual(form.is_valid(), True)
certification = CertificationService.create_and_save(form)
certification_bd = Titulacion.objects.get(codigo=123456)
self.assertEqual(certification_bd, certification)
def test_create_and_save_error_1(self):
data_form = {
'code': '1',
'name': 'Grado en Informatica - Ingenieria del Software'
}
form = CertificationEditForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_find_all_ok_1(self):
certifications = list(CertificationService.find_all())
list_certifications = [self.certification_isc, self.certification_isw, self.certification_iti]
self.assertListEqual(certifications, list_certifications)
def test_find_by_code_ok_1(self):
certification = CertificationService.find_by_code('2')
self.assertEqual(certification, self.certification_isc)
def test_find_by_code_error_1(self):
certification = CertificationService.find_by_code('99')
self.assertEqual(certification, None)
def test_find_by_subject_ok_1(self):
certifications = list(CertificationService.find_by_subject(self.subject_rc.id))
list_certifications = [self.certification_isw, self.certification_isc, self.certification_iti]
self.assertListEqual(certifications, list_certifications)
def test_find_by_subject_ok_2(self):
certifications = list(CertificationService.find_by_subject(self.subject_ispp.id))
list_certifications = [self.certification_isw]
self.assertListEqual(certifications, list_certifications)
def test_find_by_subject_ok_3(self):
certifications = list(CertificationService.find_by_subject('4874'))
self.assertListEqual(certifications, [])
def test_search_ok_1(self):
certifications = list(CertificationService.search('Grado'))
list_certifications = [self.certification_isc, self.certification_isw, self.certification_iti]
self.assertListEqual(certifications, list_certifications)
def test_search_ok_2(self):
certifications = list(CertificationService.search('i'))
list_certifications = [self.certification_isc, self.certification_isw, self.certification_iti]
self.assertListEqual(certifications, list_certifications)
def test_search_ok_3(self):
certifications = list(CertificationService.search('Tecnologias'))
list_certifications = [self.certification_iti]
self.assertListEqual(certifications, list_certifications)
def test_search_ok_4(self):
certifications = list(CertificationService.search('z'))
self.assertListEqual(certifications, [])
def test_find_one_ok_1(self):
certification = CertificationService.find_one(self.certification_isw.id)
self.assertEqual(certification, self.certification_isw)
class AdministratorTestCase(TestCase):
def setUp(self):
# Administrators
self.administrator1 = Administrador.objects.create(
username='admin',
is_staff=True,
is_superuser=False
)
self.administrator1.set_password('admin')
self.administrator1.user_permissions.add(Permission.objects.get(codename='administrator'))
self.administrator1.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
def test_find_one_ok_1(self):
administrator = AdministratorService.find_one(self.administrator1.id)
self.assertEqual(administrator, self.administrator1)
class DepartmentTestCase(TestCase):
def setUp(self):
# Departments
self.department_lsi = Departamento.objects.create(
codigo='1',
nombre='Departamento de Lenguajes y Sistemas Informaticos',
web='http://www.lsi.us.es'
)
self.department_dte = Departamento.objects.create(
codigo='2',
nombre='Departamento de Tecnologia Electronica',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores'
)
self.department_atc = Departamento.objects.create(
codigo='3',
nombre='Departamento de Arquitectura y Tecnologia de Computadores',
web='http://www.atc.us.es/'
)
def test_reconstruct_and_save_ok_1(self):
data_form = {
'code': '4',
'name': 'Departamento de Fisica',
'web': 'http://www.fisica.us.es/'
}
form = DepartmentEditForm(data=data_form)
self.assertEqual(form.is_valid(), True)
department = DepartmentService.reconstruct_and_save(form)
department_bd = Departamento.objects.get(codigo=4)
self.assertEqual(department_bd, department)
def test_reconstruct_and_save_ok_2(self):
data_form = DepartmentService.get_form_data(self.department_lsi)
data_form['name'] = 'Test'
form = DepartmentEditForm(data=data_form)
self.assertEqual(form.is_valid(), True)
department = DepartmentService.reconstruct_and_save(form)
department_bd = Departamento.objects.get(id=self.department_lsi.id)
self.assertEqual(department_bd, department)
self.assertEqual(department_bd.nombre, 'Test')
def test_reconstruct_and_save_error_1(self):
data_form = DepartmentService.get_form_data(self.department_lsi)
data_form['code'] = '3'
form = DepartmentEditForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_reconstruct_and_save_error_2(self):
data_form = DepartmentService.get_form_data(self.department_lsi)
data_form['id'] = '4944'
form = DepartmentEditForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_reconstruct_and_save_error_3(self):
data_form = DepartmentService.get_form_data(self.department_lsi)
data_form['id'] = None
form = DepartmentEditForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_find_all_ok_1(self):
departments = list(DepartmentService.find_all())
list_departments = [self.department_atc, self.department_lsi, self.department_dte]
self.assertListEqual(departments, list_departments)
def test_find_by_code_ok_1(self):
department = DepartmentService.find_by_code('3')
self.assertEqual(department, self.department_atc)
def test_find_by_code_error_1(self):
department = DepartmentService.find_by_code('99')
self.assertEqual(department, None)
def test_get_form_data_ok_1(self):
data_form = DepartmentService.get_form_data(self.department_atc)
data_form1 = {
'id': self.department_atc.id,
'code': self.department_atc.codigo,
'name': self.department_atc.nombre,
'web': self.department_atc.web
}
self.assertDictEqual(data_form, data_form1)
def test_get_form_data_error_1(self):
data_form = DepartmentService.get_form_data(self.department_atc)
data_form1 = {
'id': self.department_atc.id,
'code': '324245',
'name': self.department_atc.nombre,
'web': self.department_atc.web
}
self.assertNotEqual(data_form, data_form1)
def test_search_ok_1(self):
departments = list(DepartmentService.search('Departamento'))
list_departments = [self.department_atc, self.department_lsi, self.department_dte]
self.assertListEqual(departments, list_departments)
def test_search_ok_2(self):
departments = list(DepartmentService.search('i'))
list_departments = [self.department_atc, self.department_lsi, self.department_dte]
self.assertListEqual(departments, list_departments)
def test_search_ok_3(self):
departments = list(DepartmentService.search('Lenguajes'))
list_departments = [self.department_lsi]
self.assertListEqual(departments, list_departments)
def test_search_ok_4(self):
departments = list(DepartmentService.search('zz'))
self.assertListEqual(departments, [])
def test_get_form_data_xml_ok_1(self):
department = {
'codigo': self.department_atc.codigo,
'nombre': self.department_atc.nombre,
'web': self.department_atc.web
}
data_form = DepartmentService.get_form_data_xml(department)
data_form1 = {
'code': self.department_atc.codigo,
'name': self.department_atc.nombre,
'web': self.department_atc.web
}
self.assertDictEqual(data_form, data_form1)
def test_get_form_data_xml_error_1(self):
department = {
'codigo': '946514',
'nombre': self.department_atc.nombre,
'web': self.department_atc.web
}
data_form = DepartmentService.get_form_data_xml(department)
data_form1 = {
'code': self.department_atc.codigo,
'name': self.department_atc.nombre,
'web': self.department_atc.web
}
self.assertNotEqual(data_form, data_form1)
def test_get_form_data_csv_ok_1(self):
department = [
self.department_atc.codigo,
self.department_atc.nombre,
self.department_atc.web
]
data_form = DepartmentService.get_form_data_csv(department)
data_form1 = {
'code': self.department_atc.codigo,
'name': self.department_atc.nombre,
'web': self.department_atc.web
}
self.assertDictEqual(data_form, data_form1)
def test_get_form_data_csv_error_1(self):
department = [
'49498',
self.department_atc.nombre,
self.department_atc.web
]
data_form = DepartmentService.get_form_data_csv(department)
data_form1 = {
'code': self.department_atc.codigo,
'name': self.department_atc.nombre,
'web': self.department_atc.web
}
self.assertNotEqual(data_form, data_form1)
def test_rollback_ok_1(self):
departments = list(DepartmentService.find_all())
list_departments = [self.department_atc, self.department_lsi, self.department_dte]
self.assertListEqual(departments, list_departments)
DepartmentService.rollback(list_departments)
departments = list(DepartmentService.find_all())
self.assertListEqual([], departments)
def test_find_one_ok_1(self):
department = DepartmentService.find_one(self.department_atc.id)
self.assertEqual(department, self.department_atc)
class ImpartSubjectTestCase(TestCase):
def setUp(self):
# Departments
self.department_lsi = Departamento.objects.create(
codigo='1',
nombre='Departamento de Lenguajes y Sistemas Informaticos',
web='http://www.lsi.us.es'
)
self.department_dte = Departamento.objects.create(
codigo='2',
nombre='Departamento de Tecnologia Electronica',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores'
)
self.department_atc = Departamento.objects.create(
codigo='3',
nombre='Departamento de Arquitectura y Tecnologia de Computadores',
web='http://www.atc.us.es/'
)
# Subjects
self.subject_egc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Evolucion y gestion de la configuracion',
curso='4',
codigo='2050032',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=111',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
self.subject_rc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Redes de computadores',
curso='2',
codigo='2050013',
creditos='6',
duracion='C',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores',
tipo_asignatura='OB',
departamento=self.department_dte,
)
self.subject_cm = Asignatura.objects.create(
cuatrimestre='1',
nombre='Computacion Movil',
curso='4',
codigo='2060045',
creditos='6',
duracion='C',
web='http://www.us.es/estudios/grados/plan_206/asignatura_2060045',
tipo_asignatura='OP',
departamento=self.department_atc,
)
self.subject_ispp = Asignatura.objects.create(
cuatrimestre='2',
nombre='Ingenieria del Software y Practica Profesional',
curso='4',
codigo='2050039',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
# Lecturers
self.lecturer_benavides = Profesor.objects.create(
username='benavides',
email='benavides@us.es',
categoria='Profesor Titular de Universidad',
telefono='954559897',
despacho='F 0.48',
web='http://www.lsi.us.es/~dbc/',
first_name='David',
last_name='Benavides Cuevas',
tutoriaactivada=True,
dni='55555555X'
)
self.lecturer_benavides.set_password('practica')
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_corchuelo = Profesor.objects.create(
username='corchu',
email='corchu@us.es',
categoria='Profesor Titular de Universidad',
telefono='954552770',
despacho='F 1.63',
first_name='Rafael',
last_name='Corchuelo Gil',
web='https://www.lsi.us.es/personal/pagina_personal.php?id=12',
tutoriaactivada=True,
dni='66666666X'
)
self.lecturer_corchuelo.set_password('practica')
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_muller = Profesor.objects.create(
username='cmuller',
email='cmuller@lsi.us.es',
categoria='Becario FPI',
telefono='954553868',
despacho='F 0.43',
first_name='Carlos',
last_name='Muller Cejas',
web='https://www.lsi.us.es/personal/pagina_personal.php?id=108',
tutoriaactivada=True,
dni='77777777X'
)
self.lecturer_muller.set_password('practica')
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_veronica = Profesor.objects.create(
username='averonica',
email='cmuller@lsi.us.es',
categoria='Profesor Titular de Universidad ',
telefono='954557095 ',
despacho='G 1.69',
first_name='Ana Veronica',
last_name='Medina Rodriguez',
web='http://www.dte.us.es/personal/vmedina/',
tutoriaactivada=True,
dni='88888888X'
)
self.lecturer_veronica.set_password('practica')
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
def test_reconstruct_and_save_ok_1(self):
data_form = {
'subject_id': self.subject_ispp.id,
'lecturer_id': self.lecturer_corchuelo.id,
'lecturer': self.lecturer_corchuelo.first_name + self.lecturer_corchuelo.last_name,
'position': 'Coordinador'
}
form = UserLinkSubjectForm(data=data_form)
self.assertEqual(form.is_valid(), True)
impart_subject = ImpartSubjectService.reconstruct_and_save(form)
impart_subject_bd = Imparteasignatura.objects.get(profesor=self.lecturer_corchuelo,
asignatura=self.subject_ispp)
self.assertEqual(impart_subject, impart_subject_bd)
def test_reconstruct_and_save_error_1(self):
data_form = {
'subject_id': '',
'lecturer_id': self.lecturer_corchuelo.id,
'lecturer': self.lecturer_corchuelo.first_name + self.lecturer_corchuelo.last_name,
'position': 'Coordinador'
}
form = UserLinkSubjectForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_reconstruct_and_save_error_2(self):
data_form = {
'subject_id': self.subject_ispp.id,
'lecturer_id': '',
'lecturer': self.lecturer_corchuelo.first_name + self.lecturer_corchuelo.last_name,
'position': 'Coordinador'
}
form = UserLinkSubjectForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_reconstruct_and_save_error_3(self):
data_form = {
'subject_id': self.subject_ispp.id,
'lecturer_id': self.lecturer_corchuelo.id,
'lecturer': self.lecturer_corchuelo.first_name + self.lecturer_corchuelo.last_name,
'position': ''
}
form = UserLinkSubjectForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_reconstruct_and_save_error_4(self):
data_form = {
'subject_id': '99854',
'lecturer_id': self.lecturer_corchuelo.id,
'lecturer': self.lecturer_corchuelo.first_name + self.lecturer_corchuelo.last_name,
'position': ''
}
form = UserLinkSubjectForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_reconstruct_and_save_error_5(self):
data_form = {
'subject_id': self.subject_ispp.id,
'lecturer_id': '74985',
'lecturer': self.lecturer_corchuelo.first_name + self.lecturer_corchuelo.last_name,
'position': ''
}
form = UserLinkSubjectForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_get_form_data_xml_ok_1(self):
lecturer = {
'uvus': self.lecturer_muller.username,
'cargo': 'Profesor'
}
data_form = ImpartSubjectService.get_form_data_xml(lecturer, self.subject_ispp)
data = {
'subject_id': self.subject_ispp.id,
'lecturer_id': self.lecturer_muller.id,
'lecturer': self.lecturer_muller.first_name + self.lecturer_muller.last_name,
'position': 'Profesor'
}
self.assertDictEqual(data_form, data)
def test_get_form_data_xml_error_1(self):
lecturer = {
'uvus': self.lecturer_muller.username,
'cargo': 'Profesor'
}
data_form = ImpartSubjectService.get_form_data_xml(lecturer, self.subject_ispp)
data = {
'subject_id': self.subject_ispp.id,
'lecturer_id': '-1',
'lecturer': self.lecturer_muller.first_name + self.lecturer_muller.last_name,
'position': 'Profesor'
}
self.assertNotEqual(data_form, data)
def test_get_form_data_xml_error_2(self):
lecturer = {
'uvus': self.lecturer_muller.username,
'cargo': 'Profesor'
}
data_form = ImpartSubjectService.get_form_data_xml(lecturer, self.subject_ispp)
data = {
'subject_id': '-1',
'lecturer_id': self.lecturer_muller.id,
'lecturer': self.lecturer_muller.first_name + self.lecturer_muller.last_name,
'position': 'Profesor'
}
self.assertNotEqual(data_form, data)
# def test_get_form_data_csv_ok_1(self):
#
# lecturer = [
# 'Profesor',
# self.lecturer_muller.dni,
# self.lecturer_muller.last_name + "," + self.lecturer_muller.first_name,
# self.lecturer_muller.username,
# 'null',
# 'Coordinador'
# ]
# data_form = ImpartSubjectService.get_form_data_csv(lecturer, self.subject_ispp)
# data = {
# 'subject_id': self.subject_ispp.id,
# 'lecturer_id': self.lecturer_muller.id,
# 'lecturer': "" + self.lecturer_muller.first_name + " " + self.lecturer_muller.last_name,
# 'position': 'Profesor'
# }
# self.assertEqual(data_form, data)
class UserTestCase(TestCase):
def setUp(self):
# Departments
self.department_lsi = Departamento.objects.create(
codigo='1',
nombre='Departamento de Lenguajes y Sistemas Informaticos',
web='http://www.lsi.us.es'
)
self.department_dte = Departamento.objects.create(
codigo='2',
nombre='Departamento de Tecnologia Electronica',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores'
)
self.department_atc = Departamento.objects.create(
codigo='3',
nombre='Departamento de Arquitectura y Tecnologia de Computadores',
web='http://www.atc.us.es/'
)
# Subjects
self.subject_egc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Evolucion y gestion de la configuracion',
curso='4',
codigo='2050032',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=111',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
self.subject_rc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Redes de computadores',
curso='2',
codigo='2050013',
creditos='6',
duracion='C',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores',
tipo_asignatura='OB',
departamento=self.department_dte,
)
self.subject_cm = Asignatura.objects.create(
cuatrimestre='1',
nombre='Computacion Movil',
curso='4',
codigo='2060045',
creditos='6',
duracion='C',
web='http://www.us.es/estudios/grados/plan_206/asignatura_2060045',
tipo_asignatura='OP',
departamento=self.department_atc,
)
self.subject_ispp = Asignatura.objects.create(
cuatrimestre='2',
nombre='Ingenieria del Software y Practica Profesional',
curso='4',
codigo='2050039',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
        # Students
self.student_carborgar = Alumno.objects.create(
username='carborgar',
first_name='Carlos',
last_name='Borja Garcia - Baquero',
email='carborgar@alum.us.es',
dni='47537495X'
)
self.student_carborgar.set_password('practica')
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_juamaiosu = Alumno.objects.create(
username='juamaiosu',
first_name='Juan Elias',
last_name='Maireles Osuna',
email='juamaiosu@alum.us.es',
dni='47537560X'
)
self.student_juamaiosu.set_password('practica')
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_rubgombar = Alumno.objects.create(
username='rubgombar',
first_name='Ruben',
last_name='Gomez Barrera',
email='ruben@alum.us.es',
dni='11111111X'
)
self.student_rubgombar.set_password('practica')
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_davjimvar = Alumno.objects.create(
username='davjimvar',
first_name='David',
last_name='Jimenez Vargas',
email='david@alum.us.es',
dni='22222222X'
)
self.student_davjimvar.set_password('practica')
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_javrodleo = Alumno.objects.create(
username='javrodleo',
first_name='Javier',
last_name='Rodriguez Leon',
email='javier@alum.us.es',
dni='33333333X'
)
self.student_javrodleo.set_password('practica')
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
# Lecturers
self.lecturer_benavides = Profesor.objects.create(
username='benavides',
email='benavides@us.es',
categoria='Profesor Titular de Universidad',
telefono='954559897',
despacho='F 0.48',
web='http://www.lsi.us.es/~dbc/',
first_name='David',
last_name='Benavides Cuevas',
tutoriaactivada=True,
dni='55555555X'
)
self.lecturer_benavides.set_password('practica')
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_corchuelo = Profesor.objects.create(
username='corchu',
email='corchu@us.es',
categoria='Profesor Titular de Universidad',
telefono='954552770',
despacho='F 1.63',
first_name='Rafael',
last_name='Corchuelo Gil',
web='https://www.lsi.us.es/personal/pagina_personal.php?id=12',
tutoriaactivada=True,
dni='66666666X'
)
self.lecturer_corchuelo.set_password('practica')
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_muller = Profesor.objects.create(
username='cmuller',
email='cmuller@lsi.us.es',
categoria='Becario FPI',
telefono='954553868',
despacho='F 0.43',
first_name='Carlos',
last_name='Muller Cejas',
web='https://www.lsi.us.es/personal/pagina_personal.php?id=108',
tutoriaactivada=True,
dni='77777777X'
)
self.lecturer_muller.set_password('practica')
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_veronica = Profesor.objects.create(
username='averonica',
email='cmuller@lsi.us.es',
categoria='Profesor Titular de Universidad ',
telefono='954557095 ',
despacho='G 1.69',
first_name='Ana Veronica',
last_name='Medina Rodriguez',
web='http://www.dte.us.es/personal/vmedina/',
tutoriaactivada=True,
dni='88888888X'
)
self.lecturer_veronica.set_password('practica')
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
        self.impart_ispp_corchu = Imparteasignatura.objects.create(
cargo='Coordinador',
profesor=self.lecturer_corchuelo,
asignatura=self.subject_ispp
)
        self.impart_ispp_muller = Imparteasignatura.objects.create(
cargo='Profesor',
profesor=self.lecturer_muller,
asignatura=self.subject_ispp
)
self.impart_egc = Imparteasignatura.objects.create(
cargo='Coordinador',
profesor=self.lecturer_benavides,
asignatura=self.subject_egc
)
self.student_carborgar.asignaturas = [self.subject_egc, self.subject_ispp]
self.student_juamaiosu.asignaturas = [self.subject_egc]
def test_find_by_username_ok_1(self):
user = UserService.find_by_username(self.student_carborgar.username)
user_db = User.objects.get(username=self.student_carborgar.username)
self.assertEqual(user, user_db)
def test_find_by_username_error_1(self):
user = UserService.find_by_username('ghslih')
self.assertEqual(user, None)
def test_delete_ok_1(self):
username = self.student_carborgar.username
UserService.delete(self.student_carborgar)
error = False
try:
User.objects.get(username=username)
except User.DoesNotExist:
error = True
self.assertTrue(error)
def test_rollback_users_ok_1(self):
user_create = {self.lecturer_muller: 'password', self.lecturer_corchuelo: 'password'}
len_list1 = len(list(UserService.find_all()))
UserService.rollback_users(user_create)
len_list2 = len(list(UserService.find_all()))
        self.assertEqual(len_list1 - 2, len_list2)
def test_rollback_ok_1(self):
number_link_student_carborgar1 = len(list(self.student_carborgar.asignaturas.all()))
number_link_student_juamaiosu1 = len(list(self.student_juamaiosu.asignaturas.all()))
number_link_lecturer_benavides1 = len(list(self.lecturer_benavides.imparteasignatura_set.all()))
student_link = [self.student_juamaiosu, self.student_carborgar]
lecturer_link = [self.lecturer_benavides]
user_create = [self.lecturer_veronica]
username = self.lecturer_veronica.username
UserService.rollback(user_create, student_link, lecturer_link, self.subject_egc.id)
number_link_student_carborgar2 = len(list(self.student_carborgar.asignaturas.all()))
number_link_student_juamaiosu2 = len(list(self.student_juamaiosu.asignaturas.all()))
number_link_lecturer_benavides2 = len(list(self.lecturer_benavides.imparteasignatura_set.all()))
self.assertEqual(number_link_student_carborgar1 - 1, number_link_student_carborgar2)
self.assertEqual(number_link_student_juamaiosu1 - 1, number_link_student_juamaiosu2)
self.assertEqual(number_link_lecturer_benavides1 - 1, number_link_lecturer_benavides2)
error = False
try:
User.objects.get(username=username)
except User.DoesNotExist:
error = True
self.assertTrue(error)
class SubjectTestCase(TestCase):
def setUp(self):
# Departments
self.department_lsi = Departamento.objects.create(
codigo='1',
nombre='Departamento de Lenguajes y Sistemas Informaticos',
web='http://www.lsi.us.es'
)
self.department_dte = Departamento.objects.create(
codigo='2',
nombre='Departamento de Tecnologia Electronica',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores'
)
self.department_atc = Departamento.objects.create(
codigo='3',
nombre='Departamento de Arquitectura y Tecnologia de Computadores',
web='http://www.atc.us.es/'
)
# Subjects
self.subject_egc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Evolucion y gestion de la configuracion',
curso='4',
codigo='1',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=111',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
self.subject_rc = Asignatura.objects.create(
cuatrimestre='1',
nombre='Redes de computadores',
curso='2',
codigo='2',
creditos='6',
duracion='C',
web='https://www.dte.us.es/docencia/etsii/gii-is/redes-de-computadores',
tipo_asignatura='OB',
departamento=self.department_dte,
)
self.subject_cm = Asignatura.objects.create(
cuatrimestre='1',
nombre='Computacion Movil',
curso='4',
codigo='3',
creditos='6',
duracion='C',
web='http://www.us.es/estudios/grados/plan_206/asignatura_2060045',
tipo_asignatura='OP',
departamento=self.department_atc,
)
self.subject_ispp = Asignatura.objects.create(
cuatrimestre='2',
nombre='Ingenieria del Software y Practica Profesional',
curso='4',
codigo='4',
creditos='6',
duracion='C',
web='http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
tipo_asignatura='OB',
departamento=self.department_lsi,
)
        # Students
self.student_carborgar = Alumno.objects.create(
username='carborgar',
first_name='Carlos',
last_name='Borja Garcia - Baquero',
email='carborgar@alum.us.es',
dni='47537495X'
)
self.student_carborgar.set_password('practica')
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_carborgar.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_juamaiosu = Alumno.objects.create(
username='juamaiosu',
first_name='Juan Elias',
last_name='Maireles Osuna',
email='juamaiosu@alum.us.es',
dni='47537560X'
)
self.student_juamaiosu.set_password('practica')
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_juamaiosu.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_rubgombar = Alumno.objects.create(
username='rubgombar',
first_name='Ruben',
last_name='Gomez Barrera',
email='ruben@alum.us.es',
dni='11111111X'
)
self.student_rubgombar.set_password('practica')
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_rubgombar.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_davjimvar = Alumno.objects.create(
username='davjimvar',
first_name='David',
last_name='Jimenez Vargas',
email='david@alum.us.es',
dni='22222222X'
)
self.student_davjimvar.set_password('practica')
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_davjimvar.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
self.student_javrodleo = Alumno.objects.create(
username='javrodleo',
first_name='Javier',
last_name='Rodriguez Leon',
email='javier@alum.us.es',
dni='33333333X'
)
self.student_javrodleo.set_password('practica')
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='alumno'))
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.student_javrodleo.user_permissions.add(Permission.objects.get(codename='view_subject_details'))
# Lecturers
self.lecturer_benavides = Profesor.objects.create(
username='benavides',
email='benavides@us.es',
categoria='Profesor Titular de Universidad',
telefono='954559897',
despacho='F 0.48',
web='http://www.lsi.us.es/~dbc/',
first_name='David',
last_name='Benavides Cuevas',
tutoriaactivada=True,
dni='55555555X'
)
self.lecturer_benavides.set_password('practica')
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_benavides.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_corchuelo = Profesor.objects.create(
username='corchu',
email='corchu@us.es',
categoria='Profesor Titular de Universidad',
telefono='954552770',
despacho='F 1.63',
first_name='Rafael',
last_name='Corchuelo Gil',
web='https://www.lsi.us.es/personal/pagina_personal.php?id=12',
tutoriaactivada=True,
dni='66666666X'
)
self.lecturer_corchuelo.set_password('practica')
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_corchuelo.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_muller = Profesor.objects.create(
username='cmuller',
email='cmuller@lsi.us.es',
categoria='Becario FPI',
telefono='954553868',
despacho='F 0.43',
first_name='Carlos',
last_name='Muller Cejas',
web='https://www.lsi.us.es/personal/pagina_personal.php?id=108',
tutoriaactivada=True,
dni='77777777X'
)
self.lecturer_muller.set_password('practica')
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_muller.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.lecturer_veronica = Profesor.objects.create(
username='averonica',
email='cmuller@lsi.us.es',
categoria='Profesor Titular de Universidad ',
telefono='954557095 ',
despacho='G 1.69',
first_name='Ana Veronica',
last_name='Medina Rodriguez',
web='http://www.dte.us.es/personal/vmedina/',
tutoriaactivada=True,
dni='88888888X'
)
self.lecturer_veronica.set_password('practica')
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='profesor'))
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='view_certification_list'))
self.lecturer_veronica.user_permissions.add(Permission.objects.get(codename='view_tutorial_request_list'))
self.impart_ispp_corchu = Imparteasignatura.objects.create(
cargo='Coordinador',
profesor=self.lecturer_corchuelo,
asignatura=self.subject_ispp
)
self.impart_ispp_muller = Imparteasignatura.objects.create(
cargo='Profesor',
profesor=self.lecturer_muller,
asignatura=self.subject_ispp
)
self.impart_ispp_benavides = Imparteasignatura.objects.create(
cargo='Coordinador',
profesor=self.lecturer_benavides,
asignatura=self.subject_ispp
)
self.impart_egc_benavides = Imparteasignatura.objects.create(
cargo='Coordinador',
profesor=self.lecturer_benavides,
asignatura=self.subject_egc
)
self.student_carborgar.asignaturas = [self.subject_egc, self.subject_ispp]
self.student_juamaiosu.asignaturas = [self.subject_egc]
def test_get_student_subjects_ok_1(self):
subjects = list(SubjectService.get_student_subjects(self.student_carborgar.id))
subjects1 = [self.subject_egc, self.subject_ispp]
self.assertListEqual(subjects, subjects1)
def test_get_lecturer_subjects_ok_1(self):
subjects = list(SubjectService.get_lecturer_subjects(self.lecturer_benavides.id))
subjects1 = [self.subject_egc, self.subject_ispp]
self.assertListEqual(subjects, subjects1)
def test_create_and_save_ok_1(self):
data_form = {
'name': 'Prueba',
'course': '1',
'code': '5',
'quarter': '1',
'credits': '6',
'web': 'http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
'duration': 'C',
'type': 'OB',
'departament': self.department_lsi.id,
}
form = SubjectEditForm(data=data_form)
self.assertEqual(form.is_valid(), True)
subject = SubjectService.create(form)
SubjectService.save(subject)
subject_bd = Asignatura.objects.get(codigo=subject.codigo)
self.assertEqual(subject, subject_bd)
def test_create_and_save_error_1(self):
data_form = {
'name': 'Prueba',
'course': '1',
'code': '4',
'quarter': '1',
'credits': '6',
'web': 'http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
'duration': 'C',
'type': 'OB',
'departament': self.department_lsi.id,
}
form = SubjectEditForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_create_and_save_error_2(self):
data_form = {
'name': 'Prueba',
'course': '10',
'code': '5',
'quarter': '1',
'credits': '6',
'web': 'http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
'duration': 'C',
'type': 'OB',
'departament': self.department_lsi.id,
}
form = SubjectEditForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_create_and_save_error_3(self):
data_form = {
'name': 'Prueba',
'course': '1',
'code': '5',
'quarter': '8',
'credits': '6',
'web': 'http://www.lsi.us.es/docencia/pagina_asignatura.php?id=110',
'duration': 'C',
'type': 'OB',
'departament': self.department_lsi.id,
}
form = SubjectEditForm(data=data_form)
self.assertEqual(form.is_valid(), False)
def test_find_by_code_ok_1(self):
subject = SubjectService.find_by_code(self.subject_ispp.codigo)
self.assertEqual(subject, self.subject_ispp)
def test_find_by_code_error_1(self):
subject = SubjectService.find_by_code('5')
self.assertEqual(subject, None)
def test_find_one_ok_1(self):
subject = SubjectService.find_one(self.subject_ispp.id)
self.assertEqual(subject, self.subject_ispp)
def test_find_one_error_1(self):
subject = SubjectService.find_one('-1')
self.assertEqual(subject, None)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend')
class EmailTestCase(TestCase):
def test_send_email(self):
try:
mail_sent_success = mail.send_mail('Test',
'Test',
EMAIL_HOST_USER, [EMAIL_HOST_USER],
fail_silently=True)
self.assertEqual(mail_sent_success, 1)
except Exception:
            self.fail('The email could not be sent')
| carborgar/gestionalumnostfg | principal/tests.py | Python | mit | 52,227 | 0.002642 |
"""
The MIT License (MIT)
Copyright (c) Serenity Software, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from cahoots.parsers.base import BaseParser
from cahoots.util import truncate_text
from cahoots.config import BaseConfig
from cahoots.confidence.normalizer import HierarchicalNormalizerChain
import datetime
import threading
import time
import inspect
class ParserThread(threading.Thread):
"""Represents a thread that will handle one parser parsing request"""
config = None
data_string = None
results = []
def __init__(self, config, module, data_string):
"""
:param config: cahoots config
:type config: BaseConfig
:param module: the module this thread will be parsing with
:type module: BaseParser
:param data_string: data that we want to parse
:type data_string: str
"""
self.config = config
self.thread_id = module
        self.data_string = data_string
        # Each thread gets its own results list (avoids sharing the class attribute)
        self.results = []
threading.Thread.__init__(self)
def run(self):
"""
Executes this parser thread.
"""
parser = self.thread_id(self.config)
self.results = parser.parse(self.data_string) or []
class CahootsParser(object):
"""Kicks off the parsing process"""
config = None
def __init__(self, config=None, bootstrap=False):
"""
The 'config' variable, if set, needs to be a class that extends
BaseConfig, or an instance of a class that does.
In the case that it's a class, we will instantiate the class.
:param config: cahoots config
:type config: BaseConfig
:param bootstrap: Whether we want to auto-bootstrap cahoots
:type bootstrap: boolean
"""
if config is not None:
if inspect.isclass(config) and issubclass(config, BaseConfig):
self.config = config()
elif isinstance(config, BaseConfig):
self.config = config
# Config fallback
if self.config is None:
self.config = BaseConfig()
# This bootstraps our parsing system and
# gets all modules ready for parsing.
if bootstrap:
self.bootstrap(self.config)
@classmethod
def bootstrap(cls, config):
"""
Bootstraps each parser. Can be used for cache warming, etc.
:param config: cahoots config
:type config: BaseConfig
"""
for module in config.enabled_modules:
# If the module overrides the base bootstrap,
# we output a message about it
if module.bootstrap != BaseParser.bootstrap and config.debug:
print(' * ' + time.strftime('%X %x %Z') +
' * Bootstrapping '+module.__name__)
module.bootstrap(config)
def parse(self, data_string):
"""
Parses input data and returns a dict of result data
:param data_string: the string we want to parse
:type data_string: str
:return: yields parse result data if there is any
:rtype: dict
"""
start_time = time.time()
results = []
threads = []
# Creating/starting a thread for each parser module
for module in self.config.enabled_modules:
thread = ParserThread(self.config, module, data_string)
thread.start()
threads.append(thread)
# Synchronizing/finishing parser threads
for thr in threads:
thr.join()
# The threads are done, let's get the results out of them
for thr in threads:
results.extend(thr.results)
# Unique list of all major types
types = list(set([result.type for result in results]))
if results:
# Getting a unique list of result types.
all_types = []
for res in results:
all_types.extend([res.type, res.subtype])
# Hierarchical Confidence Normalization
normalizer_chain = HierarchicalNormalizerChain(
self.config,
types,
list(set(all_types))
)
results = normalizer_chain.normalize(results)
# Sorting our results by confidence value
results = sorted(
results,
key=lambda result: result.confidence,
reverse=True
)
return {
'query': truncate_text(data_string),
'date': datetime.datetime.now(),
'execution_seconds': time.time() - start_time,
'top': results[0] if len(results) > 0 else None,
'results': {
'count': len(results),
'types': types,
'matches': results
}
}
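# --- Illustrative usage sketch (not part of the original module) ---
# Assuming a BaseConfig whose enabled_modules contains at least one parser,
# the classes above could be driven like this:
#
#     parser = CahootsParser(bootstrap=True)
#     report = parser.parse('10/1/2015')
#     print(report['results']['count'], report['top'])
#
# All names here mirror the definitions above; the sample input string is
# arbitrary.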
| hickeroar/cahoots | cahoots/parser.py | Python | mit | 5,798 | 0 |
import sys
import numpy
import matplotlib
import matplotlib.pyplot
import scipy.stats
import library
def colorDefiner(epoch):
if epoch == '0':
theColor='blue'
elif epoch == '0.5':
theColor='red'
elif epoch == '1':
theColor='green'
elif epoch == '1.5':
theColor='orange'
    else:
        print('error from colorDefiner. exiting...')
        sys.exit()
return theColor
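# Design note (sketch, not used above): a dict lookup would express this
# mapping more compactly, e.g.
#     EPOCH_COLORS = {'0': 'blue', '0.5': 'red', '1': 'green', '1.5': 'orange'}
#     theColor = EPOCH_COLORS.get(epoch)  # None signals an unknown epoch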
def dataGrapherEpochs(dataStructure,figureLabel):
resolution=1000
figureFile='results/figure_%s.pdf'%figureLabel
for epochLabel in dataStructure:
epoch=epochLabel.split('_')[0]
localTime=numpy.array(dataStructure[epochLabel][0])
shiftedTime=localTime-min(localTime)
localCells=dataStructure[epochLabel][1]
highResolutionTime=numpy.linspace(min(shiftedTime),max(shiftedTime),resolution)
epochColor=colorDefiner(epoch)
# plotting the data
if len(localCells) > 1:
matplotlib.pyplot.plot(localTime,localCells,'o',color=epochColor,markeredgecolor='None',ms=4)
# plotting the model if there is growth, otherwise plot a best model straight line
if len(localCells) <= 2:
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[localCells[0],localCells[-1]],'-',color=epochColor)
elif localCells[0] > localCells[-1]:
slope, intercept, temp0, temp1, temp2 = scipy.stats.linregress(shiftedTime,localCells)
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[intercept,slope*shiftedTime[-1]+intercept],'-',color=epochColor)
else:
fittedTrajectory=library.dataFitter(shiftedTime,localCells)
b=library.peval(highResolutionTime,fittedTrajectory[0])
matplotlib.pyplot.plot(highResolutionTime+min(localTime),b,'-',color=epochColor)
matplotlib.pyplot.xlim([-0.5,20])
matplotlib.pyplot.ylim([-0.5e5,18e5])
matplotlib.pyplot.xlabel('time (days)')
matplotlib.pyplot.ylabel('number of cells (x 1e5)')
matplotlib.pyplot.title('%s ppm'%figureLabel)
matplotlib.pyplot.yticks((0,2e5,4e5,6e5,8e5,10e5,12e5,14e5,16e5,18e5),('0','2','4','6','8','10','12','14','16','18'))
matplotlib.pyplot.savefig(figureFile)
matplotlib.pyplot.clf()
return None
### MAIN
# 1. data reading
data300=library.dataReader('data/300ppmSetsLight.v2.txt')
data1000=library.dataReader('data/1000ppmSetsLight.v2.txt')
# 2. fitting the data to sigmoidal function
print('fitting data for 300 ppm...')
dataGrapherEpochs(data300,'300')
print()
print('fitting data for 1000 ppm...')
dataGrapherEpochs(data1000,'1000')
print('... graphs completed.')
| adelomana/viridis | growthAnalysis/epochGrapher.py | Python | gpl-2.0 | 2,642 | 0.032173 |
#!/usr/bin/env python
# The outputManager synchronizes the output display for all the various threads
#####################
import threading
class outputStruct():
def __init__( self ):
self.id = 0
self.updateObjSem = None
self.title = ""
self.numOfInc = 0
class outputManager( threading.Thread ):
def __init__( self ):
threading.Thread.__init__(self)
self.outputObjs = dict()
self.outputListLock = threading.Lock()
# Used to assign the next id for an output object
self.nextId = 0
self.isAlive = True
def createOutputObj( self, name, numberOfIncrements ):
raise NotImplementedError('Should have implemented this')
def updateOutputObj( self, objectId ):
raise NotImplementedError('Should have implemented this')
def run (self):
raise NotImplementedError('Should have implemented this')
def stop(self):
self.isAlive = False
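# An illustrative concrete implementation (not part of the original module):
# it satisfies the interface above by printing one line per progress update.
class consoleOutputManager( outputManager ):
    def createOutputObj( self, name, numberOfIncrements ):
        self.outputListLock.acquire()
        try:
            obj = outputStruct()
            obj.id = self.nextId
            obj.title = name
            obj.numOfInc = numberOfIncrements
            self.outputObjs[obj.id] = obj
            self.nextId += 1
        finally:
            self.outputListLock.release()
        return obj.id

    def updateOutputObj( self, objectId ):
        # Print progress immediately instead of signalling via updateObjSem
        obj = self.outputObjs[objectId]
        print('%s: update (of %d increments)' % (obj.title, obj.numOfInc))

    def run( self ):
        # All printing happens in updateOutputObj, so this worker only waits
        # for stop() to flip the flag.
        import time
        while self.isAlive:
            time.sleep(0.1)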
| alexforsale/manga_downloader | src/outputManager/base.py | Python | mit | 881 | 0.059024 |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_conversation_app_async.py
DESCRIPTION:
This sample demonstrates how to analyze user query for intents and entities using a conversation project.
For more info about how to setup a CLU conversation project, see the README.
USAGE:
python sample_analyze_conversation_app_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource.
2) AZURE_CONVERSATIONS_KEY - your CLU API key.
3) AZURE_CONVERSATIONS_PROJECT - the name of your CLU conversations project.
"""
import asyncio
async def sample_analyze_conversation_app_async():
# [START analyze_conversation_app_async]
# import libraries
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysisClient
from azure.ai.language.conversations.models import ConversationAnalysisOptions
# get secrets
conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
conv_key = os.environ["AZURE_CONVERSATIONS_KEY"]
conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"]
# prepare data
query = "One california maki please."
    analysis_input = ConversationAnalysisOptions(
query=query
)
# analyze query
client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key))
async with client:
result = await client.analyze_conversations(
            analysis_input,
project_name=conv_project,
deployment_name='production'
)
# view result
print("query: {}".format(result.query))
print("project kind: {}\n".format(result.prediction.project_kind))
print("view top intent:")
print("\ttop intent: {}".format(result.prediction.top_intent))
print("\tcategory: {}".format(result.prediction.intents[0].category))
print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score))
print("view entities:")
for entity in result.prediction.entities:
print("\tcategory: {}".format(entity.category))
print("\ttext: {}".format(entity.text))
print("\tconfidence score: {}".format(entity.confidence_score))
# [END analyze_conversation_app_async]
async def main():
await sample_analyze_conversation_app_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| Azure/azure-sdk-for-python | sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py | Python | mit | 2671 | 0.003744 |
# Functions to download yesterday's races and associated raceforms from host
import configparser
import requests
import re
import sys
import time
import pymongo
import random
from datetime import datetime, timedelta
from hracing.db import parse_racesheet
from hracing.db import mongo_insert_race
from hracing.tools import delay_scraping
from hracing.tools import shuffle_ids
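# Note: delay_scraping and shuffle_ids come from hracing.tools and are not
# shown here. delay_scraping is assumed to sleep for whatever remains of a
# minimum duration, roughly:
#
#     def delay_scraping(start_time, min_dur):
#         remaining = min_dur - (time.monotonic() - start_time)
#         if remaining > 0:
#             time.sleep(remaining)
#
# (a hypothetical sketch of the contract, not the actual implementation)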
def download_list_of_races(header,pastdays=3,datestr=None):
""" Fetch a list of all raceIDs and raceURLs listed on host for a given day.
Date is selected either as:
a) pastdays (e.g. pastdays=1 means yesterday).
OR
b) by specifying a datestr of the format YYYY-MM-DD.
Default is to download races from THREE DAYS AGO, which is useful for
data-base building since this avoids unfinished US/CAN races
    Returns a flat list of raceids and a flat list of raceid_urls
    collected across all race locations."""
# Compose URL
if datestr == None:
d = datetime.today()-timedelta(days=int(pastdays))
datestr = d.strftime('%Y-%m-%d')
yesterdayurl = '/races?date=' + datestr
baseurl = 'https://' + header['host']
url = baseurl + yesterdayurl
# Actual download
tpage=requests.get(url)
#Console feedback
print("Time: " + d.strftime(('%Y-%m-%d-%H-%M')))
print("Import race IDs for " + datestr)
print("From " + url)
#Get list of race-locations (TR)
tr_urls_raw=re.split('\<div class\=\"dayHeader\"\>',tpage.text)
tr_urls=re.findall(
'\<div class\=\"meetingRaces\" '
'data-url\=\"(/meetings/meeting\?id\=\d+)\">',
tr_urls_raw[1])
# Loop through race-locations, get raceIDs and urls
raceid_urls=[]
raceids=[]
for tr_url in tr_urls:
url=baseurl+tr_url
temp_race=requests.get(url)
raceid_urls.extend(
re.findall(
'\<li\sclass\=\"raceli\s*status_.*\s*clearfix\"\s*data-url\=\"'
'(\/race\?id\=\d*\&country\=.+\&track\=.*\&date=\d\d\d\d-\d\d-\d\d)\"',
temp_race.text))
raceids.extend(
re.findall(
'\<li\sclass\=\"raceli\s*status_.*\s*clearfix\"\s*data-url\=\"'
'\/race\?id\=(\d*)\&country\=.+\&track\=.*\&date=\d\d\d\d-\d\d-\d\d\"',
temp_race.text))
print("Finished importing raceIDs: " + d.strftime(('%Y-%m-%d-%H-%M')))
return raceids, raceid_urls
def scrape_races(raceids,raceid_urls,header,payload):
""" Fetch a list of all races from host for a given day.
Date is selected either as:
a) pastdays (e.g. pastdays=1 means yesterday).
OR
b) by specifying a datestr of the format YYYY-MM-DD.
Default is to download races from TWO DAYS AGO, which is useful for
data-base building since this avoids US/CAN races are not finished
Return a list of raceids and raceid_urls, which are clustered according to race-location"""
baseurl='https://'+header['host']
race_min_dur=40 # minimum time(s)/race download to avoid getting kicked
form_min_dur=10 # minimum time(s)/form download to avoid getting kicked
    reconnect_dur=500 # minimum time in s to wait before reconnecting after losing connection
d=datetime.today()
a=time.monotonic()
tries=1
#Shuffle order of races
raceids,raceid_urls=shuffle_ids(raceids,raceid_urls)
#Open new session...
with requests.Session() as s:
p = s.post(baseurl+'/auth/validatepostajax',
headers = header,
data=payload)
#For each race location...
for (i, raceid_url) in enumerate(raceid_urls):
if not re.search('"login":true',p.text):
with requests.Session() as s:
p = s.post(baseurl+'/auth/validatepostajax',
headers = header,
data=payload)
try:
#For each single race...
print("Start downloading race_ID: "+raceids[i]+
" ("+str(i) +"/"+str(len(raceid_urls))+")")
#Check current time
start_time=time.monotonic()
#Get current racesheet
racesheet=s.get(baseurl+raceid_url,
headers = header,
cookies=s.cookies)
#Get horseforms urls for that race
horseform_urls=(re.findall("window.open\(\'(.+?)', \'Formguide\'",
racesheet.text))
forms=[]
#Get horseforms-sheets for that race
for (k, horseform_url) in enumerate(horseform_urls):
start_time_2=time.monotonic()
forms.append(s.get(baseurl+horseform_url,
headers = header,
cookies=s.cookies))
delay_scraping(start_time_2,form_min_dur)
# Try parsing current race and add to mogodb. If something fails
# Save race as .txt in folder for troubleshooting.
# UNCOMMENT TRY/EXCEPT WHEN UP AND RUNNING
#try:
race=parse_racesheet(racesheet,forms)
mongo_insert_race(race)
# except Exception as e:
# #Save raw html text to file for debugging purposes, overwrite every time
# errordump='../hracing_private/failed_parsing/'
# rawtextFilename=errordump+str(raceids[i][j])+'.txt'
# print('Error when parsing race_ID: '+str(raceids[i][j])+'. Page saved in '+errordump)
# print('Error msg for '+str(raceids[i][j])+': \n'+str(e))
#
# with open(rawtextFilename, 'wb') as text_file:
# text_file.write(racesheet.content)
delay_scraping(start_time,race_min_dur)# Slow scraping to avoid getting kicked from server.
# Print current runtime, current race, and number of forms extracted
print("Finished: " +str(time.monotonic()-a))
# +" n forms: "+str(len(curr_forms)))
#Exception of Request
except requests.exceptions.RequestException as e:
print(e)
tries=tries+1
                time.sleep(reconnect_dur) # back off before the next attempt
                print("Download exception, trying to continue after backoff "
                      +d.strftime('%Y-%m-%d-%H-%M'))
if tries > 10:
print(str(tries) + "Download exceptions, exiting loop")
break
print("Finished: Download race xmls: "
+ d.strftime('%Y-%m-%d-%H-%M'))
def get_races_IDs_not_in_db(raceids, raceid_urls):
    client = pymongo.MongoClient()
    db = client.races
    race_IDs_db = set()
    for race in db.races.find({}, {'race_ID': 1, '_id': 0}):
        race_IDs_db.add(race['race_ID'])
    # Keep only races whose IDs are not yet in the database
    novel = [(rid, url) for rid, url in zip(raceids, raceid_urls)
             if int(rid) not in race_IDs_db]
    if not novel:
        return [], []
    novel_raceids, novel_raceid_urls = zip(*novel)
    return list(novel_raceids), list(novel_raceid_urls)
def main():
# get scraping target and login info from config file
configFile='../hracing_private/scraping_payload.ini'
pageConfig = configparser.ConfigParser()
pageConfig.read(configFile)
header=dict(pageConfig['header'])
payload=dict(pageConfig['payload'])
raceids, raceid_urls = download_list_of_races(header)
filtered_raceids, filtered_raceid_urls = get_races_IDs_not_in_db(raceids,raceid_urls)
scrape_races(filtered_raceids, filtered_raceid_urls, header, payload)
if __name__ == "__main__":
main()
| mzunhammer/hracing | hracing/scrape.py | Python | mit | 7,747 | 0.023106 |
"""
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
import json
from nose.plugins.attrib import attr
from studio.base_studio_test import ContainerBase
from ..pages.studio.settings_group_configurations import GroupConfigurationsPage
from ..pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ..fixtures.course import XBlockFixtureDesc
from ..fixtures import LMS_BASE_URL
from ..pages.studio.component_editor import ComponentVisibilityEditorView
from ..pages.lms.instructor_dashboard import InstructorDashboardPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from ..tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from bok_choy.promise import EmptyPromise
@attr('shard_1')
class EndToEndCohortedCoursewareTest(ContainerBase):
def setUp(self, is_staff=True):
super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.content_group_a = "Content Group A"
self.content_group_b = "Content Group B"
# Create a student who will be in "Cohort A"
self.cohort_a_student_username = "cohort_a_student"
self.cohort_a_student_email = "cohort_a_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
).visit()
# Create a student who will be in "Cohort B"
self.cohort_b_student_username = "cohort_b_student"
self.cohort_b_student_email = "cohort_b_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
).visit()
# Create a student who will end up in the default cohort group
self.cohort_default_student_username = "cohort_default_student"
self.cohort_default_student_email = "cohort_default_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_default_student_username,
email=self.cohort_default_student_email, no_login=True
).visit()
# Start logged in as the staff user.
StudioAutoAuthPage(
self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
).visit()
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.group_a_problem = 'GROUP A CONTENT'
self.group_b_problem = 'GROUP B CONTENT'
self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
)
)
)
)
def enable_cohorting(self, course_fixture):
"""
Enables cohorting for the current course.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings' # pylint: disable=protected-access
data = json.dumps({'is_cohorted': True})
response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to enable cohorts")
def create_content_groups(self):
"""
Creates two content groups in Studio Group Configurations Settings.
"""
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
group_configurations_page.create_first_content_group()
config = group_configurations_page.content_groups[0]
config.name = self.content_group_a
config.save()
group_configurations_page.add_content_group()
config = group_configurations_page.content_groups[1]
config.name = self.content_group_b
config.save()
def link_problems_to_content_groups_and_publish(self):
"""
Updates 3 of the 4 existing problems to limit their visibility by content group.
Publishes the modified units.
"""
container_page = self.go_to_unit_page()
def set_visibility(problem_index, content_group, second_content_group=None):
problem = container_page.xblocks[problem_index]
problem.edit_visibility()
if second_content_group:
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(
second_content_group, save=False
)
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(content_group)
set_visibility(1, self.content_group_a)
set_visibility(2, self.content_group_b)
set_visibility(3, self.content_group_a, self.content_group_b)
container_page.publish_action.click()
def create_cohorts_and_assign_students(self):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one student.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
def add_cohort_with_student(cohort_name, content_group, student):
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
).fulfill()
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
def view_cohorted_content_as_different_users(self):
"""
View content as staff, student in Cohort A, student in Cohort B, and student in Default Cohort.
"""
courseware_page = CoursewarePage(self.browser, self.course_id)
def login_and_verify_visible_problems(username, email, expected_problems):
LmsAutoAuthPage(
self.browser, username=username, email=email, course_id=self.course_id
).visit()
courseware_page.visit()
verify_expected_problem_visibility(self, courseware_page, expected_problems)
login_and_verify_visible_problems(
self.staff_user["username"], self.staff_user["email"],
[self.group_a_problem, self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_a_student_username, self.cohort_a_student_email,
[self.group_a_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_b_student_username, self.cohort_b_student_email,
[self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_default_student_username, self.cohort_default_student_email,
[self.visible_to_all_problem]
)
def test_cohorted_courseware(self):
"""
Scenario: Can create content that is only visible to students in particular cohorts
        Given that I have a course with 4 problems, 1 staff member, and 3 students
When I enable cohorts in the course
And I create two content groups, Content Group A, and Content Group B, in the course
And I link one problem to Content Group A
And I link one problem to Content Group B
And I link one problem to both Content Group A and Content Group B
And one problem remains unlinked to any Content Group
And I create two manual cohorts, Cohort A and Cohort B,
linked to Content Group A and Content Group B, respectively
And I assign one student to each manual cohort
And one student remains in the default cohort
Then the staff member can see all 4 problems
And the student in Cohort A can see all the problems except the one linked to Content Group B
And the student in Cohort B can see all the problems except the one linked to Content Group A
        And the student in the default cohort can only see the problem that is unlinked to any Content Group
"""
self.enable_cohorting(self.course_fixture)
self.create_content_groups()
self.link_problems_to_content_groups_and_publish()
self.create_cohorts_and_assign_students()
self.view_cohorted_content_as_different_users()
| beni55/edx-platform | common/test/acceptance/tests/test_cohorted_courseware.py | Python | agpl-3.0 | 9,754 | 0.003793 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_health_check
description:
- Health Checks determine whether instances are responsive and able to do work.
- They are an important part of a comprehensive load balancing configuration, as they
enable monitoring instances behind load balancers.
- Health Checks poll instances at a specified interval. Instances that do not respond
successfully to some number of probes in a row are marked as unhealthy. No new connections
are sent to unhealthy instances, though existing connections will continue. The
health check will continue to poll unhealthy instances. If an instance later responds
successfully to some number of consecutive probes, it is marked healthy again and
can receive new connections.
short_description: Creates a GCP HealthCheck
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
check_interval_sec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
required: false
default: '5'
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
required: false
default: '2'
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
timeout_sec:
description:
- How long (in seconds) to wait before claiming failure.
    - The default value is 5 seconds. It is invalid for timeoutSec to have a greater
value than checkIntervalSec.
required: false
default: '5'
aliases:
- timeout_seconds
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
required: false
default: '2'
type:
description:
- Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not
specified, the default is TCP. Exactly one of the protocol-specific health check
field must be specified, which must match type field.
required: false
choices:
- TCP
- SSL
- HTTP
- HTTPS
http_health_check:
description:
- A nested object resource.
required: false
suboptions:
host:
description:
- The value of the host header in the HTTP health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
required: false
request_path:
description:
- The request path of the HTTP health check request.
- The default value is /.
required: false
default: "/"
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
required: false
port:
description:
- The TCP port number for the HTTP health check request.
- The default value is 80.
required: false
port_name:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
required: false
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
required: false
default: NONE
choices:
- NONE
- PROXY_V1
https_health_check:
description:
- A nested object resource.
required: false
suboptions:
host:
description:
- The value of the host header in the HTTPS health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
required: false
request_path:
description:
- The request path of the HTTPS health check request.
- The default value is /.
required: false
default: "/"
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
required: false
port:
description:
- The TCP port number for the HTTPS health check request.
- The default value is 443.
required: false
port_name:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
required: false
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
required: false
default: NONE
choices:
- NONE
- PROXY_V1
tcp_health_check:
description:
- A nested object resource.
required: false
suboptions:
request:
description:
- The application data to send once the TCP connection has been established
(default value is empty). If both request and response are empty, the connection
establishment alone will indicate health. The request data can only be ASCII.
required: false
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
required: false
port:
description:
- The TCP port number for the TCP health check request.
- The default value is 443.
required: false
port_name:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
required: false
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
required: false
default: NONE
choices:
- NONE
- PROXY_V1
ssl_health_check:
description:
- A nested object resource.
required: false
suboptions:
request:
description:
- The application data to send once the SSL connection has been established
(default value is empty). If both request and response are empty, the connection
establishment alone will indicate health. The request data can only be ASCII.
required: false
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
required: false
port:
description:
- The TCP port number for the SSL health check request.
- The default value is 443.
required: false
port_name:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
required: false
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
required: false
default: NONE
choices:
- NONE
- PROXY_V1
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/latest/healthChecks)'
- 'Official Documentation: U(https://cloud.google.com/load-balancing/docs/health-checks)'
'''
EXAMPLES = '''
- name: create a health check
gcp_compute_health_check:
name: "test_object"
type: TCP
tcp_health_check:
port_name: service-health
request: ping
response: pong
healthy_threshold: 10
timeout_sec: 2
unhealthy_threshold: 5
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
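# A second, illustrative example (not from the generated module docs): an HTTP
# health check built from the documented http_health_check suboptions.
- name: create an HTTP health check
  gcp_compute_health_check:
      name: "http-hc"
      type: HTTP
      http_health_check:
        port: 80
        request_path: /healthz
      check_interval_sec: 10
      timeout_sec: 5
      project: "test_project"
      auth_kind: "serviceaccount"
      service_account_file: "/tmp/auth.pem"
      state: present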
'''
RETURN = '''
checkIntervalSec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
healthyThreshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
returned: success
type: int
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
timeoutSec:
description:
- How long (in seconds) to wait before claiming failure.
  - The default value is 5 seconds. It is invalid for timeoutSec to have a greater value
than checkIntervalSec.
returned: success
type: int
unhealthyThreshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
returned: success
type: int
type:
description:
- Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not
specified, the default is TCP. Exactly one of the protocol-specific health check
field must be specified, which must match type field.
returned: success
type: str
httpHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
host:
description:
- The value of the host header in the HTTP health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
returned: success
type: str
requestPath:
description:
- The request path of the HTTP health check request.
- The default value is /.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the HTTP health check request.
- The default value is 80.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
httpsHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
host:
description:
- The value of the host header in the HTTPS health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
returned: success
type: str
requestPath:
description:
- The request path of the HTTPS health check request.
- The default value is /.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the HTTPS health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
tcpHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
request:
description:
- The application data to send once the TCP connection has been established
(default value is empty). If both request and response are empty, the connection
establishment alone will indicate health. The request data can only be ASCII.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the TCP health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
sslHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
request:
description:
- The application data to send once the SSL connection has been established
(default value is empty). If both request and response are empty, the connection
establishment alone will indicate health. The request data can only be ASCII.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the SSL health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
check_interval_sec=dict(default=5, type='int'),
description=dict(type='str'),
healthy_threshold=dict(default=2, type='int'),
name=dict(required=True, type='str'),
timeout_sec=dict(default=5, type='int', aliases=['timeout_seconds']),
unhealthy_threshold=dict(default=2, type='int'),
type=dict(type='str', choices=['TCP', 'SSL', 'HTTP', 'HTTPS']),
http_health_check=dict(type='dict', options=dict(
host=dict(type='str'),
request_path=dict(default='/', type='str'),
response=dict(type='str'),
port=dict(type='int'),
port_name=dict(type='str'),
proxy_header=dict(default='NONE', type='str', choices=['NONE', 'PROXY_V1'])
)),
https_health_check=dict(type='dict', options=dict(
host=dict(type='str'),
request_path=dict(default='/', type='str'),
response=dict(type='str'),
port=dict(type='int'),
port_name=dict(type='str'),
proxy_header=dict(default='NONE', type='str', choices=['NONE', 'PROXY_V1'])
)),
tcp_health_check=dict(type='dict', options=dict(
request=dict(type='str'),
response=dict(type='str'),
port=dict(type='int'),
port_name=dict(type='str'),
proxy_header=dict(default='NONE', type='str', choices=['NONE', 'PROXY_V1'])
)),
ssl_health_check=dict(type='dict', options=dict(
request=dict(type='str'),
response=dict(type='str'),
port=dict(type='int'),
port_name=dict(type='str'),
proxy_header=dict(default='NONE', type='str', choices=['NONE', 'PROXY_V1'])
))
),
mutually_exclusive=[['http_health_check', 'https_health_check', 'ssl_health_check', 'tcp_health_check']]
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#healthCheck'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#healthCheck',
u'checkIntervalSec': module.params.get('check_interval_sec'),
u'description': module.params.get('description'),
u'healthyThreshold': module.params.get('healthy_threshold'),
u'name': module.params.get('name'),
u'timeoutSec': module.params.get('timeout_sec'),
u'unhealthyThreshold': module.params.get('unhealthy_threshold'),
u'type': module.params.get('type'),
u'httpHealthCheck': HealthCheckHttphealthcheck(module.params.get('http_health_check', {}), module).to_request(),
u'httpsHealthCheck': HealthCheckHttpshealthcheck(module.params.get('https_health_check', {}), module).to_request(),
u'tcpHealthCheck': HealthCheckTcphealthcheck(module.params.get('tcp_health_check', {}), module).to_request(),
u'sslHealthCheck': HealthCheckSslhealthcheck(module.params.get('ssl_health_check', {}), module).to_request()
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/healthChecks/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/healthChecks".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'checkIntervalSec': response.get(u'checkIntervalSec'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'healthyThreshold': response.get(u'healthyThreshold'),
u'id': response.get(u'id'),
u'name': module.params.get('name'),
u'timeoutSec': response.get(u'timeoutSec'),
u'unhealthyThreshold': response.get(u'unhealthyThreshold'),
u'type': response.get(u'type'),
u'httpHealthCheck': HealthCheckHttphealthcheck(response.get(u'httpHealthCheck', {}), module).from_response(),
u'httpsHealthCheck': HealthCheckHttpshealthcheck(response.get(u'httpsHealthCheck', {}), module).from_response(),
u'tcpHealthCheck': HealthCheckTcphealthcheck(response.get(u'tcpHealthCheck', {}), module).from_response(),
u'sslHealthCheck': HealthCheckSslhealthcheck(response.get(u'sslHealthCheck', {}), module).from_response()
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#healthCheck')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation')
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class HealthCheckHttphealthcheck(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'host': self.request.get('host'),
u'requestPath': self.request.get('request_path'),
u'response': self.request.get('response'),
u'port': self.request.get('port'),
u'portName': self.request.get('port_name'),
u'proxyHeader': self.request.get('proxy_header')
})
def from_response(self):
return remove_nones_from_dict({
u'host': self.request.get(u'host'),
u'requestPath': self.request.get(u'requestPath'),
u'response': self.request.get(u'response'),
u'port': self.request.get(u'port'),
u'portName': self.request.get(u'portName'),
u'proxyHeader': self.request.get(u'proxyHeader')
})
class HealthCheckHttpshealthcheck(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'host': self.request.get('host'),
u'requestPath': self.request.get('request_path'),
u'response': self.request.get('response'),
u'port': self.request.get('port'),
u'portName': self.request.get('port_name'),
u'proxyHeader': self.request.get('proxy_header')
})
def from_response(self):
return remove_nones_from_dict({
u'host': self.request.get(u'host'),
u'requestPath': self.request.get(u'requestPath'),
u'response': self.request.get(u'response'),
u'port': self.request.get(u'port'),
u'portName': self.request.get(u'portName'),
u'proxyHeader': self.request.get(u'proxyHeader')
})
class HealthCheckTcphealthcheck(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'request': self.request.get('request'),
u'response': self.request.get('response'),
u'port': self.request.get('port'),
u'portName': self.request.get('port_name'),
u'proxyHeader': self.request.get('proxy_header')
})
def from_response(self):
return remove_nones_from_dict({
u'request': self.request.get(u'request'),
u'response': self.request.get(u'response'),
u'port': self.request.get(u'port'),
u'portName': self.request.get(u'portName'),
u'proxyHeader': self.request.get(u'proxyHeader')
})
class HealthCheckSslhealthcheck(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'request': self.request.get('request'),
u'response': self.request.get('response'),
u'port': self.request.get('port'),
u'portName': self.request.get('port_name'),
u'proxyHeader': self.request.get('proxy_header')
})
def from_response(self):
return remove_nones_from_dict({
u'request': self.request.get(u'request'),
u'response': self.request.get(u'response'),
u'port': self.request.get(u'port'),
u'portName': self.request.get(u'portName'),
u'proxyHeader': self.request.get(u'proxyHeader')
})
if __name__ == '__main__':
main()
| orgito/ansible | lib/ansible/modules/cloud/google/gcp_compute_health_check.py | Python | gpl-3.0 | 30,735 | 0.003677 |
# -*- coding: utf-8 -*-
import requests
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth import Auth
from website import util
from website import settings
from website.project import new_node
from website.models import Node, MailRecord
def record_message(message, nodes_created, users_created):
record = MailRecord.objects.create(
data=message.raw,
)
    record.users_created.add(*users_created)
record.nodes_created.add(*nodes_created)
record.save()
def get_or_create_node(title, user):
"""Get or create node by title and creating user.
:param str title: Node title
:param User user: User creating node
:return: Tuple of (node, created)
"""
try:
node = Node.find_one(
Q('title', 'iexact', title)
& Q('contributors', 'eq', user)
)
return node, False
except ModularOdmException:
node = new_node('project', title, user)
return node, True
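# Illustrative sketch (not part of the original module): the lookup above is
# case-insensitive on title and scoped to the creating user, so repeated calls
# with the same arguments are idempotent.
def _example_get_or_create(user):
    node, created = get_or_create_node('SciPy Sprints', user)
    again, created_again = get_or_create_node('scipy sprints', user)
    assert again == node and created_again is False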
def provision_node(conference, message, node, user):
"""
:param Conference conference:
:param ConferenceMessage message:
:param Node node:
:param User user:
"""
auth = Auth(user=user)
node.update_node_wiki('home', message.text, auth)
if conference.admins.exists():
node.add_contributors(prepare_contributors(conference.admins.all()), log=False)
if not message.is_spam and conference.public_projects:
node.set_privacy('public', meeting_creation=True, auth=auth)
node.add_tag(message.conference_name, auth=auth)
node.add_tag(message.conference_category, auth=auth)
for systag in ['emailed', message.conference_name, message.conference_category]:
node.add_system_tag(systag, save=False)
if message.is_spam:
node.add_system_tag('spam', save=False)
node.save()
def prepare_contributors(admins):
return [
{
'user': admin,
'permissions': ['read', 'write', 'admin'],
'visible': False,
}
for admin in admins
]
def upload_attachment(user, node, attachment):
attachment.seek(0)
name = '/' + (attachment.filename or settings.MISSING_FILE_NAME)
content = attachment.read()
upload_url = util.waterbutler_url_for('upload', 'osfstorage', name, node, user=user, _internal=True)
requests.put(
upload_url,
data=content,
)
def upload_attachments(user, node, attachments):
for attachment in attachments:
upload_attachment(user, node, attachment)
| monikagrabowska/osf.io | website/conferences/utils.py | Python | apache-2.0 | 2,561 | 0.001171 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from future import Future
from object_store import ObjectStore
class TestObjectStore(ObjectStore):
'''An object store which records its namespace and behaves like a dict, for
testing.
'''
def __init__(self, namespace):
self.namespace = namespace
self._store = {}
  def SetMulti(self, mapping, **optargs):
self._store.update(mapping)
def GetMulti(self, keys, **optargs):
return Future(value=dict((k, v) for k, v in self._store.items()
if k in keys))
def Delete(self, key):
del self._store[key]
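# Minimal usage sketch (assumes the Future imported above exposes Get(), as in
# the other server2 object stores):
def _ExampleUsage():
  store = TestObjectStore('test')
  store.SetMulti({'a': 1, 'b': 2})
  assert store.GetMulti(('a',)).Get() == {'a': 1}
  store.Delete('a')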
| codenote/chromium-test | chrome/common/extensions/docs/server2/test_object_store.py | Python | bsd-3-clause | 727 | 0.009629 |
input = """
q(a,b,c).
q(b,c,a).
q(c,b,c).
q(b,b,c).
q(a,b,b).
q(c,a,a).
s(c,b).
s(a,b).
s(a,c).
s(c,c).
t(a).
t(b).
w(b,c).
w(b,b).
w(a,a).
p(X,Y) :- q(X,b,Z), r(Z,b,Y), not r(X,Y,Z).
m(X,Y) :- u(a,X,Y,Y,c,X).
v(X) :- s(a,X), not t(X).
n(X,X) :- q(X,b,X).
r(X,Y,Z) :- t(a), s(X,Z), w(X,Y), not p(Z,Y).
"""
output = """
{n(c,c), q(a,b,b), q(a,b,c), q(b,b,c), q(b,c,a), q(c,a,a), q(c,b,c), r(a,a,b), r(a,a,c), s(a,b), s(a,c), s(c,b), s(c,c), t(a), t(b), v(c), w(a,a), w(b,b), w(b,c)}
"""
| Yarrick13/hwasp | tests/wasp1/AllAnswerSets/bug_09.test.py | Python | apache-2.0 | 491 | 0.002037 |
# -*- coding: utf-8 -*-
#
# twoneurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
# matplotlib.use("macosx")
import pylab
import nest
import nest.voltage_trace
weight=20.0
delay=1.0
stim=1000.0
neuron1 = nest.Create("iaf_neuron")
neuron2 = nest.Create("iaf_neuron")
voltmeter = nest.Create("voltmeter")
nest.SetStatus(neuron1, {"I_e": stim})
nest.Connect(neuron1, neuron2, syn_spec={'weight':weight, 'delay':delay})
nest.Connect(voltmeter, neuron2)
nest.Simulate(100.0)
nest.voltage_trace.from_device(voltmeter)
nest.voltage_trace.show()
| kristoforcarlson/nest-simulator-fork | pynest/examples/twoneurons.py | Python | gpl-2.0 | 1,209 | 0.004136 |
# -*- coding: utf-8 -*-
""" A context manager for managing things injected into __builtin__.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
#-----------------------------------------------------------------------------
# Authors:
#
# * Brian Granger
# * Fernando Perez
#
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# Complete license in the file documentation/BSDLicense_IPython.md,
# distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import builtins as builtin_mod
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class __BuiltinUndefined: pass
BuiltinUndefined = __BuiltinUndefined()
class __HideBuiltin: pass
HideBuiltin = __HideBuiltin()
class BuiltinTrap:
""" Protect builtins from code in some environment. """
def __init__(self):
self._orig_builtins = {}
# We define this to track if a single BuiltinTrap is nested.
# Only turn off the trap when the outermost call to __exit__ is made.
self._nested_level = 0
# builtins we always add - if set to HideBuiltin, they will just
# be removed instead of being replaced by something else
self.auto_builtins = {'exit': HideBuiltin,
'quit': HideBuiltin,
}
def __enter__(self):
""" Enter a code segment that should not chane builtins """
if self._nested_level == 0:
self.activate()
self._nested_level += 1
# I return self, so callers can use add_builtin in a with clause.
return self
def __exit__(self, type, value, traceback):
""" Leave a code segment that should not change builtins
@param type:
        @param value:
@param traceback:
"""
if self._nested_level == 1:
self.deactivate()
self._nested_level -= 1
# Returning False will cause exceptions to propagate
return False
def add_builtin(self, key, value):
"""Add a builtin and save the original."""
bdict = builtin_mod.__dict__
orig = bdict.get(key, BuiltinUndefined)
if value is HideBuiltin:
if orig is not BuiltinUndefined: #same as 'key in bdict'
self._orig_builtins[key] = orig
del bdict[key]
else:
self._orig_builtins[key] = orig
bdict[key] = value
def remove_builtin(self, key, orig):
"""Remove an added builtin and re-set the original."""
if orig is BuiltinUndefined:
del builtin_mod.__dict__[key]
else:
builtin_mod.__dict__[key] = orig
def activate(self):
"""Store ipython references in the __builtin__ namespace."""
add_builtin = self.add_builtin
for name, func in iter(self.auto_builtins.items()):
add_builtin(name, func)
def deactivate(self):
"""Remove any builtins which might have been added by add_builtins, or
restore overwritten ones to their previous values."""
remove_builtin = self.remove_builtin
for key, val in iter(self._orig_builtins.items()):
remove_builtin(key, val)
self._orig_builtins.clear()
self._builtins_added = False
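# Usage sketch (illustrative only): within the context, 'exit' and 'quit' are
# removed from the builtins module and restored afterwards; nesting is tracked
# so only the outermost __exit__ deactivates the trap.
def _example_trap():
    trap = BuiltinTrap()
    with trap:
        assert not hasattr(builtin_mod, 'exit')
        with trap:  # inner level: trap already active, nothing is redone
            pass
    # original builtins are restored here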
| drogenlied/qudi | logic/jupyterkernel/builtin_trap.py | Python | gpl-3.0 | 4,344 | 0.003913 |
from utils.face import Face
import pygame
from utils.message import Message
from utils.alarm import Alarm
class Button(pygame.sprite.Sprite):
def __init__(self, rect, color=(0,0,255), action=None):
pygame.sprite.Sprite.__init__(self)
self.color = color
self.action = action
self.rect = pygame.Rect(rect)
self.baseImage = pygame.Surface((self.rect.width, self.rect.height))
self.image = self.baseImage
def update(self):
rect = self.baseImage.get_rect()
pygame.draw.circle(self.baseImage, self.color, rect.center, rect.width/2, 1);
def touchDown(self):
rect = self.baseImage.get_rect()
pygame.draw.circle(self.baseImage, self.color, rect.center, rect.width/2, 0);
def touchUp(self):
rect = self.baseImage.get_rect()
self.image.fill(pygame.Color("black"))
pygame.draw.circle(self.baseImage, self.color, rect.center, rect.width/2, 1);
if self.action is not None:
self.action()
def setAction(self, action):
self.action = action
class Line(Face):
def __init__(self, rect, color=(0,0,255), text=""):
pygame.sprite.Sprite.__init__(self)
self._alarmList = {}
self.color = color
self.rect = pygame.Rect(rect)
self.text = text
self.baseImage = pygame.Surface((self.rect.width, self.rect.height))
self.image = self.baseImage
self.faceSprite = pygame.sprite.GroupSingle(Message((self.text,), vector=(0,0), fontsize=45, align="left", padding=0, fgcolor=(0,0,255)))
surfaceRect = self.image.get_rect()
self.faceSprite.sprite.rect.midleft = surfaceRect.midleft
def update(self):
self.faceSprite.draw(self.baseImage)
class AlarmSetting(Face):
def __init__(self, rect, alarm, color=(0,0,255)):
pygame.sprite.Sprite.__init__(self)
self._alarmList = {}
if isinstance(alarm, Alarm):
self._alarmObject = alarm
else:
raise Exception("Not an Alarm-class object")
self.color = color
self.rect = pygame.Rect(rect)
self.requestingFace = False
self.baseImage = pygame.Surface((self.rect.width, self.rect.height))
self.image = self.baseImage
self._lines = []
for i in range(4):
line = pygame.sprite.GroupSingle(Line(pygame.Rect((0, 0),(rect.height/5*4, rect.height/5)), text="Hello"))
line.sprite.rect.topright = (rect.width, rect.height/4*i)
self._lines.append(line)
def addAlarm(self):
line = pygame.sprite.GroupSingle(Button(pygame.Rect((0, 0),(self.rect.height/5, self.rect.height/5))))
line.sprite.rect.topright = (self.rect.width, self.rect.height/4)
line.sprite.setAction(self.addAlarm)
self._lines.append(line)
def update(self):
for line in self._lines:
line.update()
# line.sprite.rect.midbottom = self.image.get_rect()
line.draw(self.baseImage)
def handleEvent(self, event):
pos = pygame.mouse.get_pos()
if event.type == pygame.MOUSEBUTTONDOWN:
for butt in self._lines:
if butt.sprite.rect.collidepoint(pos):
butt.sprite.touchDown()
if event.type == pygame.MOUSEBUTTONUP:
for butt in self._lines:
if butt.sprite.rect.collidepoint(pos):
butt.sprite.touchUp()
| khan-git/pialarmclock | faces/alarmsetting.py | Python | mit | 3,537 | 0.009047 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
from katello.client.api.base import KatelloAPI
from katello.client.lib.utils.data import update_dict_unless_none
class ContentViewAPI(KatelloAPI):
"""
Connection class to access content_view calls
"""
def content_views_by_org(self, org_id, env=None):
path = "/api/organizations/%s/content_views" % org_id
params = {"environment_id": env["id"]} if env else {}
views = self.server.GET(path, params)[1]
return views
def views_by_label_name_or_id(self, org_id, label=None, name=None, vid=None):
params = {}
update_dict_unless_none(params, "name", name)
update_dict_unless_none(params, "label", label)
update_dict_unless_none(params, "id", vid)
path = "/api/organizations/%s/content_views" % org_id
views = self.server.GET(path, params)[1]
return views
def show(self, org_name, view_id, environment_id=None):
path = "/api/organizations/%s/content_views/%s" % (org_name, view_id)
params = {"environment_id": environment_id}
view = self.server.GET(path, params)[1]
return view
def content_view_by_label(self, org_id, view_label):
path = "/api/organizations/%s/content_views/" % (org_id)
views = self.server.GET(path, {"label": view_label})[1]
if len(views) > 0:
return views[0]
else:
return None
def update(self, org_id, cv_id, label, description):
view = {}
view = update_dict_unless_none(view, "label", label)
view = update_dict_unless_none(view, "description", description)
path = "/api/organizations/%s/content_views/%s" % (org_id, cv_id)
return self.server.PUT(path, {"content_view": view})[1]
def delete(self, org_id, cv_id):
path = "/api/organizations/%s/content_views/%s" % (org_id, cv_id)
return self.server.DELETE(path)[1]
def promote(self, cv_id, env_id):
path = "/api/content_views/%s/promote" % cv_id
params = {"environment_id": env_id}
return self.server.POST(path, params)[1]
def refresh(self, cv_id):
path = "/api/content_views/%s/refresh" % cv_id
return self.server.POST(path, {})[1]
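# Usage sketch (hedged: the server URL and credentials come from the base
# KatelloAPI configuration, and the org/label values below are placeholders):
#
# api = ContentViewAPI()
# view = api.content_view_by_label('ACME_Corporation', 'cv-rhel6')
# if view is not None:
#     api.promote(view['id'], env_id)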
| Katello/katello-cli | src/katello/client/api/content_view.py | Python | gpl-2.0 | 2,845 | 0.000703 |
# Kata link: https://www.codewars.com/kata/58daa7617332e59593000006
# First solution
def find_longest(arr):
count = [len(str(v)) for v in arr]
max_value = max(count)
max_index = count.index(max_value)
return arr[max_index]
# Another solution
def find_longest(arr):
return max(arr, key=lambda x: len(str(x)))
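# Quick checks (max() keeps the first maximal element, so ties resolve to the
# earliest number in the array):
assert find_longest([1, 10, 100]) == 100
assert find_longest([900, 500, 4]) == 900  # tie on digit count -> first wins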
| chyumin/Codewars | Python/7 kyu/Most Digits.py | Python | mit | 330 | 0.00303 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
Sending messages to the AMQ
"""
import json
import logging
import random
try:
from proton import Message, SSLDomain
from proton.handlers import MessagingHandler
from proton.reactor import Container
has_proton = True
except ImportError:
has_proton = False
MessagingHandler = object
# XXX replace turbogears with beaker prefixed flask when migration is done
from turbogears.config import get
log = logging.getLogger(__name__)
# Taken from rhmsg
class TimeoutHandler(MessagingHandler):
def __init__(self, url, conf, msgs, *args, **kws):
super(TimeoutHandler, self).__init__(*args, **kws)
self.url = url
self.conf = conf
self.msgs = msgs
self.pending = {}
def on_start(self, event):
log.debug('Container starting')
event.container.connected = False
event.container.error_msgs = []
if 'cert' in self.conf and 'key' in self.conf and 'cacert' in self.conf:
ssl = SSLDomain(SSLDomain.MODE_CLIENT)
ssl.set_credentials(self.conf['cert'], self.conf['key'], None)
ssl.set_trusted_ca_db(self.conf['cacert'])
ssl.set_peer_authentication(SSLDomain.VERIFY_PEER)
else:
ssl = None
log.debug('connecting to %s', self.url)
event.container.connect(url=self.url, reconnect=False, ssl_domain=ssl)
connect_timeout = self.conf['connect_timeout']
self.connect_task = event.container.schedule(connect_timeout, self)
send_timeout = self.conf['send_timeout']
self.timeout_task = event.container.schedule(send_timeout, self)
def on_timer_task(self, event):
if not event.container.connected:
log.error('not connected, stopping container')
if self.timeout_task:
self.timeout_task.cancel()
self.timeout_task = None
event.container.stop()
else:
# This should only run when called from the timeout task
log.error('send timeout expired with %s messages unsent, stopping container',
len(self.msgs))
event.container.stop()
def on_connection_opened(self, event):
event.container.connected = True
self.connect_task.cancel()
self.connect_task = None
log.debug('connection to %s opened successfully', event.connection.hostname)
self.send_msgs(event)
def on_connection_closed(self, event):
log.debug('disconnected from %s', event.connection.hostname)
def send_msgs(self, event):
sender = event.container.create_sender(event.connection, target=self.conf['address'])
for msg in self.msgs:
delivery = sender.send(msg)
log.debug('sent msg: %s', msg.properties)
self.pending[delivery] = msg
sender.close()
def update_pending(self, event):
msg = self.pending[event.delivery]
del self.pending[event.delivery]
log.debug('removed message from self.pending: %s', msg.properties)
if not self.pending:
if self.msgs:
log.error('%s messages unsent (rejected or released)', len(self.msgs))
else:
log.debug('all messages sent successfully')
if self.timeout_task:
log.debug('canceling timeout task')
self.timeout_task.cancel()
self.timeout_task = None
log.debug('closing connection to %s', event.connection.hostname)
event.connection.close()
def on_settled(self, event):
msg = self.pending[event.delivery]
self.msgs.remove(msg)
log.debug('removed message from self.msgs: %s', msg.properties)
self.update_pending(event)
def on_rejected(self, event):
msg = self.pending[event.delivery]
log.error('message was rejected: %s', msg.properties)
self.update_pending(event)
def on_released(self, event):
msg = self.pending[event.delivery]
log.error('message was released: %s', msg.properties)
self.update_pending(event)
def on_transport_tail_closed(self, event):
if self.connect_task:
log.debug('canceling connect timer')
self.connect_task.cancel()
self.connect_task = None
if self.timeout_task:
log.debug('canceling send timer')
self.timeout_task.cancel()
self.timeout_task = None
def handle_error(self, objtype, event, level=logging.ERROR):
endpoint = getattr(event, objtype, None)
condition = (getattr(endpoint, 'remote_condition', None)
or getattr(endpoint, 'condition', None))
if condition:
name = condition.name
desc = condition.description
log.log(level, '%s error: %s: %s', objtype, name, desc)
else:
name = '{0} error'.format(objtype)
desc = 'unspecified'
log.log(level, 'unspecified %s error', objtype)
event.container.error_msgs.append((self.url, name, desc))
def on_connection_error(self, event):
self.handle_error('connection', event)
def on_session_error(self, event):
self.handle_error('session', event)
def on_link_error(self, event):
self.handle_error('link', event)
log.error('closing connection to: %s', event.connection.hostname)
event.connection.close()
def on_transport_error(self, event):
"""
Implement this handler with the same logic as the default handler in
MessagingHandler, but log to our logger at INFO level, instead of the
root logger with WARNING level.
"""
self.handle_error('transport', event, level=logging.INFO)
if (event.transport
and event.transport.condition
and event.transport.condition.name in self.fatal_conditions):
log.error('closing connection to: %s', event.connection.hostname)
event.connection.close()
class AMQProducer(object):
def __init__(self, host=None, port=None,
urls=None,
certificate=None, private_key=None,
trusted_certificates=None,
topic=None,
timeout=60):
if isinstance(urls, (list, tuple)):
pass
elif urls:
urls = [urls]
elif host:
urls = ['amqps://{0}:{1}'.format(host, port or 5671)]
else:
raise RuntimeError('either host or urls must be specified')
self.urls = urls
self.conf = {
'cert': certificate,
'key': private_key,
'cacert': trusted_certificates,
'connect_timeout': timeout,
'send_timeout': timeout
}
if topic:
self.through_topic(topic)
else:
self.address = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def through_topic(self, address):
self.address = self.build_address('topic', address)
@staticmethod
def build_address(channel, address):
return '{0}://{1}'.format(channel, address)
def _send_all(self, messages):
messages = list(messages)
errors = []
for url in sorted(self.urls, key=lambda k: random.random()):
container = Container(TimeoutHandler(url, self.conf, messages))
container.run()
errors.extend(container.error_msgs)
if not messages:
break
else:
error_strs = ['{0}: {1}: {2}'.format(*e) for e in errors]
raise RuntimeError('could not send {0} message{1} to any destinations, '
'errors:\n{2}'.format(len(messages),
's' if len(messages) != 1 else '',
'\n'.join(error_strs)))
def send(self, *messages):
"""
Send a list of messages.
Each argument is a proton.Message.
"""
assert self.address, 'Must call through_queue or through_topic in advance.'
self.conf['address'] = self.address
self._send_all(messages)
def _build_msg(self, props, body, attrs=None):
"""
Build and return a proton.Message.
Arguments:
props (dict): Message properties
body (object): Message body
attrs (dict): Attributes to set on the message.
"""
msg = Message(properties=props, body=body)
if attrs:
for name, value in attrs.items():
setattr(msg, name, value)
return msg
def send_msg(self, props, body, **kws):
"""
Send a single message.
Arguments:
props (dict): Message properties
body (str): Message body. Should be utf-8 encoded text.
Any keyword arguments will be treated as attributes to set on the
underlying Message.
"""
msg = self._build_msg(props, body, kws)
self.send(msg)
def send_msgs(self, messages):
"""
Send a list of messages.
Arguments:
messages (list): A list of 2-element lists/tuples.
tuple[0]: A dict of message headers.
tuple[1]: Message body. Should be utf-8 encoded text.
If the tuple has a third element, it is treated as a dict containing
attributes to be set on the underlying Message.
"""
msgs = []
for message in messages:
msgs.append(self._build_msg(*message))
self.send(*msgs)
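# Illustrative sketch of the shapes send_msgs() accepts (the URL and topic are
# placeholders; an optional third tuple element sets attributes, such as
# subject, on the underlying proton.Message):
#
# producer = AMQProducer(urls='amqps://broker.example.com:5671', topic='beaker')
# producer.send_msgs([
#     ({'type': 'update'}, json.dumps({'id': 1})),
#     ({'type': 'update'}, json.dumps({'id': 2}), {'subject': 'job.2'}),
# ])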
class BeakerMessenger(object):
__instance = None
def __new__(cls, *args, **kwargs):
if not cls.__instance:
cls.__instance = BeakerMessenger.__BeakerMessenger()
return cls.__instance
class __BeakerMessenger:
def __init__(self):
url = get('amq.url')
cert = get('amq.cert')
key = get('amq.key')
cacerts = get('amq.cacerts')
topic = get('amq.topic_prefix')
self.producer = AMQProducer(urls=url,
certificate=cert,
private_key=key,
trusted_certificates=cacerts,
topic=topic)
def send(self, header, body):
try:
self.producer.send_msg(header, body)
except RuntimeError as e:
log.exception(e)
def _messenger_enabled():
if _messenger_enabled.res is False:
_messenger_enabled.res = bool(
has_proton
and get('amq.url')
and get('amq.cert')
and get('amq.key')
and get('amq.cacerts')
)
return _messenger_enabled.res
_messenger_enabled.res = False
def send_scheduler_update(obj):
if not _messenger_enabled():
return
data = obj.minimal_json_content()
_send_payload(obj.task_info(), data)
def _send_payload(header, body):
bkr_msg = BeakerMessenger()
bkr_msg.send(header, json.dumps(body, default=str)) # pylint: disable=no-member
| beaker-project/beaker | Server/bkr/server/messaging.py | Python | gpl-2.0 | 11,553 | 0.000866 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: David Lapsley <dlapsley@nicira.com>, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
from abc import ABCMeta
import httplib
import logging
import time
from neutron.plugins.nicira.api_client.common import (
_conn_str)
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
# Default parameters.
GENERATION_ID_TIMEOUT = -1
DEFAULT_CONCURRENT_CONNECTIONS = 3
DEFAULT_CONNECT_TIMEOUT = 5
class NvpApiClient(object):
'''An abstract baseclass for all NvpApiClient implementations.
This defines the interface and property structure for synchronous and
coroutine-based classes.
'''
__metaclass__ = ABCMeta
CONN_IDLE_TIMEOUT = 60 * 15
def _create_connection(self, host, port, is_ssl):
if is_ssl:
return httplib.HTTPSConnection(host, port,
timeout=self._connect_timeout)
return httplib.HTTPConnection(host, port,
timeout=self._connect_timeout)
@staticmethod
def _conn_params(http_conn):
is_ssl = isinstance(http_conn, httplib.HTTPSConnection)
return (http_conn.host, http_conn.port, is_ssl)
@property
def user(self):
return self._user
@property
def password(self):
return self._password
@property
def nvp_config_gen(self):
# If nvp_gen_timeout is not -1 then:
# Maintain a timestamp along with the generation ID. Hold onto the
# ID long enough to be useful and block on sequential requests but
# not long enough to persist when Onix db is cleared, which resets
# the generation ID, causing the DAL to block indefinitely with some
# number that's higher than the cluster's value.
if self._nvp_gen_timeout != -1:
ts = self._nvp_config_gen_ts
if ts is not None:
if (time.time() - ts) > self._nvp_gen_timeout:
return None
return self._nvp_config_gen
@nvp_config_gen.setter
def nvp_config_gen(self, value):
if self._nvp_config_gen != value:
if self._nvp_gen_timeout != -1:
self._nvp_config_gen_ts = time.time()
self._nvp_config_gen = value
def auth_cookie(self, conn):
cookie = None
data = self._get_provider_data(conn)
if data:
cookie = data[1]
return cookie
def set_auth_cookie(self, conn, cookie):
data = self._get_provider_data(conn)
if data:
self._set_provider_data(conn, (data[0], cookie))
def acquire_connection(self, auto_login=True, headers=None, rid=-1):
'''Check out an available HTTPConnection instance.
Blocks until a connection is available.
:auto_login: automatically logins before returning conn
:headers: header to pass on to login attempt
:param rid: request id passed in from request eventlet.
:returns: An available HTTPConnection instance or None if no
api_providers are configured.
'''
if not self._api_providers:
LOG.warn(_("[%d] no API providers currently available."), rid)
return None
if self._conn_pool.empty():
LOG.debug(_("[%d] Waiting to acquire API client connection."), rid)
priority, conn = self._conn_pool.get()
now = time.time()
if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT:
LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
"seconds; reconnecting."),
{'rid': rid, 'conn': _conn_str(conn),
'sec': now - conn.last_used})
conn = self._create_connection(*self._conn_params(conn))
conn.last_used = now
conn.priority = priority # stash current priority for release
qsize = self._conn_pool.qsize()
LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
"connection(s) available."),
{'rid': rid, 'conn': _conn_str(conn), 'qsize': qsize})
if auto_login and self.auth_cookie(conn) is None:
self._wait_for_login(conn, headers)
return conn
def release_connection(self, http_conn, bad_state=False,
service_unavail=False, rid=-1):
'''Mark HTTPConnection instance as available for check-out.
:param http_conn: An HTTPConnection instance obtained from this
instance.
:param bad_state: True if http_conn is known to be in a bad state
(e.g. connection fault.)
:service_unavail: True if http_conn returned 503 response.
:param rid: request id passed in from request eventlet.
'''
conn_params = self._conn_params(http_conn)
if self._conn_params(http_conn) not in self._api_providers:
LOG.debug(_("[%(rid)d] Released connection %(conn)s is not an "
"API provider for the cluster"),
{'rid': rid, 'conn': _conn_str(http_conn)})
return
elif hasattr(http_conn, "no_release"):
return
if bad_state:
# Reconnect to provider.
LOG.warn(_("[%(rid)d] Connection returned in bad state, "
"reconnecting to %(conn)s"),
{'rid': rid, 'conn': _conn_str(http_conn)})
http_conn = self._create_connection(*self._conn_params(http_conn))
priority = self._next_conn_priority
self._next_conn_priority += 1
elif service_unavail:
            # http_conn returned a service unavailable response; put other
            # connections to the same controller at the end of the priority queue,
conns = []
while not self._conn_pool.empty():
priority, conn = self._conn_pool.get()
if self._conn_params(conn) == conn_params:
priority = self._next_conn_priority
self._next_conn_priority += 1
conns.append((priority, conn))
for priority, conn in conns:
self._conn_pool.put((priority, conn))
# put http_conn at end of queue also
priority = self._next_conn_priority
self._next_conn_priority += 1
else:
priority = http_conn.priority
self._conn_pool.put((priority, http_conn))
LOG.debug(_("[%(rid)d] Released connection %(conn)s. %(qsize)d "
"connection(s) available."),
{'rid': rid, 'conn': _conn_str(http_conn),
'qsize': self._conn_pool.qsize()})
def _wait_for_login(self, conn, headers=None):
'''Block until a login has occurred for the current API provider.'''
data = self._get_provider_data(conn)
if data is None:
LOG.error(_("Login request for an invalid connection: '%s'"),
_conn_str(conn))
return
provider_sem = data[0]
if provider_sem.acquire(blocking=False):
try:
cookie = self._login(conn, headers)
self.set_auth_cookie(conn, cookie)
finally:
provider_sem.release()
else:
LOG.debug(_("Waiting for auth to complete"))
            # Wait until we can acquire then release
provider_sem.acquire(blocking=True)
provider_sem.release()
def _get_provider_data(self, conn_or_conn_params, default=None):
"""Get data for specified API provider.
Args:
conn_or_conn_params: either a HTTP(S)Connection object or the
resolved conn_params tuple returned by self._conn_params().
default: conn_params if ones passed aren't known
Returns: Data associated with specified provider
"""
conn_params = self._normalize_conn_params(conn_or_conn_params)
return self._api_provider_data.get(conn_params, default)
def _set_provider_data(self, conn_or_conn_params, data):
"""Set data for specified API provider.
Args:
conn_or_conn_params: either a HTTP(S)Connection object or the
resolved conn_params tuple returned by self._conn_params().
data: data to associate with API provider
"""
conn_params = self._normalize_conn_params(conn_or_conn_params)
if data is None:
del self._api_provider_data[conn_params]
else:
self._api_provider_data[conn_params] = data
def _normalize_conn_params(self, conn_or_conn_params):
"""Normalize conn_param tuple.
Args:
conn_or_conn_params: either a HTTP(S)Connection object or the
resolved conn_params tuple returned by self._conn_params().
Returns: Normalized conn_param tuple
"""
if (not isinstance(conn_or_conn_params, tuple) and
not isinstance(conn_or_conn_params, httplib.HTTPConnection)):
LOG.debug(_("Invalid conn_params value: '%s'"),
str(conn_or_conn_params))
return conn_or_conn_params
if isinstance(conn_or_conn_params, httplib.HTTPConnection):
conn_params = self._conn_params(conn_or_conn_params)
else:
conn_params = conn_or_conn_params
host, port, is_ssl = conn_params
if port is None:
port = 443 if is_ssl else 80
return (host, port, is_ssl)
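# Sketch of the intended checkout/return cycle for a concrete subclass
# ('client' is an instance of such a subclass; error handling elided):
#
# conn = client.acquire_connection(rid=7)
# try:
#     ...  # issue the HTTP request over conn
#     client.release_connection(conn, rid=7)
# except Exception:
#     client.release_connection(conn, bad_state=True, rid=7)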
| netscaler/neutron | neutron/plugins/nicira/api_client/client.py | Python | apache-2.0 | 10,210 | 0.000196 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from saml2 import BINDING_SOAP, BINDING_URI
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_ARTIFACT
from saml2.saml import NAMEID_FORMAT_PERSISTENT
from saml2.saml import NAME_FORMAT_URI
from pathutils import full_path
BASE = "http://localhost:8088"
CONFIG = {
"entityid": "%s/saml/idp" % BASE,
"name": "Rolands IdP",
"service": {
"aa": {
"endpoints": {
"attribute_service": [
("%s/aap" % BASE, BINDING_HTTP_POST),
("%s/aas" % BASE, BINDING_SOAP)
]
},
},
"aq": {
"endpoints": {
"authn_query_service": [
("%s/aqs" % BASE, BINDING_SOAP)
]
},
},
"idp": {
"endpoints": {
"single_sign_on_service": [
("%s/sso/redirect" % BASE, BINDING_HTTP_REDIRECT),
("%s/sso/post" % BASE, BINDING_HTTP_POST),
("%s/sso/art" % BASE, BINDING_HTTP_ARTIFACT),
("%s/sso/paos" % BASE, BINDING_SOAP)
],
"single_logout_service": [
("%s/slo/soap" % BASE, BINDING_SOAP),
("%s/slo/post" % BASE, BINDING_HTTP_POST)
],
"artifact_resolution_service": [
("%s/ars" % BASE, BINDING_SOAP)
],
"assertion_id_request_service": [
("%s/airs" % BASE, BINDING_URI)
],
"authn_query_service": [
("%s/aqs" % BASE, BINDING_SOAP)
],
"manage_name_id_service": [
("%s/mni/soap" % BASE, BINDING_SOAP),
("%s/mni/post" % BASE, BINDING_HTTP_POST),
("%s/mni/redirect" % BASE, BINDING_HTTP_REDIRECT),
("%s/mni/art" % BASE, BINDING_HTTP_ARTIFACT)
],
"name_id_mapping_service": [
("%s/nim/soap" % BASE, BINDING_SOAP),
("%s/nim/post" % BASE, BINDING_HTTP_POST),
("%s/nim/redirect" % BASE, BINDING_HTTP_REDIRECT),
("%s/nim/art" % BASE, BINDING_HTTP_ARTIFACT)
]
},
"policy": {
"default": {
"lifetime": {"minutes": 15},
"attribute_restrictions": None, # means all I have
"name_form": NAME_FORMAT_URI,
},
"urn:mace:example.com:saml:roland:sp": {
"lifetime": {"minutes": 5},
"nameid_format": NAMEID_FORMAT_PERSISTENT,
# "attribute_restrictions":{
# "givenName": None,
# "surName": None,
# }
}
},
"subject_data": ("mongodb", "subject"),
"session_storage": ("mongodb", "session")
},
},
"debug": 1,
"key_file": full_path("test.key"),
"cert_file": full_path("test.pem"),
#"xmlsec_binary": None,
"xmlsec_path": ["/opt/local/bin", "usr/local/bin"],
"metadata": [{
"class": "saml2.mdstore.MetaDataFile",
"metadata": [(full_path("servera.xml"), ),
(full_path("vo_metadata.xml"), )],
}],
"attribute_map_dir": full_path("attributemaps"),
"organization": {
"name": "Exempel AB",
"display_name": [("Exempel ÄB", "se"), ("Example Co.", "en")],
"url": "http://www.example.com/roland",
},
"contact_person": [
{
"given_name":"John",
"sur_name": "Smith",
"email_address": ["john.smith@example.com"],
"contact_type": "technical",
},
],
}
| tpazderka/pysaml2 | tests/idp_conf_mdb.py | Python | bsd-2-clause | 3,959 | 0.001011 |
import pytest
import subprocess
import tempfile
import shutil
import os
import config
import time
import pymongo
@pytest.fixture(scope='session')
def mongod(request):
subprocess.call(['pkill', '-f', 'mongod*tmp'])
server = MongoServer()
server.start()
def stop():
server.stop()
server.clean()
request.addfinalizer(stop)
from tests.base_test_case import BaseTestCase
BaseTestCase.mongod = server
return server
@pytest.fixture(scope='session')
def exclusive_tests(request):
subprocess.call(['pkill', '-f', 'code/server/app.py'])
class MongoServer(object):
def __init__(self):
self.tmp_path = tempfile.mkdtemp()
self.db_path = os.path.join(self.tmp_path, 'db')
os.mkdir(self.db_path)
def start(self):
self.server = subprocess.Popen(
['mongod', '--dbpath', self.db_path, '--port',
str(config.MONGO_PORT), '--smallfiles']
)
self.wait_alive()
def stop(self):
self.server.terminate()
self.server.wait()
def clean(self):
shutil.rmtree(self.tmp_path)
def drop_db(self):
client = pymongo.MongoClient(config.MONGO_URL())
client.drop_database(config.MONGO_DB_NAME)
def wait_alive(self):
while True:
try:
client = pymongo.MongoClient(config.MONGO_URL())
result = client.admin.command('ping')
if result['ok']:
break
except:
pass
time.sleep(0.1)
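# Illustrative only: a test module can request the session-scoped fixture by
# name; drop_db() gives each test a clean database.
def _example_test(mongod):
    client = pymongo.MongoClient(config.MONGO_URL())
    assert client.admin.command('ping')['ok']
    mongod.drop_db()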
| alexander-gridnev/mongstore | code/server/tests/conftest.py | Python | gpl-2.0 | 1,564 | 0.000639 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-31 14:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0032_add_bulk_delete_page_permission'),
]
operations = [
migrations.AlterField(
model_name='page',
name='expire_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='expiry date/time'),
),
migrations.AlterField(
model_name='page',
name='go_live_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='go live date/time'),
),
]
| zerolab/wagtail | wagtail/core/migrations/0033_remove_golive_expiry_help_text.py | Python | bsd-3-clause | 674 | 0.002967 |
# -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
DAYS = [
'Mo',
'Tu',
'We',
'Th',
'Fr',
'Sa',
'Su'
]
class SparNoSpider(scrapy.Spider):
name = "spar_no"
allowed_domains = ["spar.no"]
start_urls = (
'https://spar.no/Finn-butikk/',
)
def parse(self, response):
shops = response.xpath('//div[@id="js_subnav"]//li[@class="level-1"]/a/@href')
for shop in shops:
yield scrapy.Request(
response.urljoin(shop.extract()),
callback=self.parse_shop
)
def parse_shop(self, response):
props = {}
ref = response.xpath('//h1[@itemprop="name"]/text()').extract_first()
if ref: # some links redirects back to list page
props['ref'] = ref.strip("\n").strip()
else:
return
days = response.xpath('//div[@itemprop="openingHoursSpecification"]')
if days:
for day in days:
day_list = day.xpath('.//link[@itemprop="dayOfWeek"]/@href').extract()
                # Track the earliest and latest weekday present in the list.
                first = len(DAYS) - 1
                last = 0
                for d in day_list:
                    st = d.replace('https://purl.org/goodrelations/v1#', '')[:2]
                    idx = DAYS.index(st)
                    first = idx if idx < first else first
                    last = idx if idx > last else last
props['opening_hours'] = DAYS[first]+'-'+DAYS[last]+' '+day.xpath('.//meta[@itemprop="opens"]/@content').extract_first()+' '+day.xpath('.//meta[@itemprop="closes"]/@content').extract_first()
phone = response.xpath('//a[@itemprop="telephone"]/text()').extract_first()
if phone:
props['phone'] = phone
addr_full = response.xpath('//div[@itemprop="streetAddress"]/text()').extract_first()
if addr_full:
props['addr_full'] = addr_full
postcode = response.xpath('//span[@itemprop="postalCode"]/text()').extract_first()
if postcode:
props['postcode'] = postcode
city = response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()
if city:
props['city'] = city.strip()
props['country'] = 'NO'
lat = response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()
lon = response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()
if lat and lon:
props['lat'] = float(lat)
props['lon'] = float(lon)
props['website'] = response.url
yield GeojsonPointItem(**props)
| iandees/all-the-places | locations/spiders/spar_no.py | Python | mit | 2,746 | 0.005462 |
# -*- coding: utf-8 -*-
"""
Bit Writing Request/Response
------------------------------
TODO write mask request/response
"""
import struct
from pymodbus3.constants import ModbusStatus
from pymodbus3.pdu import ModbusRequest
from pymodbus3.pdu import ModbusResponse
from pymodbus3.pdu import ModbusExceptions
from pymodbus3.utilities import pack_bitstring, unpack_bitstring
try:
    from collections.abc import Iterable  # Python 3.3+
except ImportError:
    from collections import Iterable
# Local Constants
# These are defined in the spec to turn a coil on/off
_turn_coil_on = struct.pack('>H', ModbusStatus.On)
_turn_coil_off = struct.pack('>H', ModbusStatus.Off)
class WriteSingleCoilRequest(ModbusRequest):
"""
This function code is used to write a single output to either ON or OFF
in a remote device.
The requested ON/OFF state is specified by a constant in the request
data field. A value of FF 00 hex requests the output to be ON. A value
of 00 00 requests it to be OFF. All other values are illegal and will
not affect the output.
The Request PDU specifies the address of the coil to be forced. Coils
are addressed starting at zero. Therefore coil numbered 1 is addressed
as 0. The requested ON/OFF state is specified by a constant in the Coil
Value field. A value of 0XFF00 requests the coil to be ON. A value of
0X0000 requests the coil to be off. All other values are illegal and
will not affect the coil.
"""
function_code = 5
_rtu_frame_size = 8
def __init__(self, address=None, value=None, **kwargs):
""" Initializes a new instance
:param address: The variable address to write
:param value: The value to write at address
"""
ModbusRequest.__init__(self, **kwargs)
self.address = address
self.value = bool(value)
def encode(self):
""" Encodes write coil request
:returns: The byte encoded message
"""
result = struct.pack('>H', self.address)
if self.value:
result += _turn_coil_on
else:
result += _turn_coil_off
return result
def decode(self, data):
""" Decodes a write coil request
:param data: The packet data to decode
"""
self.address, value = struct.unpack('>HH', data)
self.value = (value == ModbusStatus.On)
def execute(self, context):
""" Run a write coil request against a datastore
:param context: The datastore to request from
:returns: The populated response or exception message
"""
'''if self.value not in [ModbusStatus.Off, ModbusStatus.On]:
return self.do_exception(ModbusExceptions.IllegalValue)'''
if not context.validate(self.function_code, self.address, 1):
return self.do_exception(ModbusExceptions.IllegalAddress)
context.set_values(self.function_code, self.address, [self.value])
values = context.get_values(self.function_code, self.address, 1)
return WriteSingleCoilResponse(self.address, values[0])
def __str__(self):
""" Returns a string representation of the instance
:return: A string representation of the instance
"""
return 'WriteCoilRequest({0}, {1}) => '.format(
self.address, self.value
)
class WriteSingleCoilResponse(ModbusResponse):
"""
The normal response is an echo of the request, returned after the coil
state has been written.
"""
function_code = 5
_rtu_frame_size = 8
def __init__(self, address=None, value=None, **kwargs):
""" Initializes a new instance
:param address: The variable address written to
:param value: The value written at address
"""
ModbusResponse.__init__(self, **kwargs)
self.address = address
self.value = value
def encode(self):
""" Encodes write coil response
:return: The byte encoded message
"""
result = struct.pack('>H', self.address)
if self.value:
result += _turn_coil_on
else:
result += _turn_coil_off
return result
def decode(self, data):
""" Decodes a write coil response
:param data: The packet data to decode
"""
self.address, value = struct.unpack('>HH', data)
self.value = (value == ModbusStatus.On)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'WriteCoilResponse({0}) => {1}'.format(self.address, self.value)
class WriteMultipleCoilsRequest(ModbusRequest):
"""
"This function code is used to force each coil in a sequence of coils to
either ON or OFF in a remote device. The Request PDU specifies the coil
references to be forced. Coils are addressed starting at zero. Therefore
coil numbered 1 is addressed as 0.
The requested ON/OFF states are specified by contents of the request
data field. A logical '1' in a bit position of the field requests the
corresponding output to be ON. A logical '0' requests it to be OFF."
"""
function_code = 15
_rtu_byte_count_pos = 6
def __init__(self, address=None, values=None, **kwargs):
""" Initializes a new instance
:param address: The starting request address
:param values: The values to write
"""
ModbusRequest.__init__(self, **kwargs)
self.address = address
if not values:
values = []
elif not isinstance(values, Iterable):
values = [values]
self.values = values
self.byte_count = (len(self.values) + 7) // 8
def encode(self):
""" Encodes write coils request
:returns: The byte encoded message
"""
count = len(self.values)
self.byte_count = (count + 7) // 8
packet = struct.pack('>HHB', self.address, count, self.byte_count)
packet += pack_bitstring(self.values)
return packet
def decode(self, data):
""" Decodes a write coils request
:param data: The packet data to decode
"""
self.address, count, self.byte_count = struct.unpack('>HHB', data[0:5])
values = unpack_bitstring(data[5:])
self.values = values[:count]
def execute(self, context):
""" Run a write coils request against a datastore
:param context: The datastore to request from
:returns: The populated response or exception message
"""
count = len(self.values)
if not (1 <= count <= 0x07b0):
return self.do_exception(ModbusExceptions.IllegalValue)
if self.byte_count != ((count + 7) // 8):
return self.do_exception(ModbusExceptions.IllegalValue)
if not context.validate(self.function_code, self.address, count):
return self.do_exception(ModbusExceptions.IllegalAddress)
context.set_values(self.function_code, self.address, self.values)
return WriteMultipleCoilsResponse(self.address, count)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'WriteMultipleCoilRequest ({0}) => {1} '.format(
self.address, len(self.values)
)
class WriteMultipleCoilsResponse(ModbusResponse):
"""
The normal response returns the function code, starting address, and
quantity of coils forced.
"""
function_code = 15
_rtu_frame_size = 8
def __init__(self, address=None, count=None, **kwargs):
""" Initializes a new instance
:param address: The starting variable address written to
:param count: The number of values written
"""
ModbusResponse.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self):
""" Encodes write coils response
:returns: The byte encoded message
"""
return struct.pack('>HH', self.address, self.count)
def decode(self, data):
""" Decodes a write coils response
:param data: The packet data to decode
"""
self.address, self.count = struct.unpack('>HH', data)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'WriteMultipleCoilResponse({0}, {1})'.format(
self.address, self.count
)
# Exported symbols
__all__ = [
'WriteSingleCoilRequest',
'WriteSingleCoilResponse',
'WriteMultipleCoilsRequest',
'WriteMultipleCoilsResponse',
]
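# Worked example (a sketch, assuming ModbusStatus.On == 0xFF00 as mandated by
# the Modbus spec): forcing coil 18 ON encodes as the address followed by the
# ON constant.
def _example_encode():
    request = WriteSingleCoilRequest(address=18, value=True)
    assert request.encode() == b'\x00\x12\xff\x00'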
| uzumaxy/pymodbus3 | pymodbus3/bit_write_message.py | Python | bsd-3-clause | 8,708 | 0 |
# -*- coding: utf-8 -*-
# Script for creating different kind of indexes in a small space as possible.
# This is intended for testing purposes.
import tables
class Descr(tables.IsDescription):
var1 = tables.StringCol(itemsize=4, shape=(), dflt='', pos=0)
var2 = tables.BoolCol(shape=(), dflt=False, pos=1)
var3 = tables.Int32Col(shape=(), dflt=0, pos=2)
var4 = tables.Float64Col(shape=(), dflt=0.0, pos=3)
# Parameters for the table and index creation
small_chunkshape = (2,)
small_blocksizes = (64, 32, 16, 8)
nrows = 43
# Create the new file
h5fname = 'indexes_2_1.h5'
h5file = tables.open_file(h5fname, 'w')
t1 = h5file.create_table(h5file.root, 'table1', Descr)
row = t1.row
for i in range(nrows):
row['var1'] = i
row['var2'] = i
row['var3'] = i
row['var4'] = i
row.append()
t1.flush()
# Do a copy of table1
t1.copy(h5file.root, 'table2')
# Create indexes of all kinds
t1.cols.var1.create_index(0, 'ultralight', _blocksizes=small_blocksizes)
t1.cols.var2.create_index(3, 'light', _blocksizes=small_blocksizes)
t1.cols.var3.create_index(6, 'medium', _blocksizes=small_blocksizes)
t1.cols.var4.create_index(9, 'full', _blocksizes=small_blocksizes)
h5file.close()
| dotsdl/PyTables | tables/tests/create_backcompat_indexes.py | Python | bsd-3-clause | 1,209 | 0.000827 |