text stringlengths 6-947k | repo_name stringlengths 5-100 | path stringlengths 4-231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6-947k | score float64 0-0.34 |
---|---|---|---|---|---|---|
#!/usr/bin/env python
import os
import sys
try:
import py2deb
except ImportError:
import fake_py2deb as py2deb
import constants
__app_name__ = constants.__app_name__
__description__ = """Very simple Audiobook player.
Supports playing, pausing, seeking (sort of) and saving state when changing book/closing.
Plays books arranged as dirs under myDocs/Audiobooks
.
Homepage: http://wiki.maemo.org/Nqaap"""
__author__ = "Soeren 'Pengman' Pedersen"
__email__ = "pengmeister@gmail.com"
__version__ = constants.__version__
__build__ = constants.__build__
__changelog__ = """
* More unicode improvements
""".strip()
__postinstall__ = """#!/bin/sh -e
gtk-update-icon-cache -f /usr/share/icons/hicolor
rm -f ~/.%(name)s/%(name)s.log
""" % {"name": constants.__app_name__}
def find_files(prefix, path):
for root, dirs, files in os.walk(path):
for file in files:
if file.startswith(prefix+"-"):
fileParts = file.split("-")
unused, relPathParts, newName = fileParts[0], fileParts[1:-1], fileParts[-1]
assert unused == prefix
relPath = os.sep.join(relPathParts)
yield relPath, file, newName
def unflatten_files(files):
d = {}
for relPath, oldName, newName in files:
if relPath not in d:
d[relPath] = []
d[relPath].append((oldName, newName))
return d
def build_package(distribution):
try:
os.chdir(os.path.dirname(sys.argv[0]))
except:
pass
py2deb.Py2deb.SECTIONS = py2deb.SECTIONS_BY_POLICY[distribution]
p = py2deb.Py2deb(__app_name__)
p.prettyName = constants.__pretty_app_name__
p.description = __description__
p.bugTracker="https://bugs.maemo.org/enter_bug.cgi?product=nQa%%20Audiobook%%20Player"
p.author = __author__
p.mail = __email__
p.license = "lgpl"
p.depends = ", ".join([
"python2.6 | python2.5",
"python-gtk2 | python2.5-gtk2",
"python-dbus | python2.5-dbus",
"python-telepathy | python2.5-telepathy",
"python-gobject | python2.5-gobject",
"python-simplejson",
])
maemoSpecificDepends = ", python-osso | python2.5-osso, python-hildon | python2.5-hildon"
p.depends += {
"debian": ", python-gst0.10",
"diablo": maemoSpecificDepends,
"fremantle": maemoSpecificDepends + ", python-gst0.10",
}[distribution]
p.section = {
"debian": "sound",
"diablo": "user/multimedia",
"fremantle": "user/multimedia",
}[distribution]
p.arch="all"
p.urgency="low"
p.distribution=distribution
p.repository="extras"
p.changelog = __changelog__
p.postinstall = __postinstall__
p.icon = {
"debian": "26x26-%s.png" % constants.__app_name__,
"diablo": "26x26-%s.png" % constants.__app_name__,
"fremantle": "48x48-%s.png" % constants.__app_name__,
}[distribution]
p["/opt/%s/bin" % constants.__app_name__] = [ "%s.py" % constants.__app_name__ ]
for relPath, files in unflatten_files(find_files("src", ".")).iteritems():
fullPath = "/opt/%s/lib" % constants.__app_name__
if relPath:
fullPath += os.sep+relPath
p[fullPath] = list(
"|".join((oldName, newName))
for (oldName, newName) in files
)
p["/usr/share/applications/hildon"] = ["%s.desktop" % constants.__app_name__]
p["/usr/share/icons/hicolor/26x26/hildon"] = ["26x26-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/48x48/hildon"] = ["48x48-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/64x64/hildon"] = ["64x64-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/scalable/hildon"] = ["scale-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
print p
if distribution == "debian":
print p.generate(
version="%s-%s" % (__version__, __build__),
changelog=__changelog__,
build=True,
tar=False,
changes=False,
dsc=False,
)
else:
print p.generate(
version="%s-%s" % (__version__, __build__),
changelog=__changelog__,
build=False,
tar=True,
changes=True,
dsc=True,
)
print "Building for %s finished" % distribution
if __name__ == "__main__":
if len(sys.argv) == 1:
distribution = "fremantle"
else:
distribution = sys.argv[1]
build_package(distribution)
| epage/nqaap | support/builddeb.py | Python | lgpl-2.1 | 4,263 | 0.026742 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def query_task(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import build_match_conditions
search_string = "%%%s%%" % txt
order_by_string = "%s%%" % txt
match_conditions = build_match_conditions("Task")
match_conditions = ("and" + match_conditions) if match_conditions else ""
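# Two-stage interpolation: the outer "%" fills in the column names and the
# match conditions, leaving literal %s placeholders that frappe.db.sql() binds
# to the values tuple passed as its second argument.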
return frappe.db.sql("""select name, subject from `tabTask`
where (`%s` like %s or `subject` like %s) %s
order by
case when `subject` like %s then 0 else 1 end,
case when `%s` like %s then 0 else 1 end,
`%s`,
subject
limit %s, %s""" %
(searchfield, "%s", "%s", match_conditions, "%s",
searchfield, "%s", searchfield, "%s", "%s"),
(search_string, search_string, order_by_string, order_by_string, start, page_len))
@frappe.whitelist()
def update_timesheet_logs(ref_dt, ref_dn, billable):
time_logs = []
if ref_dt in ["Project", "Task"]:
if ref_dt == "Project":
tasks = update_linked_tasks(ref_dn, billable)
time_logs = [get_task_time_logs(task) for task in tasks]
# flatten the list of time log lists
time_logs = [log for time_log in time_logs for log in time_log]
else:
time_logs = frappe.get_all("Timesheet Detail", filters={frappe.scrub(ref_dt): ref_dn})
elif ref_dt in ["Project Type", "Project Template"]:
projects = update_linked_projects(frappe.scrub(ref_dt), ref_dn, billable)
time_logs = [get_project_time_logs(project) for project in projects]
# flatten the list of time log lists
time_logs = [log for time_log in time_logs for log in time_log]
for log in time_logs:
frappe.db.set_value("Timesheet Detail", log.name, "billable", billable)
def update_linked_projects(ref_field, ref_value, billable):
projects = frappe.get_all("Project", filters={ref_field: ref_value})
for project in projects:
project_doc = frappe.get_doc("Project", project.name)
project_doc.billable = billable
project_doc.save()
update_linked_tasks(project.name, billable)
return projects
def update_linked_tasks(project, billable):
tasks = frappe.get_all("Task", filters={"project": project})
for task in tasks:
task_doc = frappe.get_doc("Task", task.name)
task_doc.billable = billable
task_doc.save()
return tasks
def get_project_time_logs(project):
return frappe.get_all("Timesheet Detail", filters={"project": project.name})
def get_task_time_logs(task):
return frappe.get_all("Timesheet Detail", filters={"task": task.name})
| neilLasrado/erpnext | erpnext/projects/utils.py | Python | gpl-3.0 | 2,678 | 0.020164 |
kunci = "Python"
password = raw_input("Masukan password : ")
if password == kunci:
print"Password Benar"
else:
print"Password Salah"
| GunadarmaC0d3/Gilang-Aditya-Rahman | Python/Penggunaan else if.py | Python | gpl-2.0 | 141 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import datetime
import fcntl
import os
import struct
import sys
import termios
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
import six
from natural.constant import PRINTABLE, SPARKCHAR
from natural.language import _
from natural.file import filesize
def _termsize():
'''
Get the current terminal size, returns a ``(height, width)`` tuple.
'''
try:
return struct.unpack(
'hh',
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234')
)
except:
return (
int(os.environ.get('LINES', 25)),
int(os.environ.get('COLUMNS', 80)),
)
def hexdump(stream):
'''
Display stream contents in hexadecimal and ASCII format. The ``stream``
specified must either be a file-like object that supports the ``read``
method to receive bytes, or it can be a string.
To dump a file::
>>> hexdump(file(filename)) # doctest: +SKIP
Or to dump stdin::
>>> import sys
>>> hexdump(sys.stdin) # doctest: +SKIP
:param stream: stream input
'''
if isinstance(stream, six.string_types):
stream = BytesIO(stream)
row = 0
while True:
data = stream.read(16)
if not data:
break
hextets = data.encode('hex').ljust(32)
canonical = printable(data)
print('%08x %s %s |%s|' % (
row * 16,
' '.join(hextets[x:x + 2] for x in range(0x00, 0x10, 2)),
' '.join(hextets[x:x + 2] for x in range(0x10, 0x20, 2)),
canonical,
))
row += 1
def printable(sequence):
'''
Return a printable string from the input ``sequence``
:param sequence: byte or string sequence
>>> print(printable('\\x1b[1;34mtest\\x1b[0m'))
.[1;34mtest.[0m
>>> printable('\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x06') == '........'
True
>>> print(printable('12345678'))
12345678
>>> print(printable('testing\\n'))
testing.
'''
return ''.join(list(
map(lambda c: c if c in PRINTABLE else '.', sequence)
))
def sparkline(data):
'''
Return a spark line for the given data set.
:value data: sequence of numeric values
>>> print sparkline([1, 2, 3, 4, 5, 6, 5, 4, 3, 1, 5, 6]) # doctest: +SKIP
▁▂▃▄▅▆▅▄▃▁▅▆
'''
min_value = float(min(data))
max_value = float(max(data))
steps = (max_value - min_value) / float(len(SPARKCHAR) - 1)
return ''.join([
SPARKCHAR[int((float(value) - min_value) / steps)]
for value in data
])
def throughput(sample, window=1, format='binary'):
'''
Return the throughput in (intelli)bytes per second.
:param sample: number of samples sent
:param window: default 1, sample window in seconds or
:class:`datetime.timedelta` object
:param format: default 'binary', see :func:`natural.size.filesize`
>>> print(throughput(123456, 42))
2.87 KiB/s
'''
if isinstance(window, datetime.timedelta):
window = float(window.days * 86400 + window.seconds)
elif isinstance(window, six.string_types):
window = float(window)
per_second = sample / float(window)
return _('%s/s') % (filesize(per_second, format=format),)
| tehmaze/natural | natural/data.py | Python | mit | 3,460 | 0.000291 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_honeypot(apps, schema_editor):
Destination = apps.get_model("notify", "Destination")
try:
obj = Destination.objects.get(name="honeypot")
except Destination.DoesNotExist:
obj = Destination(name="honeypot", email="honeypot")
obj.save()
class Migration(migrations.Migration):
dependencies = [
('notify', '0001_initial'),
]
operations = [
migrations.RunPython(add_honeypot),
]
| andreasf/django-notify | notify/migrations/0002_add_honeypot.py | Python | mit | 553 | 0 |
#
# Copyright (C) 2000 Stephen Davies
# Copyright (C) 2000 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
from Synopsis.Processor import Parameter
from Synopsis.Formatters.HTML.View import View
from Synopsis.Formatters.HTML.Tags import *
from Directory import compile_glob
import time, os, stat, os.path
class RawFile(View):
"""A module for creating a view for each file with hyperlinked source"""
src_dir = Parameter('', 'starting point for directory listing')
base_path = Parameter('', 'path prefix to strip off of the file names')
exclude = Parameter([], 'TODO: define an exclusion mechanism (glob based ?)')
def register(self, frame):
super(RawFile, self).register(frame)
self._exclude = [compile_glob(e) for e in self.exclude]
self.__files = None
def filename(self):
return self.__filename
def title(self):
return self.__title
def _get_files(self):
"""Returns a list of (path, output_filename) for each file."""
if self.__files is not None: return self.__files
self.__files = []
dirs = [self.src_dir]
while dirs:
dir = dirs.pop(0)
for entry in os.listdir(os.path.abspath(dir)):
exclude = 0
for re in self._exclude:
if re.match(entry):
exclude = 1
break
if exclude:
continue
entry_path = os.path.join(dir, entry)
info = os.stat(entry_path)
if stat.S_ISDIR(info[stat.ST_MODE]):
dirs.append(entry_path)
else:
# strip of base_path
path = entry_path[len(self.base_path):]
if path[0] == '/': path = path[1:]
filename = self.directory_layout.file_source(path)
self.__files.append((entry_path, filename))
return self.__files
def process(self):
"""Creates a view for every file."""
for path, filename in self._get_files():
self.process_file(path, filename)
def register_filenames(self):
for path, filename in self._get_files():
self.processor.register_filename(filename, self, path)
def process_file(self, original, filename):
"""Creates a view for the given filename."""
# Check that we got the rego
reg_view, reg_scope = self.processor.filename_info(filename)
if reg_view is not self: return
self.__filename = filename
self.__title = original[len(self.base_path):]
self.start_file()
self.write_navigation_bar()
self.write('File: '+element('b', self.__title))
try:
lines = open(original, 'rt').readlines()
lineno_template = '%%%ds' % len(`len(lines)`)
lines = ['<span class="lineno">%s</span><span class="line">%s</span>\n'
%(lineno_template % (i + 1), escape(l[:-1]))
for i, l in enumerate(lines)]
self.write('<pre class="sxr">')
self.write(''.join(lines))
self.write('</pre>')
except:
self.write('An error occurred')
self.end_file()
| stefanseefeld/synopsis | Synopsis/Formatters/HTML/Views/RawFile.py | Python | lgpl-2.1 | 3,392 | 0.003243 |
#!/usr/bin/env python
# Copyright 2015, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to parse MAF file
"""
from bigquery_etl.utils.gcutils import read_mysql_query
import sys
import json
import re
from pandas import ExcelWriter
writer = ExcelWriter('maf.xlsx')
def identify_data(config):
"""Gets the metadata info from database
"""
# cloudSql connection params
host = config['cloudsql']['host']
database = config['cloudsql']['db']
user = config['cloudsql']['user']
passwd = config['cloudsql']['passwd']
# sqlquery = """
# SELECT ParticipantBarcode, SampleBarcode, AliquotBarcode, Pipeline, Platform,
# SampleType, SampleTypeCode, Study, DatafileName, DatafileNameKey, Datatype,
# DatafileUploaded, IncludeForAnalysis, DataCenterName
# FROM metadata_data
# WHERE DatafileUploaded='true'
# AND DatafileNameKey LIKE '%.maf'
# AND IncludeForAnalysis='yes'
# """
sqlquery = """
SELECT ParticipantBarcode, SampleBarcode, AliquotBarcode, Pipeline, Platform,
SampleType, SampleTypeCode, Study, DatafileName, DatafileNameKey, Datatype,
DatafileUploaded, IncludeForAnalysis, DataCenterName
FROM metadata_data
WHERE Datatype='Mutations'
AND DatafileNameKey LIKE '%.maf'
AND DatafileUploaded='true'
AND IncludeForAnalysis='yes'
"""
#sqlquery = """ select datafilename, datafilenamekey from metadata_data where 0 < instr(datafilename, 'maf') and 'true' = datafileuploaded and 0 = instr(datafilename, 'protected') group by datafilename, dataarchivename;
#"""
# connect to db and get results in a dataframe
metadata_df = read_mysql_query(host, database, user, passwd, sqlquery)
# print metadata_df
for i, x in metadata_df.iterrows():
print x.to_dict()
# print metadata_df
sys.exit()
# rename platforms in rows
for i, row in metadata_df.iterrows():
metadata = row.to_dict()
metadata_df.loc[i, 'OutDatafileNameKey'] = config['mirna']['mirna']['output_dir']\
+ metadata['DatafileName'] + '.json'
#metadata_df.loc[:, 'SampleTypeLetterCode'] = metadata_df['SampleTypeCode']\
# .map(lambda code: config['sample_code2letter'][code])
metadata_df.loc[:, 'DatafileNameKey'] = metadata_df['DatafileNameKey']\
.map(lambda inputfile: re.sub(r"^/", "", inputfile))
# tag CELLC samples
metadata_df['transform_function'] = 'mirna.mirna.transform.parse_mirna'
print "Found {0} rows, columns." .format(str(metadata_df.shape))
# Filter - check all "is_" fields - remember all 'is_' fields must be boolean
all_flag_columns = [key for key in metadata_df.columns.values if key.startswith("is_")]
flag_df = metadata_df[all_flag_columns]
metadata_df = metadata_df[flag_df.all(axis=1)]
metadata_df.to_excel(writer, 'maf_files_metadata_table')
writer.save()
print "After filtering: Found {0} rows, columns." .format(str(metadata_df.shape))
return metadata_df
if __name__ == '__main__':
print identify_data(json.load(open(sys.argv[1])))
| isb-cgc/ISB-CGC-data-proc | tcga_etl_pipeline/maf/part1/extract2.py | Python | apache-2.0 | 3,767 | 0.005575 |
from . import auth, users, rooms
from .config import config
import time
from flask import session
from pprint import pprint
import emoji
import re
escape_chars = (
("&", "&"),
("<", "<"),
(">", ">"),
)
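# parse_markdown() handles a tiny markdown subset: *bold*, _italic_,
# ~strikethrough~ and `inline code` are rewritten to their HTML equivalents.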
def parse_markdown(message):
message = re.sub('\*{1}([^\*]+)\*{1}', '<b>\\1</b>', message)
message = re.sub('\_{1}([^\_]+)\_{1}', '<i>\\1</i>', message)
message = re.sub('\~{1}([^\~]+)\~{1}', '<s>\\1</s>', message)
message = re.sub('\`{1}([^\`]+)\`{1}', '<code>\\1</code>', message)
return message
def escape_message(message):
for a, b in escape_chars:
message = message.replace(a, b)
return message
def parse_message(user, room, message):
user_data = users.safe_user(user)
user_data["tag"] = rooms.get_tag(room, user)
user_data["rank"] = None # not implemented
message = escape_message(message)
message = emoji.emojize(message, use_aliases=True)
# Markdown
message = parse_markdown(message)
return {
"user": user_data,
"text": message,
"timestamp": time.time()
}
| Chittr/Chittr | chittr/api.py | Python | mit | 1,106 | 0.012658 |
from time import sleep
import unittest2 as unittest
from tweepy.api import API
from tweepy.auth import OAuthHandler
from tweepy.models import Status
from tweepy.streaming import Stream, StreamListener
from config import create_auth
from test_utils import mock_tweet
from mock import MagicMock, patch
class MockStreamListener(StreamListener):
def __init__(self, test_case):
super(MockStreamListener, self).__init__()
self.test_case = test_case
self.status_count = 0
self.status_stop_count = 0
self.connect_cb = None
def on_connect(self):
if self.connect_cb:
self.connect_cb()
def on_timeout(self):
self.test_case.fail('timeout')
return False
def on_error(self, code):
print("response: %s" % code)
return True
def on_status(self, status):
self.status_count += 1
self.test_case.assertIsInstance(status, Status)
if self.status_stop_count == self.status_count:
return False
class TweepyStreamTests(unittest.TestCase):
def setUp(self):
self.auth = create_auth()
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener, timeout=3.0)
def tearDown(self):
self.stream.disconnect()
def test_userstream(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream()
self.assertEqual(self.listener.status_count, 1)
def test_userstream_with_params(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream(_with='user', replies='all', stall_warnings=True)
self.assertEqual(self.listener.status_count, 1)
def test_sample(self):
self.listener.status_stop_count = 10
self.stream.sample()
self.assertEquals(self.listener.status_count,
self.listener.status_stop_count)
def test_filter_track(self):
self.listener.status_stop_count = 5
phrases = ['twitter']
self.stream.filter(track=phrases)
self.assertEquals(self.listener.status_count,
self.listener.status_stop_count)
def test_track_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(track=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['track'])
def test_follow_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(follow=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['follow'])
class TweepyStreamBackoffTests(unittest.TestCase):
def setUp(self):
#bad auth causes twitter to return 401 errors
self.auth = OAuthHandler("bad-key", "bad-secret")
self.auth.set_access_token("bad-token", "bad-token-secret")
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener)
def tearDown(self):
self.stream.disconnect()
def test_exp_backoff(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=100.0)
self.stream.sample()
# 1 retry, should be 4x the retry_time
self.assertEqual(self.stream.retry_time, 4.0)
def test_exp_backoff_cap(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=3.0)
self.stream.sample()
# 1 retry, but 4x the retry_time exceeds the cap, so should be capped
self.assertEqual(self.stream.retry_time, 3.0)
mock_resp = MagicMock()
mock_resp.return_value.status = 420
@patch('httplib.HTTPConnection.getresponse', mock_resp)
def test_420(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0, retry_count=0,
retry_time=1.0, retry_420=1.5, retry_time_cap=20.0)
self.stream.sample()
# no retries, but error 420, should be double the retry_420, not double the retry_time
self.assertEqual(self.stream.retry_time, 3.0)
| dnr2/fml-twitter | tweepy-master/tests/test_streaming.py | Python | mit | 4,635 | 0.002589 |
"""Test for version 3 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import tensorflow as tf
from tensorflow.g3doc.how_tos.adding_an_op import gen_zero_out_op_3
class ZeroOut3Test(tf.test.TestCase):
def test(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
def testAttr(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3)
self.assertAllEqual(result.eval(), [0, 0, 0, 2, 0])
def testNegative(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1)
with self.assertRaisesOpError("Need preserve_index >= 0, got -1"):
result.eval()
def testLarge(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17)
with self.assertRaisesOpError("preserve_index out of range"):
result.eval()
if __name__ == '__main__':
tf.test.main()
| arunhotra/tensorflow | tensorflow/g3doc/how_tos/adding_an_op/zero_out_3_test.py | Python | apache-2.0 | 1,175 | 0.011064 |
import unittest
from scrapy.contrib.downloadermiddleware.redirect import RedirectMiddleware
from scrapy.spider import BaseSpider
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response, HtmlResponse, Headers
class RedirectMiddlewareTest(unittest.TestCase):
def setUp(self):
self.spider = BaseSpider('foo')
self.mw = RedirectMiddleware()
def test_priority_adjust(self):
req = Request('http://a.com')
rsp = Response('http://a.com', headers={'Location': 'http://a.com/redirected'}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert req2.priority > req.priority
def test_redirect_301(self):
def _test(method):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, method=method)
rsp = Response(url, headers={'Location': url2}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, method)
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
_test('GET')
_test('POST')
_test('HEAD')
def test_dont_redirect(self):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, meta={'dont_redirect': True})
rsp = Response(url, headers={'Location': url2}, status=301)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
def test_redirect_302(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_redirect_302_head(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='HEAD')
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'HEAD')
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_meta_refresh(self):
body = """<html>
<head><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org')
rsp = HtmlResponse(url='http://example.org', body=body)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
def test_meta_refresh_with_high_interval(self):
# meta-refresh with high intervals don't trigger redirects
body = """<html>
<head><meta http-equiv="refresh" content="1000;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org')
rsp = HtmlResponse(url='http://example.org', body=body)
rsp2 = self.mw.process_response(req, rsp, self.spider)
assert rsp is rsp2
def test_meta_refresh_trough_posted_request(self):
body = """<html>
<head><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org', method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = HtmlResponse(url='http://example.org', body=body)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request('http://scrapytest.org/302')
rsp = Response('http://scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
assert 'redirect_times' in req.meta
self.assertEqual(req.meta['redirect_times'], 1)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
rsp = Response('http://www.scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_redirect_urls(self):
req1 = Request('http://scrapytest.org/first')
rsp1 = Response('http://scrapytest.org/first', headers={'Location': '/redirected'}, status=302)
req2 = self.mw.process_response(req1, rsp1, self.spider)
rsp2 = Response('http://scrapytest.org/redirected', headers={'Location': '/redirected2'}, status=302)
req3 = self.mw.process_response(req2, rsp2, self.spider)
self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
if __name__ == "__main__":
unittest.main()
| mzdaniel/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_downloadermiddleware_redirect.py | Python | agpl-3.0 | 7,244 | 0.002761 |
"""Support for Blockchain.com sensors."""
from __future__ import annotations
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADDRESSES): [cv.string],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Blockchain.com sensors."""
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if not validate_address(address):
_LOGGER.error("Bitcoin address is not valid: %s", address)
return
add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(SensorEntity):
"""Representation of a Blockchain.com sensor."""
_attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
_attr_icon = ICON
_attr_native_unit_of_measurement = "BTC"
def __init__(self, name, addresses):
"""Initialize the sensor."""
self._attr_name = name
self.addresses = addresses
def update(self):
"""Get the latest state of the sensor."""
self._attr_native_value = get_balance(self.addresses)
| rohitranjan1991/home-assistant | homeassistant/components/blockchain/sensor.py | Python | mit | 1,990 | 0 |
from __future__ import division, print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.misc import logsumexp
from autograd.convenience_wrappers import value_and_grad as vgrad
from functools import partial
from os.path import join, dirname
import string
import sys
def EM(init_params, data, callback=None):
def EM_update(params):
natural_params = map(np.log, params)
loglike, E_stats = vgrad(log_partition_function)(natural_params, data) # E step
if callback: callback(loglike, params)
return map(normalize, E_stats) # M step
def fixed_point(f, x0):
x1 = f(x0)
while different(x0, x1):
x0, x1 = x1, f(x1)
return x1
def different(params1, params2):
allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
return not all(map(allclose, params1, params2))
return fixed_point(EM_update, init_params)
def normalize(a):
def replace_zeros(a):
return np.where(a > 0., a, 1.)
return a / replace_zeros(a.sum(-1, keepdims=True))
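# Forward algorithm in log space: log_alpha is updated with logsumexp for
# numerical stability, and the final logsumexp is the log-likelihood of the
# observed sequence. Autograd differentiates this to obtain the expected
# sufficient statistics used in the E step above.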
def log_partition_function(natural_params, data):
if isinstance(data, list):
return sum(map(partial(log_partition_function, natural_params), data))
log_pi, log_A, log_B = natural_params
log_alpha = log_pi
for y in data:
log_alpha = logsumexp(log_alpha[:,None] + log_A, axis=0) + log_B[:,y]
return logsumexp(log_alpha)
def initialize_hmm_parameters(num_states, num_outputs):
init_pi = normalize(npr.rand(num_states))
init_A = normalize(npr.rand(num_states, num_states))
init_B = normalize(npr.rand(num_states, num_outputs))
return init_pi, init_A, init_B
def build_dataset(filename, max_lines=-1):
"""Loads a text file, and turns each line into an encoded sequence."""
encodings = dict(map(reversed, enumerate(string.printable)))
digitize = lambda char: encodings[char] if char in encodings else len(encodings)
encode_line = lambda line: np.array(list(map(digitize, line)))
nonblank_line = lambda line: len(line) > 2
with open(filename) as f:
lines = f.readlines()
encoded_lines = map(encode_line, filter(nonblank_line, lines)[:max_lines])
num_outputs = len(encodings) + 1
return encoded_lines, num_outputs
if __name__ == '__main__':
np.random.seed(0)
np.seterr(divide='ignore')
# callback to print log likelihoods during training
print_loglike = lambda loglike, params: print(loglike)
# load training data
lstm_filename = join(dirname(__file__), 'lstm.py')
train_inputs, num_outputs = build_dataset(lstm_filename, max_lines=60)
# train with EM
num_states = 20
init_params = initialize_hmm_parameters(num_states, num_outputs)
pi, A, B = EM(init_params, train_inputs, print_loglike)
| barak/autograd | examples/hmm_em.py | Python | mit | 2,862 | 0.003494 |
import sys
from deap import base, creator, tools
import numpy as np
from csep.loglikelihood import calcLogLikelihood
# from models.mathUtil import calcNumberBins
import models.model
import random
import array
import time
from operator import attrgetter
# from scoop import futures
import fgeneric
import bbobbenchmarks as bn
sys.path.insert(0, '../code')
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
# pool = Pool()
# toolbox.register("map", futures.map)
def tupleize(func):
"""A decorator that tuple-ize the result of a function. This is useful
when the evaluation function returns a single value.
"""
def wrapper(*args, **kargs):
return func(*args, **kargs),
return wrapper
def gaModel(func,NGEN,CXPB,MUTPB,modelOmega,year,region, mean, n_aval, tournsize, ftarget):
"""The main function. It evolves models, namely modelLamba or individual.
"""
# start = time.clock()
# Attribute generator
toolbox.register("attr_float", random.random)
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("select", tools.selTournament, tournsize=2)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
# calculating the number of individuals of the populations based on the number of executions
y = int(n_aval / NGEN)
x = n_aval - y * NGEN
n = x + y
toolbox.register("evaluate", func, modelOmega=modelOmega, mean=mean)
toolbox.decorate("evaluate", tupleize)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# logbook = tools.Logbook()
# logbook.header = "gen","min","avg","max","std"
pop = toolbox.population(n)
# Evaluate the entire population
# 2 model.bins: real data, generated model
fitnesses = list(toolbox.map(toolbox.evaluate, pop))
# numero_avaliacoes = len(pop)
# normalize fitnesses
# fitnesses = normalizeFitness(fitnesses)
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# create offspring
offspring = list(toolbox.map(toolbox.clone, pop))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring, but the last ind replaced by best_pop
# Elitism
best_pop = tools.selBest(pop, 1)[0]
offspring = sorted(offspring, key=attrgetter("fitness"), reverse=True)
offspring[0] = best_pop
random.shuffle(offspring)
pop[:] = offspring
record = stats.compile(pop)
# logbook.record(gen=g, **record)
if (abs(record["min"]) - abs(ftarget)) < 10e-8:
return best_pop
if record["std"] < 10e-12:
sortedPop = sorted(pop, key=attrgetter("fitness"), reverse=True)
pop = toolbox.population(n)
pop[0] = sortedPop[0]
pop = toolbox.population(n)
fitnesses = list(toolbox.map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
g += 1
record = stats.compile(pop)
# logbook.record(gen=g, **record)
return best_pop
if __name__ == "__main__":
output = sys.argv[1]
gaParams = sys.argv[2]
region = sys.argv[3]
year = sys.argv[4]
f = open(gaParams, "r")
keys = ['key', 'NGEN', 'n_aval', 'qntYears', 'CXPB', 'MUTPB', 'tournsize']
params = dict()
for line in f:
if line[0] == '#':
continue
tokens = line.split()
for key, value in zip(keys, tokens):
if key == 'key':
params[key] = value
elif key == 'CXPB' or key == 'MUTPB':
params[key] = float(value)
elif key == 'region':
params[key] = value
else:
params[key] = int(value)
f.close()
# Create a COCO experiment that will log the results under the
# ./output directory
e = fgeneric.LoggingFunction(output)
observations = list()
means = list()
for i in range(params['qntYears']):
observation = models.model.loadModelDB(region + 'jmaData', year + i)
observation.bins = observation.bins.tolist()
observations.append(observation)
means.append(observation.bins)
# del observation
mean = np.mean(means, axis=0)
param = (params['region'], params['year'], params['qntYears'])
func, opt = bn.instantiate(2, iinstance=1, param=param)
observation = models.model.loadModelDB(region + 'jmaData', year + params['qntYears'] + 1)
ftarget = calcLogLikelihood(observation, observation)
del observation
e.setfun(func, opt=ftarget)
gaModel(e.evalfun,
NGEN=params['NGEN'],
CXPB=params['CXPB'],
MUTPB=params['MUTPB'],
modelOmega=observations,
year=params['year'] +
params['qntYears'],
region=params['region'],
mean=mean,
n_aval=params['n_aval'],
tournsize=params['tournsize'],
ftarget=e.ftarget)
print('ftarget=%.4e FEs=%d fbest-ftarget=%.4e and fbest = %.4e' % (e.ftarget, e.evaluations, e.fbest - e.ftarget, e.fbest))
e.finalizerun()
print('date and time: %s' % time.asctime())
# output = generatedModel.loglikelihood
# return((-1)*output[0])
| PyQuake/earthquakemodels | code/gaModel/gamodel_bbob.py | Python | bsd-3-clause | 6,592 | 0.003337 |
from rutinas import *
inputdir="ETERNA-INI/"
outputdir="ETERNA-OUT/"
inputdb=argv[1]
f=open(inputdir+"%s-INI/numeracionsismos.dat"%inputdb,"r")
for line in f:
if "#" in line:continue
line=line.strip()
numsismo=line.split()[0]
print "Generando datos de sismo '%s'..."%numsismo
cmd="python prd2dat.py %s/%s-OUT/ %s"%(outputdir,inputdb,numsismo)
system(cmd)
f.close()
| seap-udea/tQuakes | util/Legacy/allprd.py | Python | gpl-2.0 | 391 | 0.038363 |
# Copyright 2015 Facundo Batista, Nicolás Demarchi
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/PyAr/fades
""" Tests for pip related code. """
import unittest
from unittest.mock import patch
import logassert
from fades.pipmanager import PipManager
from fades import helpers
class PipManagerTestCase(unittest.TestCase):
""" Check parsing for `pip show`. """
def setUp(self):
logassert.setup(self, 'fades.pipmanager')
def test_get_parsing_ok(self):
mocked_stdout = ['Name: foo',
'Version: 2.0.0',
'Location: ~/.local/share/fades/86cc492/lib/python3.4/site-packages',
'Requires: ']
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mock.return_value = mocked_stdout
version = mgr.get_version('foo')
self.assertEqual(version, '2.0.0')
def test_get_parsing_error(self):
mocked_stdout = ['Name: foo',
'Release: 2.0.0',
'Location: ~/.local/share/fades/86cc492/lib/python3.4/site-packages',
'Requires: ']
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
version = mgr.get_version('foo')
mock.return_value = mocked_stdout
self.assertEqual(version, '')
self.assertLoggedError('Fades is having problems getting the installed version. '
'Run with -v or check the logs for details')
def test_real_case_levenshtein(self):
mocked_stdout = [
'Metadata-Version: 1.1',
'Name: python-Levenshtein',
'Version: 0.12.0',
'License: GPL',
]
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mock.return_value = mocked_stdout
version = mgr.get_version('foo')
self.assertEqual(version, '0.12.0')
def test_install(self):
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mgr.install('foo')
mock.assert_called_with(['/usr/bin/pip', 'install', 'foo'])
def test_install_with_options(self):
mgr = PipManager('/usr/bin', pip_installed=True, options=['--bar baz'])
with patch.object(helpers, 'logged_exec') as mock:
mgr.install('foo')
mock.assert_called_with(['/usr/bin/pip', 'install', 'foo', '--bar', 'baz'])
def test_install_with_options_using_equal(self):
mgr = PipManager('/usr/bin', pip_installed=True, options=['--bar=baz'])
with patch.object(helpers, 'logged_exec') as mock:
mgr.install('foo')
mock.assert_called_with(['/usr/bin/pip', 'install', 'foo', '--bar=baz'])
def test_install_raise_error(self):
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mock.side_effect = Exception("Kapow!")
with self.assertRaises(Exception):
mgr.install('foo')
self.assertLoggedError("Error installing foo: Kapow!")
def test_install_without_pip(self):
mgr = PipManager('/usr/bin', pip_installed=False)
with patch.object(helpers, 'logged_exec') as mocked_exec:
with patch.object(mgr, '_brute_force_install_pip') as mocked_install_pip:
mgr.install('foo')
self.assertEqual(mocked_install_pip.call_count, 1)
mocked_exec.assert_called_with(['/usr/bin/pip', 'install', 'foo'])
| arielrossanigo/fades | tests/test_pipmanager.py | Python | gpl-3.0 | 4,315 | 0.001391 |
from treeseq_funcs import *
| tos-kamiya/pyrem_torq | src/pyrem_torq/treeseq/__init__.py | Python | mit | 29 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author(s): Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_local_role_manager
short_description: Manage local roles on an ESXi host
description:
- Manage local roles on an ESXi host
version_added: "2.5"
author: Abhijeet Kasurde (@akasurde) <akasurde@redhat.com>
notes:
- Tested on ESXi 6.5
- Be sure that the ESXi user used for login, has the appropriate rights to create / delete / edit roles
requirements:
- "python >= 2.6"
- PyVmomi
options:
local_role_name:
description:
- The local role name to be managed.
required: True
local_privilege_ids:
description:
- The list of privileges that role needs to have.
- Please see U(https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-ED56F3C4-77D0-49E3-88B6-B99B8B437B62.html)
default: []
state:
description:
- Indicate desired state of the role.
- If the role already exists when C(state=present), the role info is updated.
choices: ['present', 'absent']
default: present
force_remove:
description:
- If set to C(False) then prevents the role from being removed if any permissions are using it.
default: False
type: bool
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_role_manager command from Ansible Playbooks
- name: Add local role to ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
state: present
- name: Add local role with privileges to ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
local_privilege_ids: [ 'Folder.Create', 'Folder.Delete']
state: present
- name: Remove local role from ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
state: absent
'''
RETURN = r'''
local_role_name:
description: Name of local role
returned: always
type: string
role_id:
description: ESXi generated local role id
returned: always
type: int
old_privileges:
description: List of privileges of role before update
returned: on update
type: list
new_privileges:
description: List of privileges of role after update
returned: on update
type: list
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class VMwareLocalRoleManager(PyVmomi):
def __init__(self, module):
super(VMwareLocalRoleManager, self).__init__(module)
self.module = module
self.params = module.params
self.role_name = self.params['local_role_name']
self.state = self.params['state']
self.priv_ids = self.params['local_privilege_ids']
self.force = not self.params['force_remove']
self.current_role = None
if self.content.authorizationManager is None:
self.module.fail_json(msg="Failed to get local authorization manager settings.",
details="It seems that %s is a vCenter server "
"instead of an ESXi server" % self.params['hostname'])
def process_state(self):
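# Dispatch table: desired state (from the task parameters) -> current state
# (found on the host) -> handler that performs the transition or exits unchanged.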
local_role_manager_states = {
'absent': {
'present': self.state_remove_role,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_update_role,
'absent': self.state_create_role,
}
}
try:
local_role_manager_states[self.state][self.check_local_role_manager_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def check_local_role_manager_state(self):
auth_role = self.find_authorization_role()
if auth_role:
self.current_role = auth_role
return 'present'
else:
return 'absent'
def find_authorization_role(self):
desired_role = None
for role in self.content.authorizationManager.roleList:
if role.name == self.role_name:
desired_role = role
return desired_role
def state_create_role(self):
try:
role_id = self.content.authorizationManager.AddAuthorizationRole(name=self.role_name,
privIds=self.priv_ids)
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified role name "
"already exists." % self.role_name,
details=e.msg)
except vim.fault.InvalidName as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified role name "
"is empty" % self.role_name,
details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified privileges "
"are unknown" % self.role_name,
details=e.msg)
result = {
'changed': True,
'role_id': role_id,
'privileges': self.priv_ids,
'local_role_name': self.role_name,
}
self.module.exit_json(**result)
def state_remove_role(self):
try:
self.content.authorizationManager.RemoveAuthorizationRole(roleId=self.current_role.roleId,
failIfUsed=self.force)
except vim.fault.NotFound as e:
self.module.fail_json(msg="Failed to remove a role %s as the user specified role name "
"does not exist." % self.role_name,
details=e.msg)
except vim.fault.RemoveFailed as e:
msg = "Failed to remove a role %s as the user specified role name." % self.role_name
if self.force:
msg += " Use force_remove as True."
self.module.fail_json(msg=msg, details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to remove a role %s as the user specified "
"role is a system role" % self.role_name,
details=e.msg)
result = {
'changed': True,
'role_id': self.current_role.roleId,
'local_role_name': self.role_name,
}
self.module.exit_json(**result)
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_role(self):
current_privileges = set(self.current_role.privilege)
# Add system-defined privileges, "System.Anonymous", "System.View", and "System.Read".
self.params['local_privilege_ids'].extend(['System.Anonymous', 'System.Read', 'System.View'])
desired_privileges = set(self.params['local_privilege_ids'])
changed_privileges = current_privileges ^ desired_privileges
changed_privileges = list(changed_privileges)
if not changed_privileges:
self.state_exit_unchanged()
# Delete unwanted privileges that are not required
for priv in changed_privileges:
if priv not in desired_privileges:
changed_privileges.remove(priv)
try:
self.content.authorizationManager.UpdateAuthorizationRole(roleId=self.current_role.roleId,
newName=self.current_role.name,
privIds=changed_privileges)
except vim.fault.NotFound as e:
self.module.fail_json(msg="Failed to update Role %s. Please check privileges "
"provided for update" % self.role_name,
details=e.msg)
except vim.fault.InvalidName as e:
self.module.fail_json(msg="Failed to update Role %s as role name is empty" % self.role_name,
details=e.msg)
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg="Failed to update Role %s." % self.role_name,
details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to update Role %s as user specified "
"role is system role which can not be changed" % self.role_name,
details=e.msg)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to update Role %s as current session does not"
" have any privilege to update specified role" % self.role_name,
details=e.msg)
role = self.find_authorization_role()
result = {
'changed': True,
'role_id': role.roleId,
'local_role_name': role.name,
'new_privileges': role.privilege,
'old_privileges': current_privileges,
}
self.module.exit_json(**result)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(local_role_name=dict(required=True, type='str'),
local_privilege_ids=dict(default=[], type='list'),
force_remove=dict(default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False)
vmware_local_role_manager = VMwareLocalRoleManager(module)
vmware_local_role_manager.process_state()
if __name__ == '__main__':
main()
| dataxu/ansible | lib/ansible/modules/cloud/vmware/vmware_local_role_manager.py | Python | gpl-3.0 | 10,915 | 0.003207 |
import ble
import mqtt
from config import *
try:
ble.start()
mqtt.start()
finally:
#notify MQTT subscribers that gateway is offline
mqtt.publish(TOPIC_PREFIX, "offline")
ble.stop()
| blassphemy/mqtt-ble-gateway | main.py | Python | apache-2.0 | 208 | 0.009615 |
#abstract_component.py
import threading
class AbstractComponent:
logger = None
def init(self):
pass
def finalize(self):
pass
class AbstractQueryProviderComponent(AbstractComponent):
async_query_availabale = False
def getAvailableQueryCodeSet(self):
raise NotImplementedError('You have to override AbstractQueryProviderComponent.getAvailableQuerySet method')
def query(self, query_code, arg_set):
raise NotImplementedError('You have to override AbstractQueryProviderComponent.query method')
class AbstractSubscriptionProviderComponent(AbstractComponent):
def getAvailableSubscriptionCodeSet(self):
raise NotImplementedError('You have to override AbstractSubscriptionProviderComponent.getAvailableSubscriptionSet method')
def subscribe(self, subscribe_code, arg_set, queue):
raise NotImplementedError('You have to override AbstractSubscriptionProviderComponent.subscribe method')
def unsubscribe(self, subscribe_code, arg_set, queue):
raise NotImplementedError('You have to override AbstractSubscriptionProviderComponent.unsubscribe method')
| 3WiseMen/python | pystock/pystock_xingAPI/abstract_component.py | Python | mit | 1,088 | 0.029412 |
import lexer
s = "program id; var beto: int; { id = 1234; }"
lexer.lexer.input(s)
for token in lexer.lexer:
print token
| betoesquivel/PLYpractice | testingLexer.py | Python | mit | 126 | 0 |
# coding: utf-8
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'tests.myapp',
'test_without_migrations',
'django_nose'
)
SITE_ID = 1
SECRET_KEY='secret'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEST_WITHOUT_MIGRATIONS_COMMAND = 'django_nose.management.commands.test.Command'
| henriquebastos/django-test-without-migrations | tests/nose_settings.py | Python | mit | 509 | 0.005894 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import binascii
import datetime
import json
import logging
import os
import time
import urllib
import webapp2
from babel import Locale
from google.appengine.api import search, users as gusers
from google.appengine.ext import db
from google.appengine.ext.deferred import deferred
from google.appengine.ext.webapp import template
from markdown import Markdown
from mcfw.cache import cached
from mcfw.consts import MISSING
from mcfw.exceptions import HttpNotFoundException
from mcfw.restapi import rest, GenericRESTRequestHandler
from mcfw.rpc import serialize_complex_value, arguments, returns
from rogerthat.bizz.communities.communities import get_communities_by_country, get_community, get_community_countries
from rogerthat.bizz.friends import user_code_by_hash, makeFriends, ORIGIN_USER_INVITE
from rogerthat.bizz.registration import get_headers_for_consent
from rogerthat.bizz.service import SERVICE_LOCATION_INDEX, re_index_map_only
from rogerthat.bizz.session import create_session
from rogerthat.dal.app import get_app_by_id
from rogerthat.exceptions.login import AlreadyUsedUrlException, InvalidUrlException, ExpiredUrlException
from rogerthat.models import ProfilePointer, ServiceProfile
from rogerthat.pages.legal import DOC_TERMS_SERVICE, get_current_document_version, get_version_content, \
get_legal_language, LANGUAGES as LEGAL_LANGUAGES
from rogerthat.pages.login import SetPasswordHandler
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.settings import get_server_settings
from rogerthat.templates import get_languages_from_request
from rogerthat.to import ReturnStatusTO, RETURNSTATUS_TO_SUCCESS, WarningReturnStatusTO
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils import bizz_check, try_or_defer, get_country_code_by_ipaddress
from rogerthat.utils.app import get_app_id_from_app_user
from rogerthat.utils.cookie import set_cookie
from rogerthat.utils.service import create_service_identity_user
from shop import SHOP_JINJA_ENVIRONMENT
from shop.bizz import create_customer_signup, complete_customer_signup, get_organization_types, \
update_customer_consents, get_customer_signup, validate_customer_url_data, \
get_customer_consents
from shop.business.permissions import is_admin
from shop.constants import OFFICIALLY_SUPPORTED_LANGUAGES
from shop.models import Customer
from shop.to import CompanyTO, CustomerTO, CustomerLocationTO
from shop.view import get_shop_context, get_current_http_host
from solution_server_settings import get_solution_server_settings
from solutions import translate
from solutions.common.bizz.grecaptcha import recaptcha_verify
from solutions.common.bizz.settings import get_consents_for_community
from solutions.common.integrations.cirklo.cirklo import get_whitelisted_merchant
from solutions.common.integrations.cirklo.models import CirkloMerchant, CirkloCity
from solutions.common.markdown_newtab import NewTabExtension
from solutions.common.models import SolutionServiceConsent
from solutions.common.restapi.services import do_create_service
from solutions.common.to.settings import PrivacySettingsGroupTO
class StaticFileHandler(webapp2.RequestHandler):
def get(self, filename):
cur_path = os.path.dirname(__file__)
path = os.path.join(cur_path, u'html', filename)
with open(path, 'r') as f:
self.response.write(f.read())
class GenerateQRCodesHandler(webapp2.RequestHandler):
def get(self):
current_user = gusers.get_current_user()
if not is_admin(current_user):
self.abort(403)
path = os.path.join(os.path.dirname(__file__), 'html', 'generate_qr_codes.html')
context = get_shop_context()
self.response.out.write(template.render(path, context))
class CustomerMapHandler(webapp2.RequestHandler):
def get(self, app_id):
path = os.path.join(os.path.dirname(__file__), 'html', 'customer_map.html')
settings = get_server_settings()
lang = get_languages_from_request(self.request)[0]
translations = {
'merchants': translate(lang, 'merchants'),
'merchants_with_terminal': translate(lang, 'merchants_with_terminal'),
'community_services': translate(lang, 'community_services'),
'care': translate(lang, 'care'),
'associations': translate(lang, 'associations'),
}
params = {
'maps_key': settings.googleMapsKey,
'app_id': app_id,
'translations': json.dumps(translations)
}
self.response.out.write(template.render(path, params))
@cached(2, 21600)
@returns(unicode)
@arguments(app_id=unicode)
def get_customer_locations_for_app(app_id):
query_string = (u'app_ids:"%s"' % app_id)
query = search.Query(query_string=query_string,
options=search.QueryOptions(returned_fields=['service', 'name', 'location', 'description'],
limit=1000))
search_result = search.Index(name=SERVICE_LOCATION_INDEX).search(query)
customers = {customer.service_email: customer for customer in Customer.list_by_app_id(app_id)}
def map_result(service_search_result):
customer_location = CustomerLocationTO()
for field in service_search_result.fields:
if field.name == 'service':
customer = customers.get(field.value.split('/')[0])
if customer:
customer_location.address = customer.address1
customer_location.type = customer.organization_type
if customer.address2:
customer_location.address += '\n%s' % customer.address2
if customer.zip_code or customer.city:
customer_location.address += '\n'
if customer.zip_code:
customer_location.address += customer.zip_code
if customer.zip_code and customer.city:
customer_location.address += ' '
if customer.city:
customer_location.address += customer.city
else:
customer_location.type = ServiceProfile.ORGANIZATION_TYPE_PROFIT
continue
if field.name == 'name':
customer_location.name = field.value
continue
if field.name == 'location':
customer_location.lat = field.value.latitude
customer_location.lon = field.value.longitude
continue
if field.name == 'description':
customer_location.description = field.value
continue
return customer_location
return json.dumps(serialize_complex_value([map_result(r) for r in search_result.results], CustomerLocationTO, True))
class CustomerMapServicesHandler(webapp2.RequestHandler):
def get(self, app_id):
customer_locations = get_customer_locations_for_app(app_id)
self.response.write(customer_locations)
@rest('/unauthenticated/loyalty/scanned', 'get', read_only_access=True, authenticated=False)
@returns(ReturnStatusTO)
@arguments(user_email_hash=unicode, merchant_email=unicode, app_id=unicode)
def rest_loyalty_scanned(user_email_hash, merchant_email, app_id):
try:
bizz_check(user_email_hash is not MISSING, 'user_email_hash is required')
bizz_check(merchant_email is not MISSING, 'merchant_email is required')
bizz_check(app_id is not MISSING, 'app_id is required')
user_code = user_code_by_hash(binascii.unhexlify(user_email_hash))
profile_pointer = ProfilePointer.get(user_code)
if not profile_pointer:
logging.debug('No ProfilePointer found with user_code %s', user_code)
raise BusinessException('User not found')
app_user = profile_pointer.user
bizz_check(get_app_by_id(app_id), 'App not found')
bizz_check(app_id == get_app_id_from_app_user(profile_pointer.user), 'Invalid user email hash')
merchant_found = False
for customer in Customer.list_by_user_email(merchant_email):
merchant_found = True
service_user = users.User(customer.service_email)
logging.info('Received loyalty scan of %s by %s (%s)', app_user, service_user, customer.user_email)
makeFriends(service_user, app_user, None, None, ORIGIN_USER_INVITE,
notify_invitee=False,
notify_invitor=False,
allow_unsupported_apps=True)
bizz_check(merchant_found, 'Merchant not found')
except BusinessException as e:
return ReturnStatusTO.create(False, e.message)
else:
return RETURNSTATUS_TO_SUCCESS
class PublicPageHandler(webapp2.RequestHandler):
@property
def language(self):
return get_languages_from_request(self.request)[0]
def translate(self, key, **kwargs):
return translate(self.language, key, **kwargs)
def render(self, template_name, **params):
if not params.get('language'):
params['language'] = self.language
routes = ['signin', 'signup', 'reset_password', 'set_password']
for route_name in routes:
url = self.url_for(route_name)
params[route_name + '_url'] = url
template_path = 'public/%s.html' % template_name
return SHOP_JINJA_ENVIRONMENT.get_template(template_path).render(params)
def return_error(self, message, **kwargs):
translated_message = self.translate(message, **kwargs)
self.response.out.write(self.render('error', message=translated_message))
def dispatch(self):
if users.get_current_user():
return self.redirect('/')
return super(PublicPageHandler, self).dispatch()
class CustomerSigninHandler(PublicPageHandler):
def get(self, app_id=None):
self.response.write(self.render('signin'))
class CustomerSignupHandler(PublicPageHandler):
def get(self):
language = (self.request.get('language') or self.language).split('_')[0]
if language not in LEGAL_LANGUAGES:
language = DEFAULT_LANGUAGE
solution_server_settings = get_solution_server_settings()
version = get_current_document_version(DOC_TERMS_SERVICE)
legal_language = get_legal_language(language)
countries = get_community_countries()
selected_country = get_country_code_by_ipaddress(os.environ.get('HTTP_X_FORWARDED_FOR', None))
if selected_country:
communities = get_communities_by_country(selected_country)
else:
communities = []
params = {
'recaptcha_site_key': solution_server_settings.recaptcha_site_key,
'email_verified': False,
'toc_content': get_version_content(legal_language, DOC_TERMS_SERVICE, version),
'language': language.lower(),
'languages': [(code, name) for code, name in OFFICIALLY_SUPPORTED_LANGUAGES.iteritems()
if code in LEGAL_LANGUAGES],
'countries': [(country, Locale(language, country).get_territory_name()) for country in countries],
'communities': communities,
'selected_country': selected_country,
'signup_success': json.dumps(self.render('signup_success', language=language))
}
self.response.write(self.render('signup', **params))
class CustomerSignupPasswordHandler(PublicPageHandler):
def get(self):
data = self.request.get('data')
email = self.request.get('email').rstrip('.')
params = {
'email': email,
'data': data,
'language': self.language,
'error': None,
}
self.response.write(self.render('signup_setpassword', **params))
def post(self):
json_data = json.loads(self.request.body)
email = json_data.get('email')
data = json_data.get('data')
password = json_data.get('password', '')
password_confirm = json_data.get('password_confirm')
error = None
try:
signup, _ = get_customer_signup(email, data) # type: CustomerSignup, dict
except ExpiredUrlException:
error = self.translate('link_expired', action='')
except AlreadyUsedUrlException:
error = self.translate('link_is_already_used', action='')
except InvalidUrlException:
error = self.translate('invalid_url')
if len(password) < 8:
error = self.translate('password_length_error', length=8)
elif password != password_confirm:
error = self.translate('password_match_error')
if not error:
tos_version = get_current_document_version(DOC_TERMS_SERVICE)
result = do_create_service(signup.city_customer, signup.language, True, signup, password, tos_version=tos_version)
if result.success:
service_email = result.data['service_email']
deferred.defer(complete_customer_signup, email, data, service_email)
try:
# Sleep to allow datastore indexes to update
time.sleep(2)
secret, _ = create_session(users.User(signup.company_email), ignore_expiration=True, cached=False)
server_settings = get_server_settings()
set_cookie(self.response, server_settings.cookieSessionName, secret)
except:
logging.error("Failed to create session", exc_info=True)
else:
result = WarningReturnStatusTO.create(False, error)
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(result.to_dict()))
class CustomerResetPasswordHandler(PublicPageHandler):
def get(self):
self.response.out.write(self.render('reset_password'))
class CustomerSetPasswordHandler(PublicPageHandler, SetPasswordHandler):
"""Inherit PublicPageHandler first to override SetPasswordHandler return_error()"""
def get(self):
email = self.request.get('email')
data = self.request.get('data')
try:
parsed_data = self.parse_and_validate_data(email, data)
except ExpiredUrlException as e:
return self.return_error("link_expired", action=e.action)
except AlreadyUsedUrlException as e:
return self.return_error("link_is_already_used", action=e.action)
except InvalidUrlException:
return self.return_error('invalid_url')
params = {
'name': parsed_data['n'],
'email': email,
'action': parsed_data['a'],
'data': data,
}
self.response.out.write(self.render('set_password', **params))
def post(self):
super(CustomerSetPasswordHandler, self).post()
@rest('/unauthenticated/osa/customer/signup', 'post', read_only_access=True, authenticated=False)
@returns(ReturnStatusTO)
@arguments(city_customer_id=(int, long), company=CompanyTO, customer=CustomerTO, recaptcha_token=unicode,
email_consents=dict)
def customer_signup(city_customer_id, company, customer, recaptcha_token, email_consents=None):
try:
headers = get_headers_for_consent(GenericRESTRequestHandler.getCurrentRequest())
create_customer_signup(city_customer_id, company, customer, recaptcha_token,
domain=get_current_http_host(with_protocol=True), headers=headers, accept_missing=True)
headers = get_headers_for_consent(GenericRESTRequestHandler.getCurrentRequest())
consents = email_consents or {}
context = u'User signup'
try_or_defer(update_customer_consents, customer.user_email, consents, headers, context)
return RETURNSTATUS_TO_SUCCESS
except BusinessException as e:
return ReturnStatusTO.create(False, e.message)
def parse_euvat_address_eu(address):
address = address.strip().splitlines()
zc_ci = address.pop()
zip_code, city = zc_ci.split(' ', 1)
address1 = address.pop(0) if len(address) > 0 else ''
address2 = address.pop(0) if len(address) > 0 else ''
return address1, address2, zip_code, city
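# Quick illustration of the parsing above (hypothetical input, not taken from real data):
# the last line is always treated as "<zip_code> <city>", and any remaining lines become
# address1 and address2.
#
#   >>> parse_euvat_address_eu('Main Street 1\nSuite 2\n1000 Brussels')
#   ('Main Street 1', 'Suite 2', '1000', 'Brussels')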
@rest('/unauthenticated/osa/signup/community-info/<community_id:[^/]+>', 'get', read_only_access=True,
authenticated=False)
@returns(dict)
@arguments(community_id=(int, long), language=unicode)
def get_customer_info(community_id, language=None):
community = get_community(community_id)
if not community:
raise HttpNotFoundException('Community not found')
if not language:
request = GenericRESTRequestHandler.getCurrentRequest()
language = get_languages_from_request(request)[0]
customer = Customer.get_by_service_email(community.main_service) # type: Customer
organization_types = dict(get_organization_types(customer, community.default_app, language))
return {
'customer': {
'id': customer.id,
},
'organization_types': organization_types
}
@rest('/unauthenticated/osa/signup/communities/<country_code:[^/]+>', 'get', read_only_access=True, authenticated=False)
@returns([dict])
@arguments(country_code=unicode)
def api_get_communities(country_code):
return [{'name': community.name, 'id': community.id} for community in get_communities_by_country(country_code)]
@rest('/unauthenticated/osa/signup/privacy-settings/<community_id:[^/]+>', 'get', read_only_access=True,
authenticated=False)
@returns([PrivacySettingsGroupTO])
@arguments(community_id=(int, long), language=unicode)
def get_privacy_settings(community_id, language=None):
if not language:
request = GenericRESTRequestHandler.getCurrentRequest()
language = get_languages_from_request(request)[0]
return get_consents_for_community(community_id, language, [])
class CustomerCirkloAcceptHandler(PublicPageHandler):
def get_url(self, customer):
url_params = urllib.urlencode({'cid': customer.id})
return '/customers/consent/cirklo?{}'.format(url_params)
def dispatch(self):
# Don't redirect to dashboard when logged in
return super(PublicPageHandler, self).dispatch()
def get(self):
customer_id = self.request.get('cid')
if customer_id:
try:
customer = Customer.get_by_id(long(customer_id))
except:
return self.return_error('invalid_url')
else:
email = self.request.get('email')
data = self.request.get('data')
try:
data = validate_customer_url_data(email, data)
except InvalidUrlException:
return self.return_error('invalid_url')
customer = db.get(data['s']) # Customer
if not customer:
return self.abort(404)
consents = get_customer_consents(customer.user_email)
should_accept = False
if SolutionServiceConsent.TYPE_CITY_CONTACT not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CITY_CONTACT)
should_accept = True
if SolutionServiceConsent.TYPE_CIRKLO_SHARE not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CIRKLO_SHARE)
should_accept = True
params = {
'cirklo_accept_url': self.get_url(customer),
'should_accept': should_accept
}
self.response.out.write(self.render('cirklo_accept', **params))
def post(self):
try:
customer_id = self.request.get('cid')
customer = Customer.get_by_id(long(customer_id)) # type: Customer
if not customer:
raise Exception('Customer not found')
except:
self.redirect('/')
return
consents = get_customer_consents(customer.user_email)
should_put_consents = False
if SolutionServiceConsent.TYPE_CITY_CONTACT not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CITY_CONTACT)
should_put_consents = True
if SolutionServiceConsent.TYPE_CIRKLO_SHARE not in consents.types:
consents.types.append(SolutionServiceConsent.TYPE_CIRKLO_SHARE)
should_put_consents = True
if should_put_consents:
consents.put()
community = get_community(customer.community_id)
city_id = CirkloCity.get_by_service_email(community.main_service).city_id
service_user_email = customer.service_user.email()
cirklo_merchant_key = CirkloMerchant.create_key(service_user_email)
cirklo_merchant = cirklo_merchant_key.get() # type: CirkloMerchant
if not cirklo_merchant:
cirklo_merchant = CirkloMerchant(key=cirklo_merchant_key) # type: CirkloMerchant
cirklo_merchant.denied = False
logging.debug('Creating new cirklo merchant')
cirklo_merchant.creation_date = datetime.datetime.utcfromtimestamp(customer.creation_time)
cirklo_merchant.service_user_email = service_user_email
cirklo_merchant.customer_id = customer.id
cirklo_merchant.city_id = city_id
cirklo_merchant.data = None
cirklo_merchant.populate_from_cirklo(get_whitelisted_merchant(city_id, customer.user_email))
cirklo_merchant.put()
logging.debug('Saving cirklo merchant: %s', cirklo_merchant)
service_identity_user = create_service_identity_user(customer.service_user)
try_or_defer(re_index_map_only, service_identity_user)
else:
            logging.debug('Not saving cirklo merchant, consents: %s', consents)
self.redirect(self.get_url(customer))
class VouchersCirkloSignupHandler(PublicPageHandler):
def get(self, city_id=''):
supported_languages = ["nl", "fr"]
language = (self.request.get('language') or self.language).split('_')[0].lower()
cities = []
if city_id and city_id != 'staging':
city = CirkloCity.create_key(city_id).get()
if city:
cities = [city]
if not cities:
if city_id and city_id == 'staging':
cities = [city for city in CirkloCity.list_signup_enabled() if city.city_id.startswith('staging-')]
else:
cities = [city for city in CirkloCity.list_signup_enabled() if not city.city_id.startswith('staging-')]
solution_server_settings = get_solution_server_settings()
if language not in supported_languages:
language = supported_languages[0]
if language == 'fr':
sorted_cities = sorted(cities, key=lambda x: x.signup_names.fr)
else:
sorted_cities = sorted(cities, key=lambda x: x.signup_names.nl)
params = {
'city_id': city_id or None,
'cities': sorted_cities,
'recaptcha_site_key': solution_server_settings.recaptcha_site_key,
'language': language,
'languages': [(code, name) for code, name in OFFICIALLY_SUPPORTED_LANGUAGES.iteritems()
if code in supported_languages]
}
md = Markdown(output='html', extensions=['nl2br', NewTabExtension()])
lines = [
'#### %s' % translate(language, 'cirklo_info_title'),
'<br />',
translate(language, 'cirklo_info_text_signup'),
'',
translate(language, 'cirklo_participation_text_signup'),
]
params['privacy_settings'] = {
'cirklo': {
'label': translate(language, 'consent_cirklo_share'),
'description': md.convert('\n\n'.join(lines))
},
'city': {
'label': translate(language, 'consent_city_contact'),
'description': '<h4>%s</h4>' % translate(language, 'consent_share_with_city')
}
}
params['signup_success'] = md.convert('\n\n'.join([translate(language, 'cirklo.signup.success')]))
self.response.write(self.render('cirklo_signup', **params))
def post(self):
json_data = json.loads(self.request.body)
logging.debug(json_data)
if not recaptcha_verify(json_data['recaptcha_token']):
logging.debug('Cannot verify recaptcha response')
self.abort(400)
if not CirkloCity.create_key(json_data['city_id']).get():
logging.debug('CirkloCity was invalid')
self.abort(400)
self.response.headers['Content-Type'] = 'text/json'
whitelisted_merchant = get_whitelisted_merchant(json_data['city_id'], json_data['company']['email'])
if whitelisted_merchant:
logging.debug('email found in cirklo db')
else:
cirklo_merchant = CirkloMerchant.get_by_city_id_and_email(json_data['city_id'], json_data['company']['email'])
if cirklo_merchant:
logging.debug('email found in osa db')
whitelisted_merchant = True
if whitelisted_merchant:
return self.response.out.write(json.dumps({
'success': False,
'errormsg': translate(json_data['language'], 'cirklo.email_already_used')
}))
merchant = CirkloMerchant()
merchant.service_user_email = None
merchant.customer_id = -1
merchant.city_id = json_data['city_id']
merchant.data = {
u'company': json_data['company'],
u'language': json_data['language']
}
merchant.emails = [json_data['company']['email']]
merchant.populate_from_cirklo(None)
merchant.denied = False
merchant.put()
self.response.headers['Content-Type'] = 'text/json'
return self.response.out.write(json.dumps({'success': True, 'errormsg': None}))
| our-city-app/oca-backend | src/shop/handlers.py | Python | apache-2.0 | 26,782 | 0.002576 |
import json
import string
import random
import requests
import logging
import local_config
from couchdb import Server, http
from couchdb.http import PreconditionFailed, ResourceConflict
logger = logging.getLogger(__name__)
local_config.configure_logger(logger)
def create_db(database):
server = Server('http://localhost:5984/')
try:
db = server.create(database)
logger.info('[DB] Database %s created' % database)
except PreconditionFailed:
db = server[database]
logger.info('[DB] Database %s already exists.' % database)
return db
def get_db(database, credentials=True):
'''
Get or create given database from/in CouchDB.
'''
try:
server = Server('http://localhost:5984/')
if credentials:
server.resource.credentials = \
local_config.get_db_credentials(database)
return server[database]
except Exception:
logging.exception('[DB] Cannot connect to the database')
return None
def get_db_and_server(database):
'''
Get or create given database from/in CouchDB.
'''
try:
server = Server('http://localhost:5984/')
server.resource.credentials = local_config.get_db_credentials(database)
db = server[database]
return (db, server)
except Exception:
logging.exception('[DB] Cannot connect to the database %s' % database)
return (None, None)
def init_db(database):
'''
Create all required views to make Cozy FUSE working properly.
'''
create_db(database)
init_database_views(database)
password = get_random_key()
create_db_user(database, database, password)
logger.info('[DB] Local database %s initialized' % database)
return (database, password)
def remove_db(database):
'''
Destroy given database.
'''
server = Server('http://localhost:5984/')
try:
server.delete(database)
except http.ResourceNotFound:
logger.info('[DB] Local database %s already removed' % database)
logger.info('[DB] Local database %s removed' % database)
def get_device(name):
'''
Get device corresponding to given name. Device is returned as a dict.
'''
try:
device = list(get_db(name).view("device/all", key=name))[0].value
except IndexError:
device = None
return device
def get_folders(db):
return db.view("folder/all")
def get_files(db):
return db.view("file/all")
def get_folder(db, path):
if len(path) > 0 and path[0] != '/':
path = '/' + path
try:
folder = list(db.view("folder/byFullPath", key=path))[0].value
except IndexError:
folder = None
return folder
def get_file(db, path):
if len(path) > 0 and path[0] != '/':
path = '/' + path
try:
file_doc = list(db.view("file/byFullPath", key=path))[0].value
except IndexError:
file_doc = None
return file_doc
def get_random_key():
'''
Generate a random key of 20 chars. The first character is not a number
    because CouchDB does not like strings that start with a digit.
'''
chars = string.ascii_lowercase + string.digits
random_val = ''.join(random.choice(chars) for x in range(19))
return random.choice(string.ascii_lowercase) + random_val
def create_db_user(database, login, password, protocol="http"):
'''
Create a user for given *database*. User credentials are *login* and
*password*.
'''
headers = {'content-type': 'application/json'}
data = {
"_id": "org.couchdb.user:%s" % login,
"name": login,
"type": "user",
"roles": [],
"password": password
}
requests.post('%s://localhost:5984/_users' % (protocol),
data=json.dumps(data),
headers=headers,
verify=False)
headers = {'content-type': 'application/json'}
data = {
"admins": {
"names": [login],
"roles": []
},
"members": {
"names": [login],
"roles": []
},
}
requests.put('%s://localhost:5984/%s/_security' % (protocol, database),
data=json.dumps(data),
headers=headers,
verify=False)
logger.info('[DB] Db user created')
def remove_db_user(database):
'''
Delete user created for this database.
'''
response = requests.get(
'http://localhost:5984/_users/org.couchdb.user:%s' % database)
rev = response.json().get("_rev", "")
response = requests.delete(
'http://localhost:5984/_users/org.couchdb.user:%s?rev=%s' %
(database, rev)
)
logger.info('[DB] Db user %s deleted' % database)
def init_database_view(docType, db):
'''
Add view in database for given docType.
'''
db["_design/%s" % docType.lower()] = {
"views": {
"all": {
"map": """function (doc) {
if (doc.docType === \"%s\") {
emit(doc._id, doc)
}
}""" % docType
},
"byFolder": {
"map": """function (doc) {
if (doc.docType === \"%s\") {
emit(doc.path, doc)
}
}""" % docType
},
"byFullPath": {
"map": """function (doc) {
if (doc.docType === \"%s\") {
emit(doc.path + '/' + doc.name, doc);
}
}""" % docType
}
},
"filters": {
"all": """function (doc, req) {
return doc.docType === \"%s\"
}""" % docType
}
}
def init_database_views(database):
'''
Initialize database:
* Create database
* Initialize folder, file, binary and device views
'''
db = get_db(database, credentials=False)
try:
init_database_view('Folder', db)
logger.info('[DB] Folder design document created')
except ResourceConflict:
logger.warn('[DB] Folder design document already exists')
try:
init_database_view('File', db)
logger.info('[DB] File design document created')
except ResourceConflict:
logger.warn('[DB] File design document already exists')
try:
db["_design/device"] = {
"views": {
"all": {
"map": """function (doc) {
if (doc.docType === \"Device\") {
emit(doc.login, doc)
}
}"""
},
"byUrl": {
"map": """function (doc) {
if (doc.docType === \"Device\") {
emit(doc.url, doc)
}
}"""
}
}
}
logger.info('[DB] Device design document created')
except ResourceConflict:
logger.warn('[DB] Device design document already exists')
try:
db["_design/binary"] = {
"views": {
"all": {
"map": """function (doc) {
if (doc.docType === \"Binary\") {
emit(doc._id, doc)
}
}"""
}
}
}
logger.info('[DB] Binary design document created')
except ResourceConflict:
logger.warn('[DB] Binary design document already exists')
def init_device(database, url, path, device_pwd, device_id):
'''
    Create device objects with the filter to apply to synchronize them.
'''
db = get_db(database)
device = get_device(database)
# Update device
device['password'] = device_pwd
device['change'] = 0
device['url'] = url
device['folder'] = path
device['configuration'] = ["File", "Folder", "Binary"]
db.save(device)
# Generate filter
conditions = "(doc.docType && ("
for docType in device["configuration"]:
conditions += 'doc.docType === "%s" || ' % docType
conditions = conditions[0:-3] + '))'
first_filter = """function(doc, req) {
if(doc._deleted || %s) {
return true;
} else {
return false;
}
}""" % conditions
doctype_filter = """function(doc, req) {
if (%s) {
return true;
} else {
return false;
}
}""" % conditions
doc = {
"_id": "_design/%s" % device_id,
"views": {},
"filters": {
"filter": first_filter,
"filterDocType": doctype_filter
}
}
try:
db.save(doc)
logger.info('[DB] Device filter created for device %s' % database)
except ResourceConflict:
logger.warn('[DB] Device filter document already exists')
return False
def get_disk_space(database, url, device, device_password):
# Recover disk space
db = get_db(database)
url = url.split('/')
try:
remote = "https://%s:%s@%s" % (device, device_password, url[2])
response = requests.get('%s/disk-space'%remote)
disk_space = json.loads(response.content)
# Store disk space
res = db.view('device/all')
for device in res:
device = device.value
device['diskSpace'] = disk_space['diskSpace']
db.save(device)
# Return disk space
return disk_space['diskSpace']
except:
# Recover information in database
res = db.view('device/all')
for device in res:
device = device.value
if 'diskSpace' in device:
return device['diskSpace']
else:
# Return arbitrary information
disk_space = {
"freeDiskSpace": 1,
"usedDiskSpace": 0,
"totalDiskSpace": 1
}
return disk_space
| cozy-labs/cozy-fuse | cozyfuse/dbutils.py | Python | bsd-3-clause | 10,338 | 0.000193 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMWARE external inventory script
=================================
shamelessly copied from existing inventory scripts.
This script and its ini file can be used more than once,
e.g. vmware.py/vmware_colo.ini, vmware_idf.py/vmware_idf.ini
(the script can be a symlink),
so if you don't have a clustered vCenter but multiple ESX machines or
just different clusters, you can have an inventory per each and automatically
group hosts based on the file name, or specify a group in the ini.
'''
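# A minimal example ini, assuming the defaults used below; everything shown here except
# the [auth] section is optional, and the host/user/password values are placeholders:
#
#   [defaults]
#   guests_only = True
#   # cache_dir = /tmp/vmware_inventory_cache
#   # cache_max_age = 300
#   # hw_group = vmware_hw
#   # vm_group = vmware_vm
#
#   [auth]
#   host = vcenter.example.com
#   user = inventory_readonly
#   password = secret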
import os
import sys
import time
import ConfigParser
from psphere.client import Client
from psphere.managedobjects import HostSystem
try:
import json
except ImportError:
import simplejson as json
def save_cache(cache_item, data, config):
''' saves item to cache '''
dpath = config.get('defaults', 'cache_dir')
try:
cache = open('/'.join([dpath,cache_item]), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError, e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('defaults', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath,cache_item]), 'r')
inv = json.loads(cache.read())
cache.close()
except IOError, e:
pass # not really sure what to do here
return inv
def cache_available(cache_item, config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('defaults', 'cache_dir'):
dpath = config.get('defaults', 'cache_dir')
try:
existing = os.stat( '/'.join([dpath,cache_item]))
except:
# cache doesn't exist or isn't accessible
return False
if config.has_option('defaults', 'cache_max_age'):
            maxage = config.getint('defaults', 'cache_max_age')
            if (int(time.time()) - existing.st_mtime) <= maxage:
return True
return False
def get_host_info(host):
''' Get variables about a specific host '''
hostinfo = {
'vmware_name' : host.name,
'vmware_tag' : host.tag,
'vmware_parent': host.parent.name,
}
for k in host.capability.__dict__.keys():
if k.startswith('_'):
continue
try:
hostinfo['vmware_' + k] = str(host.capability[k])
except:
continue
return hostinfo
def get_inventory(client, config):
''' Reads the inventory from cache or vmware api '''
if cache_available('inventory', config):
inv = get_cache('inventory',config)
else:
inv= { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
        default_group = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if config.has_option('defaults', 'guests_only'):
guests_only = config.get('defaults', 'guests_only')
else:
guests_only = True
if not guests_only:
if config.has_option('defaults','hw_group'):
hw_group = config.get('defaults','hw_group')
else:
hw_group = default_group + '_hw'
inv[hw_group] = []
if config.has_option('defaults','vm_group'):
vm_group = config.get('defaults','vm_group')
else:
vm_group = default_group + '_vm'
inv[vm_group] = []
# Loop through physical hosts:
hosts = HostSystem.all(client)
for host in hosts:
if not guests_only:
inv['all']['hosts'].append(host.name)
inv[hw_group].append(host.name)
if host.tag:
taggroup = 'vmware_' + host.tag
if taggroup in inv:
inv[taggroup].append(host.name)
else:
inv[taggroup] = [ host.name ]
inv['_meta']['hostvars'][host.name] = get_host_info(host)
                save_cache(host.name, inv['_meta']['hostvars'][host.name], config)
for vm in host.vm:
inv['all']['hosts'].append(vm.name)
inv[vm_group].append(vm.name)
if vm.tag:
taggroup = 'vmware_' + vm.tag
if taggroup in inv:
inv[taggroup].append(vm.name)
else:
inv[taggroup] = [ vm.name ]
inv['_meta']['hostvars'][vm.name] = get_host_info(host)
save_cache(vm.name, inv['_meta']['hostvars'][vm.name], config)
save_cache('inventory', inv, config)
return json.dumps(inv)
def get_single_host(client, config, hostname):
inv = {}
if cache_available(hostname, config):
inv = get_cache(hostname,config)
else:
hosts = HostSystem.all(client) #TODO: figure out single host getter
for host in hosts:
if hostname == host.name:
inv = get_host_info(host)
break
for vm in host.vm:
if hostname == vm.name:
inv = get_host_info(host)
break
save_cache(hostname,inv,config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
hostname = None
if len(sys.argv) > 1:
if sys.argv[1] == "--host":
hostname = sys.argv[2]
# Read config
config = ConfigParser.SafeConfigParser()
    for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'vmware.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
try:
client = Client( config.get('auth','host'),
config.get('auth','user'),
config.get('auth','password'),
)
except Exception, e:
client = None
#print >> STDERR "Unable to login (only cache avilable): %s", str(e)
# acitually do the work
if hostname is None:
inventory = get_inventory(client, config)
else:
inventory = get_single_host(client, config, hostname)
# return to ansible
print inventory
| mwarkentin/ansible | plugins/inventory/vmware.py | Python | gpl-3.0 | 6,210 | 0.007085 |
from selenium.webdriver.support.select import Select
def get_selected_option(browser, css_selector):
# Takes a css selector for a <select> element and returns the value of
# the selected option
select = Select(browser.find_element_by_css_selector(css_selector))
return select.first_selected_option.get_attribute('value')
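# Usage sketch (assumes a running WebDriver session and a page containing a
# <select id="country"> element; the URL and selector are placeholders):
#
#   from selenium import webdriver
#   browser = webdriver.Firefox()
#   browser.get('http://localhost:8000/signup/')
#   selected_country = get_selected_option(browser, '#country')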
| egineering-llc/egat_example_project | tests/test_helpers/selenium_helper.py | Python | mit | 338 | 0.002959 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-12-15 19:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archive', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='item',
name='date',
field=models.DateField(help_text='Date of creation or last known time'),
),
migrations.AlterField(
model_name='item',
name='details',
field=models.TextField(blank=True, help_text='Any details about the item', null=True),
),
migrations.AlterField(
model_name='item',
name='file',
field=models.FileField(help_text='The file that should be uploaded', upload_to='archive/'),
),
migrations.AlterField(
model_name='item',
name='name',
field=models.CharField(blank=True, help_text='Displayed name rather than file name (Note. filename will still be shown)', max_length=40, null=True),
),
migrations.AlterField(
model_name='item',
name='type',
field=models.CharField(choices=[('im', 'Image'), ('tx', 'Text File'), ('we', 'Website File')], default='tx', max_length=2),
),
]
| WarwickAnimeSoc/aniMango | archive/migrations/0002_auto_20181215_1934.py | Python | mit | 1,365 | 0.003663 |
# Copyright (c) 2010 Spotify AB
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EMR service
"""
import types
import boto
import boto.utils
from boto.ec2.regioninfo import RegionInfo
from boto.emr.emrobject import JobFlow, RunJobFlowResponse
from boto.emr.emrobject import AddInstanceGroupsResponse, ModifyInstanceGroupsResponse
from boto.emr.step import JarStep
from boto.connection import AWSQueryConnection
from boto.exception import EmrResponseError
class EmrConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
'elasticmapreduce.amazonaws.com')
ResponseError = EmrResponseError
# Constants for AWS Console debugging
DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/'):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path)
def _required_auth_capability(self):
return ['emr']
def describe_jobflow(self, jobflow_id):
"""
Describes a single Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: The job flow id of interest
"""
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
return jobflows[0]
def describe_jobflows(self, states=None, jobflow_ids=None,
created_after=None, created_before=None):
"""
Retrieve all the Elastic MapReduce job flows on your account
:type states: list
:param states: A list of strings with job flow states wanted
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
:type created_after: datetime
:param created_after: Bound on job flow creation time
:type created_before: datetime
:param created_before: Bound on job flow creation time
"""
params = {}
if states:
self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
def terminate_jobflow(self, jobflow_id):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: A jobflow id
"""
self.terminate_jobflows([jobflow_id])
def terminate_jobflows(self, jobflow_ids):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
"""
params = {}
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
return self.get_status('TerminateJobFlows', params, verb='POST')
def add_jobflow_steps(self, jobflow_id, steps):
"""
Adds steps to a jobflow
:type jobflow_id: str
:param jobflow_id: The job flow id
:type steps: list(boto.emr.Step)
:param steps: A list of steps to add to the job
"""
if type(steps) != types.ListType:
steps = [steps]
params = {}
params['JobFlowId'] = jobflow_id
# Step args
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
return self.get_object(
'AddJobFlowSteps', params, RunJobFlowResponse, verb='POST')
def add_instance_groups(self, jobflow_id, instance_groups):
"""
Adds instance groups to a running cluster.
:type jobflow_id: str
:param jobflow_id: The id of the jobflow which will take the
new instance groups
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: A list of instance groups to add to the job
"""
if type(instance_groups) != types.ListType:
instance_groups = [instance_groups]
params = {}
params['JobFlowId'] = jobflow_id
params.update(self._build_instance_group_list_args(instance_groups))
return self.get_object('AddInstanceGroups', params,
AddInstanceGroupsResponse, verb='POST')
def modify_instance_groups(self, instance_group_ids, new_sizes):
"""
Modify the number of nodes and configuration settings in an
instance group.
:type instance_group_ids: list(str)
:param instance_group_ids: A list of the ID's of the instance
groups to be modified
:type new_sizes: list(int)
:param new_sizes: A list of the new sizes for each instance group
"""
if type(instance_group_ids) != types.ListType:
instance_group_ids = [instance_group_ids]
if type(new_sizes) != types.ListType:
new_sizes = [new_sizes]
instance_groups = zip(instance_group_ids, new_sizes)
params = {}
for k, ig in enumerate(instance_groups):
# could be wrong - the example amazon gives uses
# InstanceRequestCount, while the api documentation
# says InstanceCount
params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0]
params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1]
return self.get_object('ModifyInstanceGroups', params,
ModifyInstanceGroupsResponse, verb='POST')
def run_jobflow(self, name, log_uri=None, ec2_keyname=None,
availability_zone=None,
master_instance_type='m1.small',
slave_instance_type='m1.small', num_instances=1,
action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
enable_debugging=False,
hadoop_version=None,
steps=[],
bootstrap_actions=[],
instance_groups=None,
additional_info=None,
ami_version=None,
api_params=None):
"""
Runs a job flow
:type name: str
:param name: Name of the job flow
:type log_uri: str
:param log_uri: URI of the S3 bucket to place logs
:type ec2_keyname: str
:param ec2_keyname: EC2 key used for the instances
:type availability_zone: str
:param availability_zone: EC2 availability zone of the cluster
:type master_instance_type: str
:param master_instance_type: EC2 instance type of the master
:type slave_instance_type: str
:param slave_instance_type: EC2 instance type of the slave nodes
:type num_instances: int
:param num_instances: Number of instances in the Hadoop cluster
:type action_on_failure: str
:param action_on_failure: Action to take if a step terminates
:type keep_alive: bool
:param keep_alive: Denotes whether the cluster should stay
alive upon completion
:type enable_debugging: bool
:param enable_debugging: Denotes whether AWS console debugging
should be enabled.
:type hadoop_version: str
:param hadoop_version: Version of Hadoop to use. This no longer
defaults to '0.20' and now uses the AMI default.
:type steps: list(boto.emr.Step)
:param steps: List of steps to add with the job
:type bootstrap_actions: list(boto.emr.BootstrapAction)
:param bootstrap_actions: List of bootstrap actions that run
before Hadoop starts.
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: Optional list of instance groups to
use when creating this job.
NB: When provided, this argument supersedes num_instances
and master/slave_instance_type.
:type ami_version: str
:param ami_version: Amazon Machine Image (AMI) version to use
for instances. Values accepted by EMR are '1.0', '2.0', and
'latest'; EMR currently defaults to '1.0' if you don't set
'ami_version'.
:type additional_info: JSON str
:param additional_info: A JSON string for selecting additional features
:type api_params: dict
:param api_params: a dictionary of additional parameters to pass
directly to the EMR API (so you don't have to upgrade boto to
use new EMR features). You can also delete an API parameter
by setting it to None.
:rtype: str
:return: The jobflow id
"""
params = {}
if action_on_failure:
params['ActionOnFailure'] = action_on_failure
if log_uri:
params['LogUri'] = log_uri
params['Name'] = name
# Common instance args
common_params = self._build_instance_common_args(ec2_keyname,
availability_zone,
keep_alive,
hadoop_version)
params.update(common_params)
# NB: according to the AWS API's error message, we must
# "configure instances either using instance count, master and
# slave instance type or instance groups but not both."
#
# Thus we switch here on the truthiness of instance_groups.
if not instance_groups:
# Instance args (the common case)
instance_params = self._build_instance_count_and_type_args(
master_instance_type,
slave_instance_type,
num_instances)
params.update(instance_params)
else:
# Instance group args (for spot instances or a heterogenous cluster)
list_args = self._build_instance_group_list_args(instance_groups)
instance_params = dict(
('Instances.%s' % k, v) for k, v in list_args.iteritems()
)
params.update(instance_params)
# Debugging step from EMR API docs
if enable_debugging:
debugging_step = JarStep(name='Setup Hadoop Debugging',
action_on_failure='TERMINATE_JOB_FLOW',
main_class=None,
jar=self.DebuggingJar,
step_args=self.DebuggingArgs)
steps.insert(0, debugging_step)
# Step args
if steps:
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
if bootstrap_actions:
bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
params.update(self._build_bootstrap_action_list(bootstrap_action_args))
if ami_version:
params['AmiVersion'] = ami_version
if additional_info is not None:
params['AdditionalInfo'] = additional_info
if api_params:
for key, value in api_params.iteritems():
if value is None:
params.pop(key, None)
else:
params[key] = value
response = self.get_object(
'RunJobFlow', params, RunJobFlowResponse, verb='POST')
return response.jobflowid
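    # Illustrative call, with placeholder credentials and S3 paths (JarStep is
    # imported above; see boto.emr.step for the other step types):
    #
    #   conn = EmrConnection('<aws access key>', '<aws secret key>')
    #   step = JarStep(name='example step',
    #                  jar='s3n://example-bucket/wordcount.jar',
    #                  step_args=['s3n://example-bucket/input',
    #                             's3n://example-bucket/output'])
    #   jobflow_id = conn.run_jobflow(name='example jobflow',
    #                                 log_uri='s3://example-bucket/logs',
    #                                 steps=[step])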
def set_termination_protection(self, jobflow_id,
termination_protection_status):
"""
Set termination protection on specified Elastic MapReduce job flows
:type jobflow_ids: list or str
:param jobflow_ids: A list of job flow IDs
:type termination_protection_status: bool
:param termination_protection_status: Termination protection status
"""
assert termination_protection_status in (True, False)
params = {}
params['TerminationProtected'] = (termination_protection_status and "true") or "false"
self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
return self.get_status('SetTerminationProtection', params, verb='POST')
def _build_bootstrap_action_args(self, bootstrap_action):
bootstrap_action_params = {}
bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
try:
bootstrap_action_params['Name'] = bootstrap_action.name
except AttributeError:
pass
args = bootstrap_action.args()
if args:
self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')
return bootstrap_action_params
def _build_step_args(self, step):
step_params = {}
step_params['ActionOnFailure'] = step.action_on_failure
step_params['HadoopJarStep.Jar'] = step.jar()
main_class = step.main_class()
if main_class:
step_params['HadoopJarStep.MainClass'] = main_class
args = step.args()
if args:
self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')
step_params['Name'] = step.name
return step_params
def _build_bootstrap_action_list(self, bootstrap_actions):
if type(bootstrap_actions) != types.ListType:
bootstrap_actions = [bootstrap_actions]
params = {}
for i, bootstrap_action in enumerate(bootstrap_actions):
for key, value in bootstrap_action.iteritems():
params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
return params
def _build_step_list(self, steps):
if type(steps) != types.ListType:
steps = [steps]
params = {}
for i, step in enumerate(steps):
for key, value in step.iteritems():
params['Steps.member.%s.%s' % (i+1, key)] = value
return params
def _build_instance_common_args(self, ec2_keyname, availability_zone,
keep_alive, hadoop_version):
"""
Takes a number of parameters used when starting a jobflow (as
specified in run_jobflow() above). Returns a comparable dict for
use in making a RunJobFlow request.
"""
params = {
'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),
}
if hadoop_version:
params['Instances.HadoopVersion'] = hadoop_version
if ec2_keyname:
params['Instances.Ec2KeyName'] = ec2_keyname
if availability_zone:
params['Instances.Placement.AvailabilityZone'] = availability_zone
return params
def _build_instance_count_and_type_args(self, master_instance_type,
slave_instance_type, num_instances):
"""
Takes a master instance type (string), a slave instance type
(string), and a number of instances. Returns a comparable dict
for use in making a RunJobFlow request.
"""
params = {
'Instances.MasterInstanceType' : master_instance_type,
'Instances.SlaveInstanceType' : slave_instance_type,
'Instances.InstanceCount' : num_instances,
}
return params
def _build_instance_group_args(self, instance_group):
"""
Takes an InstanceGroup; returns a dict that, when its keys are
properly prefixed, can be used for describing InstanceGroups in
RunJobFlow or AddInstanceGroups requests.
"""
params = {
'InstanceCount' : instance_group.num_instances,
'InstanceRole' : instance_group.role,
'InstanceType' : instance_group.type,
'Name' : instance_group.name,
'Market' : instance_group.market
}
if instance_group.market == 'SPOT':
params['BidPrice'] = instance_group.bidprice
return params
def _build_instance_group_list_args(self, instance_groups):
"""
Takes a list of InstanceGroups, or a single InstanceGroup. Returns
a comparable dict for use in making a RunJobFlow or AddInstanceGroups
request.
"""
if type(instance_groups) != types.ListType:
instance_groups = [instance_groups]
params = {}
for i, instance_group in enumerate(instance_groups):
ig_dict = self._build_instance_group_args(instance_group)
for key, value in ig_dict.iteritems():
params['InstanceGroups.member.%d.%s' % (i+1, key)] = value
return params
| darcyliu/storyboard | boto/emr/connection.py | Python | mit | 19,314 | 0.002537 |
from . import db
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %s>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return '<User %s>' % self.username
| mfwarren/FreeCoding | 2014/12/fc_30/app/models.py | Python | mit | 596 | 0 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Jags(AutotoolsPackage):
"""JAGS is Just Another Gibbs Sampler. It is a program for analysis of
Bayesian hierarchical models using Markov Chain Monte Carlo (MCMC)
simulation not wholly unlike BUGS"""
homepage = "http://mcmc-jags.sourceforge.net/"
url = "https://downloads.sourceforge.net/project/mcmc-jags/JAGS/4.x/Source/JAGS-4.2.0.tar.gz"
version('4.2.0', '9e521b3cfb23d3290a8c6bc0b79bf426')
depends_on('blas')
depends_on('lapack')
def configure_args(self):
args = ['--with-blas=-L%s' % self.spec['blas'].prefix.lib,
'--with-lapack=-L%s' % self.spec['lapack'].prefix.lib]
return args
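    # Hypothetical usage once this recipe is available in a Spack repository; the
    # BLAS/LAPACK provider is chosen on the command line, e.g.:
    #
    #   spack install jags ^openblas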
| skosukhin/spack | var/spack/repos/builtin/packages/jags/package.py | Python | lgpl-2.1 | 1,927 | 0.000519 |
import logging
import re
import socket
from mopidy.config import validators
from mopidy.internal import log, path
def decode(value):
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
for char in ("\\", "\n", "\t"):
value = value.replace(
char.encode(encoding="unicode-escape").decode(), char
)
return value
def encode(value):
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
for char in ("\\", "\n", "\t"):
value = value.replace(
char, char.encode(encoding="unicode-escape").decode()
)
return value
class DeprecatedValue:
pass
class ConfigValue:
"""Represents a config key's value and how to handle it.
Normally you will only be interacting with sub-classes for config values
that encode either deserialization behavior and/or validation.
Each config value should be used for the following actions:
1. Deserializing from a raw string and validating, raising ValueError on
failure.
2. Serializing a value back to a string that can be stored in a config.
3. Formatting a value to a printable form (useful for masking secrets).
:class:`None` values should not be deserialized, serialized or formatted,
the code interacting with the config should simply skip None config values.
"""
def deserialize(self, value):
"""Cast raw string to appropriate type."""
return decode(value)
def serialize(self, value, display=False):
"""Convert value back to string for saving."""
if value is None:
return ""
return str(value)
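# A minimal sketch of a custom value type built on the contract above (hypothetical,
# not part of the actual config schema): deserialize() casts and validates the raw
# string, serialize() turns the value back into a string for saving.
#
#   class CommaSeparatedInts(ConfigValue):
#       def deserialize(self, value):
#           value = decode(value).strip()
#           return tuple(int(v) for v in value.split(",") if v.strip())
#
#       def serialize(self, value, display=False):
#           if not value:
#               return ""
#           return ",".join(str(v) for v in value)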
class Deprecated(ConfigValue):
"""Deprecated value.
Used for ignoring old config values that are no longer in use, but should
not cause the config parser to crash.
"""
def deserialize(self, value):
return DeprecatedValue()
def serialize(self, value, display=False):
return DeprecatedValue()
class String(ConfigValue):
"""String value.
Is decoded as utf-8 and \\n \\t escapes should work and be preserved.
"""
def __init__(self, optional=False, choices=None):
self._required = not optional
self._choices = choices
def deserialize(self, value):
value = decode(value).strip()
validators.validate_required(value, self._required)
if not value:
return None
validators.validate_choice(value, self._choices)
return value
def serialize(self, value, display=False):
if value is None:
return ""
return encode(value)
class Secret(String):
"""Secret string value.
Is decoded as utf-8 and \\n \\t escapes should work and be preserved.
Should be used for passwords, auth tokens etc. Will mask value when being
displayed.
"""
def __init__(self, optional=False, choices=None):
self._required = not optional
self._choices = None # Choices doesn't make sense for secrets
def serialize(self, value, display=False):
if value is not None and display:
return "********"
return super().serialize(value, display)
class Integer(ConfigValue):
"""Integer value."""
def __init__(
self, minimum=None, maximum=None, choices=None, optional=False
):
self._required = not optional
self._minimum = minimum
self._maximum = maximum
self._choices = choices
def deserialize(self, value):
value = decode(value)
validators.validate_required(value, self._required)
if not value:
return None
value = int(value)
validators.validate_choice(value, self._choices)
validators.validate_minimum(value, self._minimum)
validators.validate_maximum(value, self._maximum)
return value
class Boolean(ConfigValue):
"""Boolean value.
Accepts ``1``, ``yes``, ``true``, and ``on`` with any casing as
:class:`True`.
Accepts ``0``, ``no``, ``false``, and ``off`` with any casing as
:class:`False`.
"""
true_values = ("1", "yes", "true", "on")
false_values = ("0", "no", "false", "off")
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value)
validators.validate_required(value, self._required)
if not value:
return None
if value.lower() in self.true_values:
return True
elif value.lower() in self.false_values:
return False
raise ValueError(f"invalid value for boolean: {value!r}")
def serialize(self, value, display=False):
if value is True:
return "true"
elif value in (False, None):
return "false"
else:
raise ValueError(f"{value!r} is not a boolean")
class List(ConfigValue):
"""List value.
    Supports elements split by commas or newlines. Newlines take precedence and
empty list items will be filtered out.
"""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value)
if "\n" in value:
values = re.split(r"\s*\n\s*", value)
else:
values = re.split(r"\s*,\s*", value)
values = tuple(v.strip() for v in values if v.strip())
validators.validate_required(values, self._required)
return tuple(values)
def serialize(self, value, display=False):
if not value:
return ""
return "\n " + "\n ".join(encode(v) for v in value if v)
class LogColor(ConfigValue):
def deserialize(self, value):
value = decode(value)
validators.validate_choice(value.lower(), log.COLORS)
return value.lower()
def serialize(self, value, display=False):
if value.lower() in log.COLORS:
return encode(value.lower())
return ""
class LogLevel(ConfigValue):
"""Log level value.
Expects one of ``critical``, ``error``, ``warning``, ``info``, ``debug``,
``trace``, or ``all``, with any casing.
"""
levels = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
"trace": log.TRACE_LOG_LEVEL,
"all": logging.NOTSET,
}
def deserialize(self, value):
value = decode(value)
validators.validate_choice(value.lower(), self.levels.keys())
return self.levels.get(value.lower())
def serialize(self, value, display=False):
lookup = {v: k for k, v in self.levels.items()}
if value in lookup:
return encode(lookup[value])
return ""
class Hostname(ConfigValue):
"""Network hostname value."""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value, display=False):
value = decode(value).strip()
validators.validate_required(value, self._required)
if not value:
return None
socket_path = path.get_unix_socket_path(value)
if socket_path is not None:
path_str = Path(not self._required).deserialize(socket_path)
return f"unix:{path_str}"
try:
socket.getaddrinfo(value, None)
except OSError:
raise ValueError("must be a resolveable hostname or valid IP")
return value
class Port(Integer):
"""Network port value.
    Expects an integer in the range 0-65535; zero tells the kernel to simply
    allocate a port for us.
"""
def __init__(self, choices=None, optional=False):
super().__init__(
minimum=0, maximum=2 ** 16 - 1, choices=choices, optional=optional
)
class _ExpandedPath(str):
def __new__(cls, original, expanded):
return super().__new__(cls, expanded)
def __init__(self, original, expanded):
self.original = original
class Path(ConfigValue):
"""File system path.
The following expansions of the path will be done:
- ``~`` to the current user's home directory
- ``$XDG_CACHE_DIR`` according to the XDG spec
- ``$XDG_CONFIG_DIR`` according to the XDG spec
- ``$XDG_DATA_DIR`` according to the XDG spec
- ``$XDG_MUSIC_DIR`` according to the XDG spec
"""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value).strip()
expanded = path.expand_path(value)
validators.validate_required(value, self._required)
validators.validate_required(expanded, self._required)
if not value or expanded is None:
return None
return _ExpandedPath(value, expanded)
def serialize(self, value, display=False):
if isinstance(value, _ExpandedPath):
value = value.original
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
return value
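# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the ``decode``/``encode`` helpers and ``validators`` behave as they
# are used above, the config value types round-trip roughly like this; kept as
# comments so the module's import-time behaviour is unchanged:
#
#     >>> Boolean().deserialize("Yes")
#     True
#     >>> Boolean().serialize(False)
#     'false'
#     >>> List().deserialize("foo, bar , ,baz")
#     ('foo', 'bar', 'baz')
#     >>> Port().deserialize("6680")
#     6680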
| kingosticks/mopidy | mopidy/config/types.py | Python | apache-2.0 | 9,146 | 0 |
#import coin
class IDatabase:
    def enter_coin(self, coin):
        raise NotImplementedError | SVladkov/Numismatic | database/idatabase.py | Python | gpl-3.0 | 94 | 0.042553 |
from django.test import TestCase
from ships.models import *
class ShipModelTest(TestCase):
pass
| davidgillies/ships | ships_proj/ships/tests/test_models.py | Python | gpl-2.0 | 109 | 0.018349 |
import sys
import pickle
##########################################################
# usage
# pypy find_2g.py xid_train.p ../../data/train
# xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify
# the order of samples in training data
# ../../data/train is the path of original train data
##########################################################
xid_name=sys.argv[1]
data_path=sys.argv[2]
xid=pickle.load(open(xid_name)) #xid_train.p or xid_test.p
newc=pickle.load(open('newc.p'))
cmd2g={}
for i in newc:
for j in newc:
cmd2g[(i,j)]=0
print newc
for c,f in enumerate(xid):#(files[len(files)/10*a1:len(files)/10*a2]):
count={}
for i in cmd2g:
count[i]=0
fo=open(data_path+'/'+f+'.asm')
tot=0
a=-1
b=-1
for line in fo:
xx=line.split()
for x in xx:
if x in newc:
a=b
b=x
if (a,b) in cmd2g:
count[(a,b)]+=1
tot+=1
# print (b,a)
fo.close()
if c%10==0:
print c*1.0/len(xid),tot
for i in cmd2g:
cmd2g[i]=count[i]+cmd2g[i]
del count
import pickle
cmd2gx={}
for i in cmd2g:
if cmd2g[i]>10:
cmd2gx[i]=cmd2g[i]
print len(cmd2gx)
pickle.dump(cmd2gx,open('cmd2g.p','w'))
| bikash/kaggleCompetition | microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/find_2g.py | Python | apache-2.0 | 1,327 | 0.027882 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------------------------------------
dlg_subida
maintains the information for the climb (subida) dialog
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
revision 0.2 2015/nov mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
---------------------------------------------------------------------------------------------------
"""
__version__ = "$revision: 0.2$"
__author__ = "mlabru, sophosoft"
__date__ = "2015/12"
# < imports >--------------------------------------------------------------------------------------
# python library
import json
import logging
import os
# PyQt library
from PyQt4 import QtCore
from PyQt4 import QtGui
# view
import view.piloto.dlg_subida_ui as dlg
# < class CDlgSubida >-----------------------------------------------------------------------------
class CDlgSubida(QtGui.QDialog, dlg.Ui_CDlgSubida):
"""
    maintains the information for the climb (subida) dialog
"""
# ---------------------------------------------------------------------------------------------
def __init__(self, fsck_http, fdct_config, f_strip_cur, fdct_sub, f_parent=None):
"""
        @param fsck_http: communication socket to the server
        @param fdct_config: configuration dictionary
        @param f_strip_cur: selected strip
        @param fdct_sub: climb procedures dictionary
        @param f_parent: parent window
"""
# init super class
super(CDlgSubida, self).__init__(f_parent)
        # communication socket
        self.__sck_http = fsck_http
        assert self.__sck_http
        # configuration dictionary
        self.__dct_config = fdct_config
        assert self.__dct_config is not None
        # climb procedures dictionary
        self.__dct_sub = fdct_sub
        assert self.__dct_sub is not None
        # build the dialog
        self.setupUi(self)
        # set the dialog title
        self.setWindowTitle(u"Procedimento de Subida")
        # slot/signal connection setup
        self.__config_connects()
        # titles and messages for the edit window
        self.__config_texts()
        # restore the edit window settings
        self.__restore_settings()
        # is the climb dictionary empty?
if not self.__dct_sub:
            # load the dictionary
            self.__load_sub()
        # initialise values
        self.cbx_sub.addItems(sorted(self.__dct_sub.values()))
        # configure buttons
        self.bbx_subida.button(QtGui.QDialogButtonBox.Cancel).setText("&Cancela")
        self.bbx_subida.button(QtGui.QDialogButtonBox.Ok).setFocus()
        # initialise the climb parameters
self.__update_command()
# ---------------------------------------------------------------------------------------------
def __config_connects(self):
"""
        configure the slot/signal connections
        """
        # connect spinBox
self.cbx_sub.currentIndexChanged.connect(self.__on_cbx_currentIndexChanged)
# ---------------------------------------------------------------------------------------------
def __config_texts(self):
"""
DOCUMENT ME!
"""
        # configure titles and messages
self.__txt_settings = "CDlgSubida"
# ---------------------------------------------------------------------------------------------
def get_data(self):
"""
DOCUMENT ME!
"""
# return command line
return self.lbl_comando.text()
# ---------------------------------------------------------------------------------------------
def __load_sub(self):
"""
        load the climb procedures dictionary
"""
# check for requirements
assert self.__sck_http is not None
assert self.__dct_config is not None
assert self.__dct_sub is not None
        # build the climbs request
ls_req = "data/sub.json"
# get server address
l_srv = self.__dct_config.get("srv.addr", None)
if l_srv is not None:
            # get the climb data from the server
            l_dict = self.__sck_http.get_data(l_srv, ls_req)
            if l_dict is not None:
                # put the climbs into the dictionary
                self.__dct_sub.update(json.loads(l_dict))
            # otherwise, it was not found on the server...
else:
# logger
l_log = logging.getLogger("CDlgSubida::__load_sub")
l_log.setLevel(logging.ERROR)
l_log.error(u"<E01: tabela de subidas não existe no servidor.")
        # otherwise, the server address was not found
else:
# logger
l_log = logging.getLogger("CDlgSubida::__load_sub")
l_log.setLevel(logging.WARNING)
l_log.warning(u"<E02: srv.addr não existe na configuração.")
# ---------------------------------------------------------------------------------------------
def __restore_settings(self):
"""
        restore the saved settings for this window
"""
        # get the settings
l_set = QtCore.QSettings("sophosoft", "piloto")
assert l_set
        # restore the window geometry
self.restoreGeometry(l_set.value("%s/Geometry" % (self.__txt_settings)).toByteArray())
# ---------------------------------------------------------------------------------------------
def __update_command(self):
"""
DOCUMENT ME!
"""
        # for every climb procedure...
        for l_key, l_sub in self.__dct_sub.iteritems():
            # is this the selected climb?
            if self.cbx_sub.currentText() == l_sub:
                break
        # build the command
        ls_cmd = "SUB {}".format(l_key)
        # put the command into the label
self.lbl_comando.setText(ls_cmd)
# =============================================================================================
    # field editing
# =============================================================================================
# ---------------------------------------------------------------------------------------------
@QtCore.pyqtSignature("int")
def __on_cbx_currentIndexChanged(self, f_val):
"""
DOCUMENT ME!
"""
        # update the command
self.__update_command()
# < the end >--------------------------------------------------------------------------------------
| mlabru/ptracks | view/piloto/dlg_subida.py | Python | gpl-3.0 | 7,178 | 0.001123 |
# Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
import a10_openstack_lib.resources.a10_device_instance as a10_device_instance
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import resource_helper
from neutron.common import exceptions
from neutron.services import service_base
import a10_neutron_lbaas.neutron_ext.common.constants as constants
from a10_neutron_lbaas.neutron_ext.common import resources
RESOURCE_ATTRIBUTE_MAP = resources.apply_template(a10_device_instance.RESOURCE_ATTRIBUTE_MAP,
attributes)
# TODO(rename this to *Extension to avoid config file confusion)
class A10DeviceInstance(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "A10 Device Instances"
@classmethod
def get_alias(cls):
return constants.A10_DEVICE_INSTANCE_EXT
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/neutron/a10_device_instance/api/v1.0"
@classmethod
def get_updated(cls):
return "2015-11-18T16:17:00-07:00"
@classmethod
def get_description(cls):
return ("A10 Device Instances")
@classmethod
def get_resources(cls):
"""Returns external resources."""
my_plurals = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attributes.PLURALS.update(my_plurals)
attr_map = RESOURCE_ATTRIBUTE_MAP
resources = resource_helper.build_resource_info(my_plurals,
attr_map,
constants.A10_DEVICE_INSTANCE)
return resources
def update_attributes_map(self, attributes):
super(A10DeviceInstance, self).update_attributes_map(
attributes,
extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class A10DeviceInstanceNotFoundError(exceptions.NotFound):
def __init__(self, a10_device_instance_id):
self.msg = _("A10 Device Instance {} could not be found.")
super(A10DeviceInstanceNotFoundError, self).__init__()
class A10DeviceInstanceInUseError(exceptions.InUse):
def __init__(self, a10_device_instance_id):
self.message = _("A10 Device Instance is in use and cannot be deleted.")
self.msg = self.message
super(A10DeviceInstanceInUseError, self).__init__()
@six.add_metaclass(abc.ABCMeta)
class A10DeviceInstancePluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return constants.A10_DEVICE_INSTANCE
def get_plugin_description(self):
return constants.A10_DEVICE_INSTANCE
def get_plugin_type(self):
return constants.A10_DEVICE_INSTANCE
def __init__(self):
super(A10DeviceInstancePluginBase, self).__init__()
@abc.abstractmethod
def get_a10_device_instances(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def create_a10_device_instance(self, context, device_instance):
pass
@abc.abstractmethod
def get_a10_device_instance(self, context, id, fields=None):
pass
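# --- Illustrative sketch (not part of the original module) ---
# A concrete plugin would subclass the base and fill in the abstract methods;
# the ``self._db`` storage calls below are hypothetical and kept as comments:
#
#     class A10DeviceInstancePlugin(A10DeviceInstancePluginBase):
#         def get_a10_device_instances(self, context, filters=None, fields=None):
#             return self._db.list(context, filters, fields)
#         def create_a10_device_instance(self, context, device_instance):
#             return self._db.create(context, device_instance)
#         def get_a10_device_instance(self, context, id, fields=None):
#             return self._db.get(context, id, fields)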
| dougwig/a10-neutron-lbaas | a10_neutron_lbaas/neutron_ext/extensions/a10DeviceInstance.py | Python | apache-2.0 | 4,037 | 0.000991 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class DiagnosticSettingsCategoryResource(ProxyOnlyResource):
"""The diagnostic settings category resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Azure resource Id
:vartype id: str
:ivar name: Azure resource name
:vartype name: str
:ivar type: Azure resource type
:vartype type: str
:param category_type: The type of the diagnostic settings category.
Possible values include: 'Metrics', 'Logs'
:type category_type: str or ~azure.mgmt.monitor.models.CategoryType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'category_type': {'key': 'properties.categoryType', 'type': 'CategoryType'},
}
def __init__(self, category_type=None):
super(DiagnosticSettingsCategoryResource, self).__init__()
self.category_type = category_type
| AutorestCI/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/diagnostic_settings_category_resource.py | Python | mit | 1,650 | 0.000606 |
# -*- encoding: utf-8 -*-
"""Test class for InterSatellite Sync feature
:Requirement: Satellitesync
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.decorators import (
run_only_on,
stubbed,
tier1,
tier3,
upgrade
)
from robottelo.test import UITestCase
class InterSatelliteSyncTestCase(UITestCase):
"""Implements InterSatellite Sync tests in UI"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_show_repo_export_history(self):
"""Product history shows repo export history on export.
:id: 01d82253-081b-4d11-9a5b-e6052173fe47
:steps: Export a repo to a specified location in settings.
:expectedresults: Repo/Product history should reflect the export
history with user and time.
:caseautomation: notautomated
:CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_show_cv_export_history(self):
"""CV history shows CV version export history on export.
:id: 06e26cca-e262-4eff-b8d7-fbca504a8acb
:steps: Export a CV to a specified location in settings.
:expectedresults: CV history should reflect the export history with
user, version, action and time.
:caseautomation: notautomated
:CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_positive_update_cdn_url(self):
"""Update CDN URL to import from upstream.
:id: 5ff30764-a1b1-48df-a6a1-0f1d23f883b9
:steps:
1. In upstream, Export Redhat repo/CV to a directory.
2. Copy exported contents to /var/www/html.
3. In downstream, Update CDN URL with step 2 location to import the
Redhat contents.
4. Enable and sync the imported repo from Redhat Repositories page.
:expectedresults:
1. The CDN URL is is updated successfully.
2. The imported repo is enabled and sync.
:caseautomation: notautomated
:CaseImportance: Critical
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_negative_update_cdn_url(self):
"""Update non existing CDN URL to import from upstream.
:id: 4bf74712-dac8-447b-9c9f-227a41cdec4d
:steps:
1. In downstream, Update CDN URL with some non existing url.
2. Attempt to Enable and sync some repo from Redhat Repositories
page.
:expectedresults:
            1. The CDN URL cannot be updated to a non-existent URL.
            2. None of the repos can be enabled and synced.
:caseautomation: notautomated
:CaseImportance: Critical
"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_restrict_other_redhat_repo_import(self):
"""Restrict the import/sync of non exported repos.
:id: 7091ca13-7f58-4733-87d5-1fa3670bfcee
:steps:
1. Export Red Hat YUM repo to path which will be accessible over
HTTP.
2. Define the CDN URL the same as the exported HTTP URL.
3. Attempt to Import/Enable non exported repos from Redhat
Repositories page.
:expectedresults: The import of non exported repos is restricted.
:caseautomation: notautomated
:CaseLevel: System
"""
| sghai/robottelo | tests/foreman/ui/test_satellitesync.py | Python | gpl-3.0 | 3,555 | 0 |
import os
import re
import copy
import paramiko
import StringIO
import unicodedata
from paramiko import SSHException, BadHostKeyException
# CloudScape Libraries
from cloudscape.common import config
from cloudscape.common import logger
from cloudscape.common.scp import SCPClient
from cloudscape.common.utils import valid, invalid
class RemoteConnect(object):
"""
    Wrapper class designed to handle remote SSH connections to both Linux
and Windows hosts. Provides methods to open a connection, run remote
commands, as well as basic SCP functionality.
The connection object can use either password or SSH key authentication
to connect to the remote host.
Setting up a connection::
# Import the class
from cloudscape.common.remote import RemoteConnect
# SSH password connection parameters. Note that 'port' is optional for
# both password and key authentication and defaults to 22.
pass_params = {
'host': '192.168.0.12',
'user': 'admin',
'password': 'secret',
'port': 22
}
# SSH key connection parameters. The key parameter can either be a path
# to a file, or a private key you have read into a string.
key_params = {
'host': '192.168.0.12',
'user': 'admin',
'key': '/home/user/.ssh/private_key'
}
# Setup the connection
remote = RemoteConnect('linux', key_params)
"""
def __init__(self, sys_type='linux', conn={}):
"""
Initialize a Paramiko SSH connection.
:param sys_type: The type of machine to connection, either 'linux' or 'windows'. Defaults to 'linux'
:type sys_type: str
:param conn: SSH connection parameters.
:type conn: dict
"""
self.conf = config.parse()
self.log = logger.create(__name__, self.conf.server.log)
# Valid system types
self.sys_types = ['linux', 'windows']
# Required connection parameters
self.conn_attr = {
'linux': ['host', 'user'],
'windows': ['host', 'user']
}
# Remote system type and connection parameters
self.sys_type = sys_type
self.sys_conn = copy.copy(conn)
# Make sure connection parameters are valid
self.params_valid = self._validate()
# Set any default connection parameters
if self.params_valid['valid'] == True:
self._set_defaults()
""" ERROR MESSAGES
Method to define string messages for internal error codes.
"""
def _error(self, code):
# Error code numbers and messages
error_codes = {
000: "Missing required 'type' parameter",
001: "Unsupported 'type' parameter '%s' - must be one of '%s'" % (self.sys_type, str(self.sys_types)),
002: "Missing required 'conn' paremeter for remote connection details",
003: "Remote commands must be in list format",
004: "Files argument must be in list format",
005: "Cannot use sudo on host '%s', no password provided'" % self.sys_conn['host'],
100: "Must supply a 'passwd' or 'key' connection parameter for system type '%s'" % (self.sys_type),
101: "Missing a required connection parameter for system type '%s', must supply '%s'" % (self.sys_type, str(self.conn_attr[self.sys_type])),
999: "An unknown error occured when creating the remote connection"
}
# Return the error message
if not code or not code in error_codes:
return error_codes[999]
else:
return error_codes[code]
""" SET DEFAULTS
    Set any defaults for unspecified, optional connection parameters depending
on the system type.
"""
def _set_defaults(self):
# System Defaults
if not 'port' in self.sys_conn or not self.sys_conn['port']:
self.sys_conn['port'] = 22
""" LOAD SSH KEY
    Method to load an SSH key for a Linux connection into a Paramiko object.
"""
def _load_ssh_key(self):
if os.path.isfile(self.sys_conn['key']):
key_obj = paramiko.RSAKey.from_private_key_file(self.sys_conn['key'])
else:
key_str = unicodedata.normalize('NFKD', self.sys_conn['key']).encode('ascii', 'ignore')
key_fo = StringIO.StringIO(key_str)
key_obj = paramiko.RSAKey.from_private_key(key_fo)
return key_obj
""" VALIDATE PARAMETERS
Make sure the system type and connection parameters are valid. Check the
connection parameters based on the system type.
"""
def _validate(self):
# Require a system type parameter
if not self.sys_type:
return invalid(self._error(000))
# Make sure the system type is supported
if not self.sys_type in self.sys_types:
return invalid(self._error(001))
# Require system connection parameters
if not self.sys_conn or not isinstance(self.sys_conn, dict):
return invalid(self._error(002))
# Windows system type validation
if self.sys_type == 'windows':
# Require an SSH key
if not 'key' in self.sys_conn:
return invalid(self._error(100))
# Linux system type validation
if self.sys_type == 'linux':
# Make sure either a key or password are set
if not ('passwd' in self.sys_conn) and not ('key' in self.sys_conn):
return invalid(self._error(100))
# Make sure the required parameters are set
for param in self.conn_attr[self.sys_type]:
if not param in self.sys_conn:
return invalid(self._error(101))
# If a key is specified, read into an RSA object
if 'key' in self.sys_conn and self.sys_conn['key']:
self.auth_type = 'key'
self.sys_conn['key'] = self._load_ssh_key()
else:
self.auth_type = 'passwd'
self.sys_conn['key'] = None
# Connection parameters OK
return valid()
""" Connection Handler """
def _connect(self):
try:
if self.auth_type == 'key':
self.client_exec.connect(self.sys_conn['host'],
username = self.sys_conn['user'],
port = int(self.sys_conn['port']),
pkey = self.sys_conn['key'])
else:
self.client_exec.connect(self.sys_conn['host'],
username = self.sys_conn['user'],
port = int(self.sys_conn['port']),
password = self.sys_conn['passwd'])
except BadHostKeyException as e:
self.log.exception('Failed to establish SSH connection: %s' % str(e))
return invalid(str(e))
# Open the SCP client
self.client_copy = SCPClient(self.client_exec.get_transport())
return valid()
def open(self):
"""
Open the connection to the remote host with the constructed connection
object. This class is called internally to the API, so the return object
is constructed accordingly.
The open method returns a dictionary with two
elements, 'valid' and 'content'. If the connection failed, 'valid' is set
to False and 'content' contains the error. Otherwise, 'valid' is set to True.
:rtype: dictionary
Opening a connection::
# Attempt to connect
status = remote.open()
# If the connection failed to open
if not status['valid']:
# You can return the object to the calling module or raise your own Exception
return status
"""
# Make sure connection parameters are valid
if not self.params_valid['valid']:
self.log.error(self.params_valid['content'])
return invalid(self.params_valid['content'])
# Remote command and file copy clients
self.client_exec = None
self.client_copy = None
# Set the client for remote commands
self.client_exec = paramiko.SSHClient()
self.client_exec.load_system_host_keys()
self.client_exec.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Try to establish the parent SSH connection
try:
auth_type = 'key' if (self.auth_type == 'key') else 'password'
self.log.info('Attempting %s authentication to \'%s@%s\'' % (auth_type, self.sys_conn['user'], self.sys_conn['host']))
conn_status = self._connect()
if not conn_status['valid']:
return conn_status
self.log.info('Successfully established connection to \'%s@%s\'' % (self.sys_conn['user'], self.sys_conn['host']))
# Failed to establish SSH connection
except Exception as e:
return invalid(self.log.exception('SSH connection to \'%s@%s\' failed with error: %s' % (self.sys_conn['user'], self.sys_conn['host'], e)))
# Return the connection object
return valid(self)
def close(self):
"""
Close the connection once you have finished running commands and copying
files. Ends interaction with the remote host.
If there is a problem with closing the connection, an exception will be
logged, but the program will continue as normal.
Closing a connection::
# Attempt to close the connection
remote.close()
"""
try:
self.client_exec.close()
self.log.info('Closed all remote connections to host \'%s\'' % (self.sys_conn['host']))
except Exception as e:
self.log.exception('Failed to close all remote connections to host \'%s\': %s' % (self.sys_conn['host'], str(e)))
def execute(self, commands=[]):
"""
Run a series of remote commands on the server. Takes a single argument,
which is a list of commands to run. The exit code, stdout, and stderr
are captured for each command and returned in a list.
If any commands fail to run, this is logged and the rest of the commands
continue as normal.
:param commands: A list of commands to run on the remote server
:type commands: list
:rtype: list of dictionaries
Running commands::
# A list of commands to run
commands = ['uname', 'hostname']
# Run the commands and capture the output
output = remote.execute(commands)
Example output structure::
output = [
{
'command': 'uname',
'exit_code': 0,
'stdout': ['Linux cloudscape 3.8.0-29-generic #42~precise1-Ubuntu SMP Wed Aug 14 16:19:23 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux'],
'stderr': []
},
{
'command': 'hostname',
'exit_code': 0,
'stdout': ['myserver'],
'stderr': []
}
]
"""
if not commands or not isinstance(commands, list):
self.log.error(self._error(003))
return invalid(self._error(003))
self.log.info('Preparing to run [%s] commands on [%s] host' % (len(commands), self.sys_type))
# Windows remote commands
if self.sys_type == 'windows':
# Variable to store stdout and stderr
output = []
# Run each command in the list
for command in commands:
tried = False
                def win_ssh_exec(command, tried):
try:
stdin, stdout, stderr = self.client_exec.exec_command(command)
# Get the exit code
exit_code = stdout.channel.recv_exit_status()
# Save the command output
self.log.info('Ran command \'%s\' on host \'%s\' with exit code \'%s\'' % (command, self.sys_conn['host'], exit_code))
output.append({'command': command,
'exit_code': exit_code,
'stdout': stdout.readlines(),
'stderr': stderr.readlines()})
return True
# Need to reconnect
except Exception as e:
if not tried:
conn_status = self._connect()
if not conn_status['valid']:
self.log.error('Lost connection while running command: [%s]' % command)
return False
                            tried = True
                            win_ssh_exec(command, tried)
else:
self.log.exception('Failed to run command \'%s\' on host \'%s\' with error: %s' % (command, self.sys_conn['host'], str(e)))
# Windows SSH exec wrapper
win_ssh_exec(command, tried)
return output
# Linux remote commands
if self.sys_type == 'linux':
# Variable to store stdout and stderr
output = []
# Run each command in the list
for command in commands:
try:
# Sudo Metacharacter
#
# Allow an API call with the {%sudo%} special metacharacter. If running with
# a non-root user, and a password is available, extract the command and try to
# run with sudo privileges.
if re.match(r'^{sudo}.*$', command):
command = re.compile('^{sudo}(.*$)').sub(r'\g<1>', command)
if self.sys_conn['user'] != 'root':
if not self.sys_conn['passwd']:
self.log.error(self._error(005))
continue
else:
sudo = True
sudo_passwd = self.sys_conn['passwd']
else:
sudo = False
else:
sudo = False
# Optional sudo password for non-root accounts
if sudo:
cmd_string = 'sudo -S bash -l -c \'%s\'' % command
stdin, stdout, stderr = self.client_exec.exec_command(cmd_string)
stdin.write('%s\n' % sudo_passwd)
stdin.flush()
# Non-root commands
else:
cmd_string = 'bash -l -c \'%s\'' % command
stdin, stdout, stderr = self.client_exec.exec_command(cmd_string)
# Get the exit code
exit_code = stdout.channel.recv_exit_status()
# Save the command output
self.log.info('Ran command \'%s\' on host \'%s\' with exit code \'%s\'' % (cmd_string, self.sys_conn['host'], exit_code))
output.append({'command': command,
'exit_code': exit_code,
'stdout': stdout.readlines(),
'stderr': stderr.readlines()})
# Failed to execute remote command
except Exception as e:
self.log.error('Failed to run command \'%s\' on host \'%s\' with error: %s' % (command, self.sys_conn['host'], e))
return output
def chmod(self, path=None, mode=None, file_mode=None, dir_mode=None, recurse=False):
"""
Change permissions of a file or directory on the remote server. Currently
only supported for Linux because right now I'm too lazy to abstract the
differences for Windows permissions.
This method uses the 'execute' method to run these commands.
:param path: The file or directory you want to modify
:type path: str
:param mode: The octal permissions to set (for a single file or all contents)
:type mode: str
:param file_mode: If modifying a directory, mode to apply to files
:type file_mode: str
:param dir_mode: If modifying a directory, mode to apply to folders
:type file_mode: str
:param recurse: Whether or not to recurse into subdirectories
:type recurse: bool
Changing permissions::
# Change permissions of a single file
remote.chmod(path='/tmp/somefile.txt', mode='755')
# Change permissions on only files recursively
remote.chmod('/tmp/somepath', file_mode='644', recurse=True)
# Change permissions on both files and folders recursively
remote.chmod('/tmp/someotherpath', file_mode='644', dir_mode='755', recurse=True)
"""
if not path or not mode:
return False
# Only valid for Linux machines
if self.sys_type == 'linux':
# Build the base command
cmd_base = 'chmod -R' if recurse else 'chmod'
# Global permissions
if mode:
self.execute(['%s %s %s' % (cmd_base, mode, path)])
# File permissions
if file_mode:
if recurse == True:
self.execute(['find %s -type f -exec chmod %s {} \;' % (path, file_mode)])
else:
self.execute(['find %s -maxdepth 1 -type f -exec chmod %s {} \;' % (path, file_mode)])
# Directory permissions
if dir_mode:
if recurse == True:
self.execute(['find %s -type d -exec chmod %s {} \;' % (path, dir_mode)])
else:
self.execute(['find %s -maxdepth 1 -type d -exec chmod %s {} \;' % (path, dir_mode)])
def chown(self, path=None, user=None, group=None, recurse=False):
"""
Change ownership of a file or directory structure. Optional flags
to use a different name/group combination, and to recurse into the
base path.
This method uses the 'execute' method to run these commands.
:param path: The file or directory you want to modify
:type path: str
:param user: The user to change ownership to
:type user: str
:param group: The optional group to change ownership to
:type group: str
:param recurse: Whether or not to recurse into subdirectories
:type recurse: bool
Changing ownership::
# Set ownership on a single file
remote.chown(path='/some/file.txt', user='name', group='name')
# Set ownership recursively on an entire path
remote.chown(path='/some/path', user='name', group='name', recurse=True)
"""
if not path or not user:
return False
# Only valid for Linux machines
if self.sys_type == 'linux':
# Build the remote command
cmd_base = 'chown -R' if recurse else 'chown'
# Set the user/group string
cmd_owner = user
if group:
cmd_owner = '%s:%s' % (user, group)
# Set the owner of the path
self.execute(['%s %s %s' % (cmd_base, cmd_owner, path)])
def send_file(self, local=None, remote=None, mode=False, user=False, group=False):
"""
Send a file to a remote server, and for Linux systems, optionally specify the
remote file permissions to set after transfer.
:param local: The path to the local file to send
:type local: str
:param remote: The path to the remote file (defaults to local if not set)
:type remote: str
        :param mode: The optional octal mode permissions
:type mode: str
:param user: The optional owner (user)
:type user: str
:param group: The optional owner (group)
:type group: str
Sending a file::
# Sending a file where the remote path matches the local path
remote.send_file(local='/some/file.txt')
# Sending a file to a different remote path
remote.send_file(local='/some/file.txt', remote='/some/otherfile.txt')
# Sending a file and setting permissions
remote.send_file(local='/some/file.txt', mode=644, user='name', group='name')
"""
if not self.client_copy:
return False
# Default destination to source path if none provided
if not remote: remote = local
# Send the file
tried = False
def try_send(local, remote, mode, user, group, tried):
try:
self.client_copy.put(local, remote)
self.log.info('Copied file \'%s\' to remote server \'%s\':\'%s\'' % (local, self.sys_conn['host'], remote))
# Additional Linux options
if self.sys_type == 'linux':
# Optional file mode
if mode:
self.chmod(path=remote, mode=mode)
# Optional file ownership
if user:
self.chown(path=remote, user=user, group=group, recurse=False)
# Failed to send the file, try to reconnect and send again
except Exception as e:
if not tried:
conn_status = self._connect()
if not conn_status['valid']:
self.log.error('Lost connection while transferring file: [%s]' % local)
return False
tried = True
try_send(local, remote, mode, user, group, tried)
else:
self.log.exception('Failed to send file \'%s\' to remote server \'%s\':\'%s\' with error: %s' % (local, self.sys_conn['host'], remote, e))
# Launch the send wrapper
try_send(local, remote, mode, user, group, tried)
def get_file(self, remote=None, local=None):
"""
Get a file from a remote server.
:param remote: The path on the remote server to retrieve
:type remote: str
:param local: The local path to transfer to (defaults to the same as remote)
:type local: str
Getting a file::
# Get a remote file and preserve the path
remote.get_file(remote='/some/file.txt')
# Get a remote file and use a new local path
remote.get_file(remote='/some/file.txt', local='/some/otherfile.txt')
"""
if not self.client_copy:
return False
# Default local path to remote path
if not local: local = remote
# Get the file
tried = False
def try_get(remote, local, tried):
try:
self.client_copy.get(remote, local)
self.log.info('Retrieved file \'%s\' from remote server \'%s\':\'%s\'' % (local, self.sys_conn['host'], remote))
except Exception as e:
if not tried:
conn_status = self._connect()
if not conn_status['valid']:
self.log.error('Lost connection while retrieving file: [%s]' % remote)
return False
tried = True
try_get(remote, local, tried)
else:
self.log.exception('Failed to get file \'%s\' from remote server \'%s\':\'%s\' with error: %s' % (local, self.sys_conn['host'], remote, e))
# Launch the get wrapper
try_get(remote, local, tried) | djtaylor/cloudscape-DEPRECATED | python/cloudscape/common/remote.py | Python | gpl-3.0 | 25,284 | 0.009294 |
from operator import attrgetter
from django.test import TestCase
from models import Post, Question, Answer
class OrderWithRespectToTests(TestCase):
def test_basic(self):
q1 = Question.objects.create(text="Which Beatle starts with the letter 'R'?")
q2 = Question.objects.create(text="What is your name?")
Answer.objects.create(text="John", question=q1)
Answer.objects.create(text="Jonno", question=q2)
Answer.objects.create(text="Paul", question=q1)
Answer.objects.create(text="Paulo", question=q2)
Answer.objects.create(text="George", question=q1)
Answer.objects.create(text="Ringo", question=q1)
# The answers will always be ordered in the order they were inserted.
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Ringo",
],
attrgetter("text"),
)
# We can retrieve the answers related to a particular object, in the
# order they were created, once we have a particular object.
a1 = Answer.objects.filter(question=q1)[0]
self.assertEqual(a1.text, "John")
a2 = a1.get_next_in_order()
self.assertEqual(a2.text, "Paul")
a4 = list(Answer.objects.filter(question=q1))[-1]
self.assertEqual(a4.text, "Ringo")
self.assertEqual(a4.get_previous_in_order().text, "George")
# Determining (and setting) the ordering for a particular item is also
# possible.
id_list = [o.pk for o in q1.answer_set.all()]
self.assertEqual(a2.question.get_answer_order(), id_list)
a5 = Answer.objects.create(text="Number five", question=q1)
# It doesn't matter which answer we use to check the order, it will
# always be the same.
self.assertEqual(
a2.question.get_answer_order(), a5.question.get_answer_order()
)
# The ordering can be altered:
id_list = [o.pk for o in q1.answer_set.all()]
x = id_list.pop()
id_list.insert(-1, x)
self.assertNotEqual(a5.question.get_answer_order(), id_list)
a5.question.set_answer_order(id_list)
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Number five", "Ringo"
],
attrgetter("text")
)
def test_recursive_ordering(self):
p1 = Post.objects.create(title='1')
p2 = Post.objects.create(title='2')
p1_1 = Post.objects.create(title="1.1", parent=p1)
p1_2 = Post.objects.create(title="1.2", parent=p1)
p2_1 = Post.objects.create(title="2.1", parent=p2)
p1_3 = Post.objects.create(title="1.3", parent=p1)
self.assertEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])
| mzdaniel/oh-mainline | vendor/packages/Django/tests/modeltests/order_with_respect_to/tests.py | Python | agpl-3.0 | 2,870 | 0.002787 |
from stagecraft.apps.datasets.models import DataGroup, DataSet, DataType
from django.test import TestCase
from stagecraft.libs.mass_update import DataSetMassUpdate
from nose.tools import assert_equal
class TestDataSetMassUpdate(TestCase):
@classmethod
def setUpClass(cls):
cls.data_group1 = DataGroup.objects.create(name='datagroup1')
cls.data_group2 = DataGroup.objects.create(name='datagroup2')
cls.data_type1 = DataType.objects.create(name='datatype1')
cls.data_type2 = DataType.objects.create(name='datatype2')
cls.dataset_a = DataSet.objects.create(
name='foo',
data_group=cls.data_group1,
bearer_token="abc123",
data_type=cls.data_type1)
cls.dataset_b = DataSet.objects.create(
name='bar',
data_group=cls.data_group2,
bearer_token="def456",
data_type=cls.data_type1)
cls.dataset_c = DataSet.objects.create(
name='baz',
data_group=cls.data_group2,
bearer_token="999999",
data_type=cls.data_type2)
@classmethod
def tearDownClass(cls):
pass
def test_update_bearer_token_by_date_type(self):
new_bearer_token = "ghi789"
query = {u'data_type': self.data_type1.name}
number_updated = DataSetMassUpdate \
.update_bearer_token_for_data_type_or_group_name(
query, new_bearer_token)
dataset_a = DataSet.objects.get(id=self.dataset_a.id)
dataset_b = DataSet.objects.get(id=self.dataset_b.id)
dataset_c = DataSet.objects.get(id=self.dataset_c.id)
assert_equal(number_updated, 2)
assert_equal(dataset_a.bearer_token, new_bearer_token)
assert_equal(dataset_b.bearer_token, new_bearer_token)
assert_equal(dataset_c.bearer_token == new_bearer_token, False)
def test_update_bearer_token_by_data_group(self):
new_bearer_token = "ghi789"
query = {u'data_group': self.data_group2.name}
number_updated = DataSetMassUpdate \
.update_bearer_token_for_data_type_or_group_name(
query, new_bearer_token)
dataset_a = DataSet.objects.get(id=self.dataset_a.id)
dataset_b = DataSet.objects.get(id=self.dataset_b.id)
dataset_c = DataSet.objects.get(id=self.dataset_c.id)
assert_equal(number_updated, 2)
assert_equal(dataset_a.bearer_token == new_bearer_token, False)
assert_equal(dataset_b.bearer_token, new_bearer_token)
assert_equal(dataset_c.bearer_token, new_bearer_token)
def test_update_bearer_token_by_data_group_and_data_type(self):
new_bearer_token = "ghi789"
query = {
u'data_type': self.data_type1.name,
u'data_group': self.data_group2.name}
number_updated = DataSetMassUpdate \
.update_bearer_token_for_data_type_or_group_name(
query, new_bearer_token)
dataset_a = DataSet.objects.get(id=self.dataset_a.id)
dataset_b = DataSet.objects.get(id=self.dataset_b.id)
dataset_c = DataSet.objects.get(id=self.dataset_c.id)
assert_equal(number_updated, 1)
assert_equal(dataset_a.bearer_token == new_bearer_token, False)
assert_equal(dataset_b.bearer_token, new_bearer_token)
assert_equal(dataset_c.bearer_token == new_bearer_token, False)
| alphagov/stagecraft | stagecraft/libs/mass_update/test_data_set_mass_update.py | Python | mit | 3,422 | 0 |
try:
from settings_local import MyConf
except ImportError:
from feedreader.conf.base import Dev as MyConf
try:
from settings_local import MyTestConf
except ImportError:
from feedreader.conf.base import Test as MyTestConf
| jo-soft/jadfr | jadfr/feedreader/conf/my_conf.py | Python | gpl-3.0 | 238 | 0 |
#!/bin/env python
# -*- coding: utf-8 -*-
PYS_SERVICE_MOD_PRE='pys_' # module name prefix
PYS_HEAD_LEN=12 # message header length
PYS_MAX_BODY_LEN=10485760 # maximum message body length
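# --- Illustrative sketch only (the real header layout is not defined here) ---
# A fixed PYS_HEAD_LEN-byte header could, for example, carry the body length,
# with PYS_MAX_BODY_LEN bounding the payload; kept as comments so importing
# this constants module stays side-effect free:
#
#     import struct
#     body = b'{"cmd": "ping"}'
#     assert len(body) <= PYS_MAX_BODY_LEN
#     header = struct.pack('!I8s', len(body), b'pys_demo')  # 4 + 8 = 12 bytes
#     packet = header + body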
| dungeonsnd/test-code | dev_examples/pyserver/src/util/pys_define.py | Python | gpl-3.0 | 184 | 0.033784 |
# -*- coding:utf-8 -*-
from functools import wraps
from flask import abort
from flask import request
def kwargs_required(*required_args):
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
for arg in required_args:
if arg not in kwargs:
return abort(400, "Argument <{0}> is required".format(arg))
return func(*args, **kwargs)
return wrapper
return decorate
def args_required(*required_args):
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
for arg in required_args:
if arg not in request.values:
return abort(400, "Argument <{0}> is required".format(arg))
return func(*args, **kwargs)
return wrapper
return decorate
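# --- Illustrative usage sketch (not part of the original module) ---
# The route and argument names below are hypothetical; the decorators simply
# abort with HTTP 400 when a listed key is missing. Kept as comments so the
# module itself is unchanged:
#
#     @app.route("/login", methods=["POST"])
#     @args_required("username", "password")
#     def login():
#         ...
#
#     @kwargs_required("user_id")
#     def get_user(**kwargs):
#         ...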
| pycook/cmdb | cmdb-api/api/lib/decorator.py | Python | gpl-2.0 | 838 | 0 |
# -*- coding: utf-8 -*-
#
# Zend Framework 2 documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 6 18:55:07 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zend Framework 2'
copyright = u'2015, Zend Technologies Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.4'
# The full version, including alpha/beta/rc tags.
release = '2.4.10dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '*snippets.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../zf2_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZendFramework2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ZendFramework2.tex', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zendframework2', u'Zend Framework 2 Documentation',
[u'Zend Technologies Ltd.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZendFramework2', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'ZendFramework2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Hack to render the php source code without the <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
| froschdesign/zf2-documentation | docs/src/conf.py | Python | bsd-3-clause | 8,058 | 0.007322 |
#!/usr/bin/python
import shtest, sys, common
from common import *
from math import *
def frac_test(p, types=[]):
if is_array(p):
result = [a - floor(a) for a in p]
else:
result = [a - floor(a)]
return shtest.make_test(result, [p], types)
def insert_into(test):
test.add_test(frac_test((-8.0, -8.9, -8.5, -8.1)))
test.add_test(frac_test((-0.0, -0.5, 0.0, 0.5)))
test.add_test(frac_test((8.0, 8.9, 8.5, 8.1)))
test.add_test(frac_test((18.0, 18.9, -18.1)))
test.add_make_test((0, 0), [(1098908.975123, -12131318.123456)])
test.add_test(frac_test((1234567890123456789.0, )))
test.add_test(frac_test((-1234567890123456789.0, )))
# Test frac in stream programs
test = shtest.StreamTest('frac', 1)
test.add_call(shtest.Call(shtest.Call.call, 'frac', 1))
insert_into(test)
test.output_header(sys.stdout)
test.output(sys.stdout, False)
# Test frac in immediate mode
test = shtest.ImmediateTest('frac_im', 1)
test.add_call(shtest.Call(shtest.Call.call, 'frac', 1))
insert_into(test)
test.output(sys.stdout, False)
test.output_footer(sys.stdout)
| libsh-archive/sh | test/regress/frac.cpp.py | Python | lgpl-2.1 | 1,097 | 0.003646 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"AdParameterErrorEnum",},
)
class AdParameterErrorEnum(proto.Message):
r"""Container for enum describing possible ad parameter errors.
"""
class AdParameterError(proto.Enum):
r"""Enum describing possible ad parameter errors."""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_CRITERION_MUST_BE_KEYWORD = 2
INVALID_INSERTION_TEXT_FORMAT = 3
__all__ = tuple(sorted(__protobuf__.manifest))
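# --- Illustrative usage sketch (not part of the generated file) ---
# Client code would typically compare a returned error code against this
# enum; the import path below is inferred from this file's location:
#
#     from google.ads.googleads.v9.errors.types import ad_parameter_error
#     err = ad_parameter_error.AdParameterErrorEnum.AdParameterError
#     assert err.INVALID_INSERTION_TEXT_FORMAT == 3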
| googleads/google-ads-python | google/ads/googleads/v9/errors/types/ad_parameter_error.py | Python | apache-2.0 | 1,192 | 0.000839 |
" Settings for tests. "
from settings.project import *
# Databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'TEST_CHARSET': 'utf8',
}
}
| klen/django-netauth | example_project/settings/test.py | Python | lgpl-3.0 | 347 | 0.005764 |
# -*- coding: utf-8 -*-
from framework.tasks import app
from framework.tasks.handlers import enqueue_task
from website import settings
from . import piwik
@app.task(bind=True, max_retries=5, default_retry_delay=60)
def _update_node(self, node_id, updated_fields=None):
# Avoid circular imports
from framework.transactions.context import TokuTransaction
from website import models
node = models.Node.load(node_id)
try:
with TokuTransaction():
piwik._update_node_object(node, updated_fields)
except Exception as error:
raise self.retry(exc=error)
def update_node(node_id, updated_fields):
if settings.USE_CELERY:
signature = _update_node.s(node_id, updated_fields)
enqueue_task(signature)
else:
_update_node(node_id, updated_fields)
| AndrewSallans/osf.io | framework/analytics/tasks.py | Python | apache-2.0 | 822 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.is_mysqldba_oncall'
db.add_column(u'user_profiles', 'is_mysqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.is_pgsqldba_oncall'
db.add_column(u'user_profiles', 'is_pgsqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.current_mysqldba_oncall'
db.add_column(u'user_profiles', 'current_mysqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.current_pgsqldba_oncall'
db.add_column(u'user_profiles', 'current_pgsqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.is_mysqldba_oncall'
db.delete_column(u'user_profiles', 'is_mysqldba_oncall')
# Deleting field 'UserProfile.is_pgsqldba_oncall'
db.delete_column(u'user_profiles', 'is_pgsqldba_oncall')
# Deleting field 'UserProfile.current_mysqldba_oncall'
db.delete_column(u'user_profiles', 'current_mysqldba_oncall')
# Deleting field 'UserProfile.current_pgsqldba_oncall'
db.delete_column(u'user_profiles', 'current_pgsqldba_oncall')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dhcp.dhcp': {
'Meta': {'object_name': 'DHCP', 'db_table': "u'dhcp_scopes'"},
'allow_booting': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'allow_bootp': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option_domain_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'option_domain_name_servers': ('django.db.models.fields.CharField', [], {'max_length': '48', 'null': 'True', 'blank': 'True'}),
'option_ntp_servers': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'option_routers': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'option_subnet_mask': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'pool_deny_dynamic_bootp_agents': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'pool_range_end': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'pool_range_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'scope_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scope_netmask': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'scope_notes': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'scope_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'systems.advisorydata': {
'Meta': {'object_name': 'AdvisoryData', 'db_table': "u'advisory_data'"},
'advisory': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'references': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'severity': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'systems.allocation': {
'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.contract': {
'Meta': {'object_name': 'Contract', 'db_table': "u'contracts'"},
'contract_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contract_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'support_level': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'systems.keyvalue': {
'Meta': {'object_name': 'KeyValue', 'db_table': "u'key_value'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'systems.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "u'locations'"},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'systems.mac': {
'Meta': {'object_name': 'Mac', 'db_table': "u'macs'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '17'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"})
},
'systems.networkadapter': {
'Meta': {'object_name': 'NetworkAdapter', 'db_table': "u'network_adapters'"},
'adapter_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dhcp_scope': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dhcp.DHCP']", 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mac_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'option_domain_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_host_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'switch_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'switch_port': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'system_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.operatingsystem': {
'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.portdata': {
'Meta': {'object_name': 'PortData', 'db_table': "u'port_data'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
'systems.scheduledtask': {
'Meta': {'ordering': "['task']", 'object_name': 'ScheduledTask', 'db_table': "u'scheduled_tasks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.servermodel': {
'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.system': {
'Meta': {'object_name': 'System', 'db_table': "u'systems'"},
'allocation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Allocation']", 'null': 'True', 'blank': 'True'}),
'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'change_password': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dhcp_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_dns_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_nagios_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_puppet_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_switch': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'licenses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'oob_ip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'oob_switch_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
'patch_panel_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rack_order': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'ram': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
'switch_ports': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system_rack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemRack']", 'null': 'True', 'blank': 'True'}),
'system_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemStatus']", 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'systems.systemchangelog': {
'Meta': {'object_name': 'SystemChangeLog', 'db_table': "u'systems_change_log'"},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {}),
'changed_text': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"})
},
'systems.systemrack': {
'Meta': {'ordering': "['name']", 'object_name': 'SystemRack', 'db_table': "u'system_racks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemstatus': {
'Meta': {'ordering': "['status']", 'object_name': 'SystemStatus', 'db_table': "u'system_statuses'"},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemtype': {
'Meta': {'object_name': 'SystemType', 'db_table': "u'system_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "u'user_profiles'"},
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'current_desktop_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_mysqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_pgsqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_services_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_sysadmin_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epager_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_nick': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'is_desktop_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_mysqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_pgsqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_services_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sysadmin_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pager_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pager_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['systems'] | mozilla/inventory | systems/migrations/0002_auto__add_field_userprofile_is_mysqldba_oncall__add_field_userprofile_.py | Python | bsd-3-clause | 21,325 | 0.008159 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import operator
import sys
import uuid
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from multiprocessing.pool import ThreadPool
from pyspark import keyword_only, since, SparkContext
from pyspark.ml import Estimator, Predictor, PredictionModel, Model
from pyspark.ml.param.shared import HasRawPredictionCol, HasProbabilityCol, HasThresholds, \
HasRegParam, HasMaxIter, HasFitIntercept, HasTol, HasStandardization, HasWeightCol, \
HasAggregationDepth, HasThreshold, HasBlockSize, HasMaxBlockSizeInMB, Param, Params, \
TypeConverters, HasElasticNetParam, HasSeed, HasStepSize, HasSolver, HasParallelism
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _RandomForestParams, _GBTParams, \
_HasVarianceImpurity, _TreeClassifierParams
from pyspark.ml.regression import _FactorizationMachinesParams, DecisionTreeRegressionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.util import DefaultParamsReader, DefaultParamsWriter, \
JavaMLReadable, JavaMLReader, JavaMLWritable, JavaMLWriter, \
MLReader, MLReadable, MLWriter, MLWritable, HasTrainingSummary
from pyspark.ml.wrapper import JavaParams, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
__all__ = ['LinearSVC', 'LinearSVCModel',
'LinearSVCSummary', 'LinearSVCTrainingSummary',
'LogisticRegression', 'LogisticRegressionModel',
'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
'GBTClassifier', 'GBTClassificationModel',
'RandomForestClassifier', 'RandomForestClassificationModel',
'RandomForestClassificationSummary', 'RandomForestClassificationTrainingSummary',
'BinaryRandomForestClassificationSummary',
'BinaryRandomForestClassificationTrainingSummary',
'NaiveBayes', 'NaiveBayesModel',
'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
'MultilayerPerceptronClassificationSummary',
'MultilayerPerceptronClassificationTrainingSummary',
'OneVsRest', 'OneVsRestModel',
'FMClassifier', 'FMClassificationModel', 'FMClassificationSummary',
'FMClassificationTrainingSummary']
class _ClassifierParams(HasRawPredictionCol, _PredictorParams):
"""
Classifier Params for classification tasks.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class Classifier(Predictor, _ClassifierParams, metaclass=ABCMeta):
"""
Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class ClassificationModel(PredictionModel, _ClassifierParams, metaclass=ABCMeta):
"""
Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@abstractproperty
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
raise NotImplementedError()
@abstractmethod
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
raise NotImplementedError()
class _ProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, _ClassifierParams):
"""
Params for :py:class:`ProbabilisticClassifier` and
:py:class:`ProbabilisticClassificationModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class ProbabilisticClassifier(Classifier, _ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Probabilistic Classifier for classification tasks.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@inherit_doc
class ProbabilisticClassificationModel(ClassificationModel,
_ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@abstractmethod
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
raise NotImplementedError()
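# Illustrative sketch (not part of the original module): how the three outputs of a
# probabilistic classification model relate.  `predictRaw` yields a vector of raw scores,
# `predictProbability` a probability vector, and the final prediction is the index of the
# largest probability, scaled per class by `thresholds` when those are set.
def _prediction_from_probability(probability, thresholds=None):
    """Sketch only; assumes non-zero thresholds when provided."""
    scores = (list(probability) if thresholds is None
              else [p / t for p, t in zip(probability, thresholds)])
    return float(max(range(len(scores)), key=scores.__getitem__))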
@inherit_doc
class _JavaClassifier(Classifier, JavaPredictor, metaclass=ABCMeta):
"""
Java Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class _JavaClassificationModel(ClassificationModel, JavaPredictionModel):
"""
Java Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
To be mixed in with :class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
return self._call_java("numClasses")
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
return self._call_java("predictRaw", value)
@inherit_doc
class _JavaProbabilisticClassifier(ProbabilisticClassifier, _JavaClassifier,
metaclass=ABCMeta):
"""
Java Probabilistic Classifier for classification tasks.
"""
pass
@inherit_doc
class _JavaProbabilisticClassificationModel(ProbabilisticClassificationModel,
_JavaClassificationModel):
"""
Java Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class _ClassificationSummary(JavaWrapper):
"""
Abstraction for multiclass classification results for a given model.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def predictions(self):
"""
Dataframe outputted by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("3.1.0")
def predictionCol(self):
"""
Field in "predictions" which gives the prediction of each class.
"""
return self._call_java("predictionCol")
@property
@since("3.1.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("3.1.0")
def weightCol(self):
"""
Field in "predictions" which gives the weight of each instance
as a vector.
"""
return self._call_java("weightCol")
@property
def labels(self):
"""
Returns the sequence of labels in ascending order. This order matches the order used
in metrics which are specified as arrays over labels, e.g., truePositiveRateByLabel.
.. versionadded:: 3.1.0
Notes
-----
        In most cases, it will be values {0.0, 1.0, ..., numClasses-1}. However, if the
training set is missing a label, then all of the arrays over labels
(e.g., from truePositiveRateByLabel) will be of length numClasses-1 instead of the
expected numClasses.
"""
return self._call_java("labels")
@property
@since("3.1.0")
def truePositiveRateByLabel(self):
"""
Returns true positive rate for each label (category).
"""
return self._call_java("truePositiveRateByLabel")
@property
@since("3.1.0")
def falsePositiveRateByLabel(self):
"""
Returns false positive rate for each label (category).
"""
return self._call_java("falsePositiveRateByLabel")
@property
@since("3.1.0")
def precisionByLabel(self):
"""
Returns precision for each label (category).
"""
return self._call_java("precisionByLabel")
@property
@since("3.1.0")
def recallByLabel(self):
"""
Returns recall for each label (category).
"""
return self._call_java("recallByLabel")
@since("3.1.0")
def fMeasureByLabel(self, beta=1.0):
"""
Returns f-measure for each label (category).
"""
return self._call_java("fMeasureByLabel", beta)
@property
@since("3.1.0")
def accuracy(self):
"""
Returns accuracy.
        (the number of correctly classified instances divided by the
        total number of instances.)
"""
return self._call_java("accuracy")
@property
@since("3.1.0")
def weightedTruePositiveRate(self):
"""
Returns weighted true positive rate.
        (equal to precision, recall and f-measure)
"""
return self._call_java("weightedTruePositiveRate")
@property
@since("3.1.0")
def weightedFalsePositiveRate(self):
"""
Returns weighted false positive rate.
"""
return self._call_java("weightedFalsePositiveRate")
@property
@since("3.1.0")
def weightedRecall(self):
"""
Returns weighted averaged recall.
        (equal to precision, recall and f-measure)
"""
return self._call_java("weightedRecall")
@property
@since("3.1.0")
def weightedPrecision(self):
"""
Returns weighted averaged precision.
"""
return self._call_java("weightedPrecision")
@since("3.1.0")
def weightedFMeasure(self, beta=1.0):
"""
Returns weighted averaged f-measure.
"""
return self._call_java("weightedFMeasure", beta)
@inherit_doc
class _TrainingSummary(JavaWrapper):
"""
Abstraction for Training results.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
        iteration. It contains one more element (the initial state)
        than the number of iterations.
"""
return self._call_java("objectiveHistory")
@property
@since("3.1.0")
def totalIterations(self):
"""
Number of training iterations until termination.
"""
return self._call_java("totalIterations")
@inherit_doc
class _BinaryClassificationSummary(_ClassificationSummary):
"""
Binary classification results for a given model.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def scoreCol(self):
"""
Field in "predictions" which gives the probability or raw prediction
of each class as a vector.
"""
return self._call_java("scoreCol")
@property
def roc(self):
"""
Returns the receiver operating characteristic (ROC) curve,
which is a Dataframe having two fields (FPR, TPR) with
(0.0, 0.0) prepended and (1.0, 1.0) appended to it.
.. versionadded:: 3.1.0
Notes
-----
`Wikipedia reference <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
"""
return self._call_java("roc")
@property
@since("3.1.0")
def areaUnderROC(self):
"""
Computes the area under the receiver operating characteristic
(ROC) curve.
"""
return self._call_java("areaUnderROC")
@property
@since("3.1.0")
def pr(self):
"""
Returns the precision-recall curve, which is a Dataframe
        containing two fields (recall, precision), with (0.0, 1.0) prepended
to it.
"""
return self._call_java("pr")
@property
@since("3.1.0")
def fMeasureByThreshold(self):
"""
        Returns a dataframe with two fields (threshold, F-Measure),
        computed with beta = 1.0.
"""
return self._call_java("fMeasureByThreshold")
@property
@since("3.1.0")
def precisionByThreshold(self):
"""
        Returns a dataframe with two fields (threshold, precision).
        Every possible probability obtained in transforming the dataset
        is used as a threshold in calculating the precision.
"""
return self._call_java("precisionByThreshold")
@property
@since("3.1.0")
def recallByThreshold(self):
"""
        Returns a dataframe with two fields (threshold, recall).
        Every possible probability obtained in transforming the dataset
        is used as a threshold in calculating the recall.
"""
return self._call_java("recallByThreshold")
class _LinearSVCParams(_ClassifierParams, HasRegParam, HasMaxIter, HasFitIntercept, HasTol,
HasStandardization, HasWeightCol, HasAggregationDepth, HasThreshold,
HasMaxBlockSizeInMB):
"""
Params for :py:class:`LinearSVC` and :py:class:`LinearSVCModel`.
.. versionadded:: 3.0.0
"""
threshold = Param(Params._dummy(), "threshold",
"The threshold in binary classification applied to the linear model"
" prediction. This threshold can be any real number, where Inf will make"
" all predictions 0.0 and -Inf will make all predictions 1.0.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_LinearSVCParams, self).__init__(*args)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, threshold=0.0, aggregationDepth=2,
maxBlockSizeInMB=0.0)
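# Illustrative sketch (not part of the original module): how the LinearSVC `threshold`
# defined above is applied.  The raw prediction is the signed margin w.x + b; the label
# is 1.0 when the margin exceeds the threshold, otherwise 0.0 -- so +Inf forces every
# prediction to 0.0 and -Inf forces every prediction to 1.0.
def _linear_svc_label(margin, threshold=0.0):
    return 1.0 if margin > threshold else 0.0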
@inherit_doc
class LinearSVC(_JavaClassifier, _LinearSVCParams, JavaMLWritable, JavaMLReadable):
"""
This binary classifier optimizes the Hinge Loss using the OWLQN optimizer.
    It currently supports only L2 regularization.
.. versionadded:: 2.2.0
Notes
-----
`Linear SVM Classifier <https://en.wikipedia.org/wiki/Support_vector_machine#Linear_SVM>`_
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = sc.parallelize([
... Row(label=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(label=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> svm = LinearSVC()
>>> svm.getMaxIter()
100
>>> svm.setMaxIter(5)
LinearSVC...
>>> svm.getMaxIter()
5
>>> svm.getRegParam()
0.0
>>> svm.setRegParam(0.01)
LinearSVC...
>>> svm.getRegParam()
0.01
>>> model = svm.fit(df)
>>> model.setPredictionCol("newPrediction")
LinearSVCModel...
>>> model.getPredictionCol()
'newPrediction'
>>> model.setThreshold(0.5)
LinearSVCModel...
>>> model.getThreshold()
0.5
>>> model.getMaxBlockSizeInMB()
0.0
>>> model.coefficients
DenseVector([0.0, -0.2792, -0.1833])
>>> model.intercept
1.0206118982229047
>>> model.numClasses
2
>>> model.numFeatures
3
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF()
>>> model.predict(test0.head().features)
1.0
>>> model.predictRaw(test0.head().features)
DenseVector([-1.4831, 1.4831])
>>> result = model.transform(test0).head()
>>> result.newPrediction
1.0
>>> result.rawPrediction
DenseVector([-1.4831, 1.4831])
>>> svm_path = temp_path + "/svm"
>>> svm.save(svm_path)
>>> svm2 = LinearSVC.load(svm_path)
>>> svm2.getMaxIter()
5
>>> model_path = temp_path + "/svm_model"
>>> model.save(model_path)
>>> model2 = LinearSVCModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
super(LinearSVC, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LinearSVC", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, maxBlockSizeInMB=0.0):
Sets params for Linear SVM Classifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearSVCModel(java_model)
@since("2.2.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.2.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
@since("2.2.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.2.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("2.2.0")
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
@since("2.2.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@since("2.2.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.2.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
@since("3.1.0")
def setMaxBlockSizeInMB(self, value):
"""
Sets the value of :py:attr:`maxBlockSizeInMB`.
"""
return self._set(maxBlockSizeInMB=value)
class LinearSVCModel(_JavaClassificationModel, _LinearSVCParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by LinearSVC.
.. versionadded:: 2.2.0
"""
@since("3.0.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@property
@since("2.2.0")
def coefficients(self):
"""
Model coefficients of Linear SVM Classifier.
"""
return self._call_java("coefficients")
@property
@since("2.2.0")
def intercept(self):
"""
Model intercept of Linear SVM Classifier.
"""
return self._call_java("intercept")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return LinearSVCTrainingSummary(super(LinearSVCModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_lsvc_summary = self._call_java("evaluate", dataset)
return LinearSVCSummary(java_lsvc_summary)
class LinearSVCSummary(_BinaryClassificationSummary):
"""
Abstraction for LinearSVC Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class LinearSVCTrainingSummary(LinearSVCSummary, _TrainingSummary):
"""
Abstraction for LinearSVC Training results.
.. versionadded:: 3.1.0
"""
pass
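# Illustrative sketch (not part of the original module): obtaining a LinearSVCSummary for
# held-out data through `LinearSVCModel.evaluate` as defined above.  `model` and `test_df`
# are assumed to be a fitted LinearSVCModel and a DataFrame with "label"/"features" columns.
def _linear_svc_test_metrics(model, test_df):
    summary = model.evaluate(test_df)  # -> LinearSVCSummary
    return summary.areaUnderROC, summary.accuracy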
class _LogisticRegressionParams(_ProbabilisticClassifierParams, HasRegParam,
HasElasticNetParam, HasMaxIter, HasFitIntercept, HasTol,
HasStandardization, HasWeightCol, HasAggregationDepth,
HasThreshold, HasMaxBlockSizeInMB):
"""
Params for :py:class:`LogisticRegression` and :py:class:`LogisticRegressionModel`.
.. versionadded:: 3.0.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
lowerBoundsOnCoefficients = Param(Params._dummy(), "lowerBoundsOnCoefficients",
"The lower bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
upperBoundsOnCoefficients = Param(Params._dummy(), "upperBoundsOnCoefficients",
"The upper bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
    lowerBoundsOnIntercepts = Param(Params._dummy(), "lowerBoundsOnIntercepts",
                                    "The lower bounds on intercepts if fitting under bound "
                                    "constrained optimization. The bounds vector size must be "
                                    "equal to 1 for binomial regression, or the number of "
                                    "classes for multinomial regression.",
                                    typeConverter=TypeConverters.toVector)
upperBoundsOnIntercepts = Param(Params._dummy(), "upperBoundsOnIntercepts",
"The upper bounds on intercepts if fitting under bound "
"constrained optimization. The bound vector size must be "
"equal with 1 for binomial regression, or the number of "
"classes for multinomial regression.",
typeConverter=TypeConverters.toVector)
def __init__(self, *args):
super(_LogisticRegressionParams, self).__init__(*args)
self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto",
maxBlockSizeInMB=0.0)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self.clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self.clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
If neither are set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
if self.isSet(self.threshold) and self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: {0}".format(str(ts)))
t = 1.0/(1.0 + ts[0]/ts[1])
t2 = self.getOrDefault(self.threshold)
if abs(t2 - t) >= 1E-5:
raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
" threshold (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
@since("2.3.0")
def getLowerBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`lowerBoundsOnCoefficients`
"""
return self.getOrDefault(self.lowerBoundsOnCoefficients)
@since("2.3.0")
def getUpperBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`upperBoundsOnCoefficients`
"""
return self.getOrDefault(self.upperBoundsOnCoefficients)
@since("2.3.0")
def getLowerBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`lowerBoundsOnIntercepts`
"""
return self.getOrDefault(self.lowerBoundsOnIntercepts)
@since("2.3.0")
def getUpperBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`upperBoundsOnIntercepts`
"""
return self.getOrDefault(self.upperBoundsOnIntercepts)
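# Worked example (illustrative, not part of the original module): the threshold <->
# thresholds equivalence enforced by `_checkThresholdConsistency` above.  With
# thresholds = [0.4, 0.6] the equivalent scalar threshold is 1 / (1 + 0.4 / 0.6) = 0.6,
# so threshold=0.6 is consistent with thresholds=[0.4, 0.6] while threshold=0.5 is not.
def _threshold_from_thresholds(ts):
    """Sketch of the conversion used in getThreshold; assumes len(ts) == 2."""
    return 1.0 / (1.0 + ts[0] / ts[1])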
@inherit_doc
class LogisticRegression(_JavaProbabilisticClassifier, _LogisticRegressionParams, JavaMLWritable,
JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
.. versionadded:: 1.3.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)),
... Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)),
... Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)),
... Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF()
>>> blor = LogisticRegression(weightCol="weight")
>>> blor.getRegParam()
0.0
>>> blor.setRegParam(0.01)
LogisticRegression...
>>> blor.getRegParam()
0.01
>>> blor.setMaxIter(10)
LogisticRegression...
>>> blor.getMaxIter()
10
>>> blor.clear(blor.maxIter)
>>> blorModel = blor.fit(bdf)
>>> blorModel.setFeaturesCol("features")
LogisticRegressionModel...
>>> blorModel.setProbabilityCol("newProbability")
LogisticRegressionModel...
>>> blorModel.getProbabilityCol()
'newProbability'
>>> blorModel.getMaxBlockSizeInMB()
0.0
>>> blorModel.setThreshold(0.1)
LogisticRegressionModel...
>>> blorModel.getThreshold()
0.1
>>> blorModel.coefficients
DenseVector([-1.080..., -0.646...])
>>> blorModel.intercept
3.112...
>>> blorModel.evaluate(bdf).accuracy == blorModel.summary.accuracy
True
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> mdf = spark.read.format("libsvm").load(data_path)
>>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> mlorModel.coefficientMatrix
SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1)
>>> mlorModel.interceptVector
DenseVector([0.04..., -0.42..., 0.37...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF()
>>> blorModel.predict(test0.head().features)
1.0
>>> blorModel.predictRaw(test0.head().features)
DenseVector([-3.54..., 3.54...])
>>> blorModel.predictProbability(test0.head().features)
DenseVector([0.028, 0.972])
>>> result = blorModel.transform(test0).head()
>>> result.prediction
1.0
>>> result.newProbability
DenseVector([0.02..., 0.97...])
>>> result.rawPrediction
DenseVector([-3.54..., 3.54...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> blor.save(lr_path)
>>> lr2 = LogisticRegression.load(lr_path)
>>> lr2.getRegParam()
0.01
>>> model_path = temp_path + "/lr_model"
>>> blorModel.save(model_path)
>>> model2 = LogisticRegressionModel.load(model_path)
>>> blorModel.coefficients[0] == model2.coefficients[0]
True
>>> blorModel.intercept == model2.intercept
True
>>> model2
LogisticRegressionModel: uid=..., numClasses=2, numFeatures=2
>>> blorModel.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto",
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
maxBlockSizeInMB=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto", \
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
maxBlockSizeInMB=0.0):
If the threshold and thresholds Params are both set, they must be equivalent.
"""
super(LogisticRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LogisticRegression", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
self._checkThresholdConsistency()
@keyword_only
@since("1.3.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto",
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
maxBlockSizeInMB=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto", \
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
maxBlockSizeInMB=0.0):
Sets params for logistic regression.
If the threshold and thresholds Params are both set, they must be equivalent.
"""
kwargs = self._input_kwargs
self._set(**kwargs)
self._checkThresholdConsistency()
return self
def _create_model(self, java_model):
return LogisticRegressionModel(java_model)
@since("2.1.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.3.0")
def setLowerBoundsOnCoefficients(self, value):
"""
Sets the value of :py:attr:`lowerBoundsOnCoefficients`
"""
return self._set(lowerBoundsOnCoefficients=value)
@since("2.3.0")
def setUpperBoundsOnCoefficients(self, value):
"""
Sets the value of :py:attr:`upperBoundsOnCoefficients`
"""
return self._set(upperBoundsOnCoefficients=value)
@since("2.3.0")
def setLowerBoundsOnIntercepts(self, value):
"""
Sets the value of :py:attr:`lowerBoundsOnIntercepts`
"""
return self._set(lowerBoundsOnIntercepts=value)
@since("2.3.0")
def setUpperBoundsOnIntercepts(self, value):
"""
Sets the value of :py:attr:`upperBoundsOnIntercepts`
"""
return self._set(upperBoundsOnIntercepts=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
def setElasticNetParam(self, value):
"""
Sets the value of :py:attr:`elasticNetParam`.
"""
return self._set(elasticNetParam=value)
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
@since("3.1.0")
def setMaxBlockSizeInMB(self, value):
"""
Sets the value of :py:attr:`maxBlockSizeInMB`.
"""
return self._set(maxBlockSizeInMB=value)
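# Illustrative sketch (not part of the original module): supplying box constraints through
# the bound Params defined above.  For a binomial problem the coefficient bound matrix
# must be 1 x numFeatures and the intercept bound vector must have length 1; the all-zero
# lower bounds below (forcing non-negative coefficients) are made-up assumptions.
def _bounded_binomial_lr(num_features):
    from pyspark.ml.linalg import Matrices
    return LogisticRegression(
        family="binomial",
        lowerBoundsOnCoefficients=Matrices.dense(1, num_features, [0.0] * num_features),
        lowerBoundsOnIntercepts=Vectors.dense([0.0]),
    )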
class LogisticRegressionModel(_JavaProbabilisticClassificationModel, _LogisticRegressionParams,
JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by LogisticRegression.
.. versionadded:: 1.3.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("intercept")
@property
@since("2.1.0")
def coefficientMatrix(self):
"""
Model coefficients.
"""
return self._call_java("coefficientMatrix")
@property
@since("2.1.0")
def interceptVector(self):
"""
Model intercept.
"""
return self._call_java("interceptVector")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
if self.numClasses <= 2:
return BinaryLogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
return LogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_blr_summary = self._call_java("evaluate", dataset)
if self.numClasses <= 2:
return BinaryLogisticRegressionSummary(java_blr_summary)
else:
return LogisticRegressionSummary(java_blr_summary)
class LogisticRegressionSummary(_ClassificationSummary):
"""
Abstraction for Logistic Regression Results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def probabilityCol(self):
"""
Field in "predictions" which gives the probability
of each class as a vector.
"""
return self._call_java("probabilityCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary, _TrainingSummary):
"""
Abstraction for multinomial Logistic Regression Training results.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class BinaryLogisticRegressionSummary(_BinaryClassificationSummary,
LogisticRegressionSummary):
"""
Binary Logistic regression results for a given model.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
LogisticRegressionTrainingSummary):
"""
Binary Logistic regression training results for a given model.
.. versionadded:: 2.0.0
"""
pass
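# Illustrative sketch (not part of the original module): a binary fit whose training
# summary is the BinaryLogisticRegressionTrainingSummary defined above.  `train_df` is an
# assumed DataFrame with "label" and "features" columns.
def _fit_and_report_auc(train_df):
    model = LogisticRegression(maxIter=10).fit(train_df)
    summary = model.summary  # binary training summary when numClasses <= 2
    return summary.areaUnderROC, summary.totalIterations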
@inherit_doc
class _DecisionTreeClassifierParams(_DecisionTreeParams, _TreeClassifierParams):
"""
Params for :py:class:`DecisionTreeClassifier` and :py:class:`DecisionTreeClassificationModel`.
"""
def __init__(self, *args):
super(_DecisionTreeClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", leafCol="", minWeightFractionPerNode=0.0)
@inherit_doc
class DecisionTreeClassifier(_JavaProbabilisticClassifier, _DecisionTreeClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed", leafCol="leafId")
>>> model = dt.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
DecisionTreeClassificationModel...
>>> model.numNodes
3
>>> model.depth
1
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> model.numClasses
2
>>> print(model.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([1.0, 0.0])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([1.0, 0.0])
>>> result.rawPrediction
DenseVector([1.0, 0.0])
>>> result.leafId
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtc_path = temp_path + "/dtc"
>>> dt.save(dtc_path)
>>> dt2 = DecisionTreeClassifier.load(dtc_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtc_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> df3 = spark.createDataFrame([
... (1.0, 0.2, Vectors.dense(1.0)),
... (1.0, 0.8, Vectors.dense(1.0)),
... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> si3 = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model3 = si3.fit(df3)
>>> td3 = si_model3.transform(df3)
>>> dt3 = DecisionTreeClassifier(maxDepth=2, weightCol="weight", labelCol="indexed")
>>> model3 = dt3.fit(td3)
>>> print(model3.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
"""
super(DecisionTreeClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", seed=None, weightCol=None, leafCol="",
minWeightFractionPerNode=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
Sets params for the DecisionTreeClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@inherit_doc
class DecisionTreeClassificationModel(_DecisionTreeModel, _JavaProbabilisticClassificationModel,
_DecisionTreeClassifierParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by DecisionTreeClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
where gain is scaled by the number of instances passing through the node
- Normalize importances for tree to sum to 1.
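As a rough illustration only (not the actual implementation, which runs on the
JVM side), the idea can be sketched with hypothetical ``node`` objects that
expose ``feature``, ``gain`` and ``count`` attributes::

    totals = {}
    for node in internal_nodes:  # ``internal_nodes``: hypothetical iterable of split nodes
        totals[node.feature] = totals.get(node.feature, 0.0) + node.gain * node.count
    norm = sum(totals.values())
    importances = {feature: value / norm for feature, value in totals.items()}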
.. versionadded:: 2.0.0
Notes
-----
Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
@inherit_doc
class _RandomForestClassifierParams(_RandomForestParams, _TreeClassifierParams):
"""
Params for :py:class:`RandomForestClassifier` and :py:class:`RandomForestClassificationModel`.
"""
def __init__(self, *args):
super(_RandomForestClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", numTrees=20, featureSubsetStrategy="auto",
subsamplingRate=1.0, leafCol="", minWeightFractionPerNode=0.0,
bootstrap=True)
@inherit_doc
class RandomForestClassifier(_JavaProbabilisticClassifier, _RandomForestClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
.. versionadded:: 1.4.0
Examples
--------
>>> import numpy
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42,
... leafCol="leafId")
>>> rf.getMinWeightFractionPerNode()
0.0
>>> model = rf.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
RandomForestClassificationModel...
>>> model.setRawPredictionCol("newRawPrediction")
RandomForestClassificationModel...
>>> model.getBootstrap()
True
>>> model.getRawPredictionCol()
'newRawPrediction'
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([2.0, 0.0])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> numpy.argmax(result.probability)
0
>>> numpy.argmax(result.newRawPrediction)
0
>>> result.leafId
DenseVector([0.0, 0.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.trees
[DecisionTreeClassificationModel...depth=..., DecisionTreeClassificationModel...]
>>> rfc_path = temp_path + "/rfc"
>>> rf.save(rfc_path)
>>> rf2 = RandomForestClassifier.load(rfc_path)
>>> rf2.getNumTrees()
3
>>> model_path = temp_path + "/rfc_model"
>>> model.save(model_path)
>>> model2 = RandomForestClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0, \
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
"""
super(RandomForestClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0, \
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
Sets params for the RandomForestClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setNumTrees(self, value):
"""
Sets the value of :py:attr:`numTrees`.
"""
return self._set(numTrees=value)
@since("3.0.0")
def setBootstrap(self, value):
"""
Sets the value of :py:attr:`bootstrap`.
"""
return self._set(bootstrap=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class RandomForestClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
_RandomForestClassifierParams, JavaMLWritable,
JavaMLReadable, HasTrainingSummary):
"""
Model fitted by RandomForestClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
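As a rough sketch of the aggregation only (ignoring tree weights; the actual
computation happens on the JVM side), the per-tree importances of a fitted
``model`` could be averaged and renormalized as follows::

    import numpy as np
    # ``model`` is an assumed fitted RandomForestClassificationModel
    per_tree = [t.featureImportances.toArray() for t in model.trees]
    avg = np.mean(per_tree, axis=0)
    importances = avg / avg.sum()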
.. versionadded:: 2.0.0
See Also
--------
DecisionTreeClassificationModel.featureImportances
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]
@property
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
if self.numClasses <= 2:
return BinaryRandomForestClassificationTrainingSummary(
super(RandomForestClassificationModel, self).summary)
else:
return RandomForestClassificationTrainingSummary(
super(RandomForestClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_rf_summary = self._call_java("evaluate", dataset)
if self.numClasses <= 2:
return BinaryRandomForestClassificationSummary(java_rf_summary)
else:
return RandomForestClassificationSummary(java_rf_summary)
class RandomForestClassificationSummary(_ClassificationSummary):
"""
Abstraction for RandomForestClassification results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class RandomForestClassificationTrainingSummary(RandomForestClassificationSummary,
_TrainingSummary):
"""
Abstraction for RandomForestClassification training results.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class BinaryRandomForestClassificationSummary(_BinaryClassificationSummary):
"""
BinaryRandomForestClassification results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class BinaryRandomForestClassificationTrainingSummary(BinaryRandomForestClassificationSummary,
RandomForestClassificationTrainingSummary):
"""
BinaryRandomForestClassification training results for a given model.
.. versionadded:: 3.1.0
"""
pass
class _GBTClassifierParams(_GBTParams, _HasVarianceImpurity):
"""
Params for :py:class:`GBTClassifier` and :py:class:`GBTClassifierModel`.
.. versionadded:: 3.0.0
"""
supportedLossTypes = ["logistic"]
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(supportedLossTypes),
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_GBTClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
leafCol="", minWeightFractionPerNode=0.0)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
@inherit_doc
class GBTClassifier(_JavaProbabilisticClassifier, _GBTClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for classification.
It supports binary labels, as well as both continuous and categorical features.
.. versionadded:: 1.4.0
Notes
-----
Multiclass labels are not currently supported.
The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
Gradient Boosting vs. TreeBoost:
- This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
- Both algorithms learn tree ensembles by minimizing loss functions.
- TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
based on the loss function, whereas the original gradient boosting method does not.
- We expect to implement TreeBoost in the future:
`SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_
Examples
--------
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42,
... leafCol="leafId")
>>> gbt.setMaxIter(5)
GBTClassifier...
>>> gbt.setMinWeightFractionPerNode(0.049)
GBTClassifier...
>>> gbt.getMaxIter()
5
>>> gbt.getFeatureSubsetStrategy()
'all'
>>> model = gbt.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
GBTClassificationModel...
>>> model.setThresholds([0.3, 0.7])
GBTClassificationModel...
>>> model.getThresholds()
[0.3, 0.7]
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([1.1697, -1.1697])
>>> model.predictProbability(test0.head().features)
DenseVector([0.9121, 0.0879])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.leafId
DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.totalNumNodes
15
>>> print(model.toDebugString)
GBTClassificationModel...numTrees=5...
>>> gbtc_path = temp_path + "/gbtc"
>>> gbt.save(gbtc_path)
>>> gbt2 = GBTClassifier.load(gbtc_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "/gbtc_model"
>>> model.save(model_path)
>>> model2 = GBTClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> model.trees
[DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
>>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0),)],
... ["indexed", "features"])
>>> model.evaluateEachIteration(validation)
[0.25..., 0.23..., 0.21..., 0.19..., 0.18...]
>>> model.numClasses
2
>>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
>>> gbt.getValidationIndicatorCol()
'validationIndicator'
>>> gbt.getValidationTol()
0.01
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, impurity="variance",
featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
"""
super(GBTClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.GBTClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
Sets params for Gradient Boosted Tree Classification.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
@since("3.0.0")
def setValidationIndicatorCol(self, value):
"""
Sets the value of :py:attr:`validationIndicatorCol`.
"""
return self._set(validationIndicatorCol=value)
@since("1.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("1.4.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.4.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class GBTClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
_GBTClassifierParams, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GBTClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. versionadded:: 2.0.0
See Also
--------
DecisionTreeClassificationModel.featureImportances
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
def evaluateEachIteration(self, dataset):
"""
Method to compute error or loss for every iteration of gradient boosting.
.. versionadded:: 2.4.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
return self._call_java("evaluateEachIteration", dataset)
class _NaiveBayesParams(_PredictorParams, HasWeightCol):
"""
Params for :py:class:`NaiveBayes` and :py:class:`NaiveBayesModel`.
.. versionadded:: 3.0.0
"""
smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
"default is 1.0", typeConverter=TypeConverters.toFloat)
modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
"(case-sensitive). Supported options: multinomial (default), bernoulli " +
"and gaussian.",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_NaiveBayesParams, self).__init__(*args)
self._setDefault(smoothing=1.0, modelType="multinomial")
@since("1.5.0")
def getSmoothing(self):
"""
Gets the value of smoothing or its default value.
"""
return self.getOrDefault(self.smoothing)
@since("1.5.0")
def getModelType(self):
"""
Gets the value of modelType or its default value.
"""
return self.getOrDefault(self.modelType)
@inherit_doc
class NaiveBayes(_JavaProbabilisticClassifier, _NaiveBayesParams, HasThresholds, HasWeightCol,
JavaMLWritable, JavaMLReadable):
"""
Naive Bayes Classifiers.
It supports both Multinomial and Bernoulli NB. `Multinomial NB \
<http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
can handle finitely supported discrete data. For example, by converting documents into
TF-IDF vectors, it can be used for document classification. By making every vector
binary (0/1), it can also be used as `Bernoulli NB \
<http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
The input feature values for Multinomial NB and Bernoulli NB must be nonnegative.
Since 3.0.0, it supports Complement NB which is an adaptation of the Multinomial NB.
Specifically, Complement NB uses statistics from the complement of each class to compute
the model's coefficients. The inventors of Complement NB show empirically that the parameter
estimates for CNB are more stable than those for Multinomial NB. Like Multinomial NB, the
input feature values for Complement NB must be nonnegative.
Since 3.0.0, it also supports `Gaussian NB \
<https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Gaussian_naive_Bayes>`_,
which can handle continuous data.
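A TF-IDF document-classification pipeline might look like the following sketch
(the column names and the ``trainingDF`` DataFrame are illustrative assumptions,
not part of this API)::

    from pyspark.ml import Pipeline
    from pyspark.ml.feature import Tokenizer, HashingTF, IDF

    # ``trainingDF`` is an assumed DataFrame with "text" and "label" columns
    tokenizer = Tokenizer(inputCol="text", outputCol="words")
    tf = HashingTF(inputCol="words", outputCol="tf")
    idf = IDF(inputCol="tf", outputCol="features")
    nb = NaiveBayes(modelType="multinomial", labelCol="label")
    pipelineModel = Pipeline(stages=[tokenizer, tf, idf, nb]).fit(trainingDF)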
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
... Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
... Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
>>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
>>> model = nb.fit(df)
>>> model.setFeaturesCol("features")
NaiveBayesModel...
>>> model.getSmoothing()
1.0
>>> model.pi
DenseVector([-0.81..., -0.58...])
>>> model.theta
DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
>>> model.sigma
DenseMatrix(0, 0, [...], ...)
>>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
>>> model.predict(test0.head().features)
1.0
>>> model.predictRaw(test0.head().features)
DenseVector([-1.72..., -0.99...])
>>> model.predictProbability(test0.head().features)
DenseVector([0.32..., 0.67...])
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.32..., 0.67...])
>>> result.rawPrediction
DenseVector([-1.72..., -0.99...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
1.0
>>> nb_path = temp_path + "/nb"
>>> nb.save(nb_path)
>>> nb2 = NaiveBayes.load(nb_path)
>>> nb2.getSmoothing()
1.0
>>> model_path = temp_path + "/nb_model"
>>> model.save(model_path)
>>> model2 = NaiveBayesModel.load(model_path)
>>> model.pi == model2.pi
True
>>> model.theta == model2.theta
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> nb = nb.setThresholds([0.01, 10.00])
>>> model3 = nb.fit(df)
>>> result = model3.transform(test0).head()
>>> result.prediction
0.0
>>> nb3 = NaiveBayes().setModelType("gaussian")
>>> model4 = nb3.fit(df)
>>> model4.getModelType()
'gaussian'
>>> model4.sigma
DenseMatrix(2, 2, [0.0, 0.25, 0.0, 0.0], 1)
>>> nb5 = NaiveBayes(smoothing=1.0, modelType="complement", weightCol="weight")
>>> model5 = nb5.fit(df)
>>> model5.getModelType()
'complement'
>>> model5.theta
DenseMatrix(2, 2, [...], 1)
>>> model5.sigma
DenseMatrix(0, 0, [...], ...)
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
"""
super(NaiveBayes, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.NaiveBayes", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
Sets params for Naive Bayes.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return NaiveBayesModel(java_model)
@since("1.5.0")
def setSmoothing(self, value):
"""
Sets the value of :py:attr:`smoothing`.
"""
return self._set(smoothing=value)
@since("1.5.0")
def setModelType(self, value):
"""
Sets the value of :py:attr:`modelType`.
"""
return self._set(modelType=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
class NaiveBayesModel(_JavaProbabilisticClassificationModel, _NaiveBayesParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by NaiveBayes.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pi(self):
"""
log of class priors.
"""
return self._call_java("pi")
@property
@since("2.0.0")
def theta(self):
"""
log of class conditional probabilities.
"""
return self._call_java("theta")
@property
@since("3.0.0")
def sigma(self):
"""
variance of each feature.
"""
return self._call_java("sigma")
class _MultilayerPerceptronParams(_ProbabilisticClassifierParams, HasSeed, HasMaxIter,
HasTol, HasStepSize, HasSolver, HasBlockSize):
"""
Params for :py:class:`MultilayerPerceptronClassifier`.
.. versionadded:: 3.0.0
"""
layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer. " +
"E.g., [780, 100, 10] means 780 inputs, one hidden layer with 100 " +
"neurons and output layer of 10 neurons.",
typeConverter=TypeConverters.toListInt)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
typeConverter=TypeConverters.toVector)
def __init__(self, *args):
super(_MultilayerPerceptronParams, self).__init__(*args)
self._setDefault(maxIter=100, tol=1E-6, blockSize=128, stepSize=0.03, solver="l-bfgs")
@since("1.6.0")
def getLayers(self):
"""
Gets the value of layers or its default value.
"""
return self.getOrDefault(self.layers)
@since("2.0.0")
def getInitialWeights(self):
"""
Gets the value of initialWeights or its default value.
"""
return self.getOrDefault(self.initialWeights)
@inherit_doc
class MultilayerPerceptronClassifier(_JavaProbabilisticClassifier, _MultilayerPerceptronParams,
JavaMLWritable, JavaMLReadable):
"""
Classifier trainer based on the Multilayer Perceptron.
Each layer has a sigmoid activation function; the output layer uses softmax.
Number of inputs has to be equal to the size of feature vectors.
Number of outputs has to be equal to the total number of labels.
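For instance, with 4-dimensional feature vectors, 3 label classes and one hidden
layer of 5 neurons, the network could be configured as in this minimal sketch::

    mlp = MultilayerPerceptronClassifier(layers=[4, 5, 3], seed=1)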
.. versionadded:: 1.6.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (0.0, Vectors.dense([0.0, 0.0])),
... (1.0, Vectors.dense([0.0, 1.0])),
... (1.0, Vectors.dense([1.0, 0.0])),
... (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
>>> mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
>>> mlp.setMaxIter(100)
MultilayerPerceptronClassifier...
>>> mlp.getMaxIter()
100
>>> mlp.getBlockSize()
128
>>> mlp.setBlockSize(1)
MultilayerPerceptronClassifier...
>>> mlp.getBlockSize()
1
>>> model = mlp.fit(df)
>>> model.setFeaturesCol("features")
MultilayerPerceptronClassificationModel...
>>> model.getMaxIter()
100
>>> model.getLayers()
[2, 2, 2]
>>> model.weights.size
12
>>> testDF = spark.createDataFrame([
... (Vectors.dense([1.0, 0.0]),),
... (Vectors.dense([0.0, 0.0]),)], ["features"])
>>> model.predict(testDF.head().features)
1.0
>>> model.predictRaw(testDF.head().features)
DenseVector([-16.208, 16.344])
>>> model.predictProbability(testDF.head().features)
DenseVector([0.0, 1.0])
>>> model.transform(testDF).select("features", "prediction").show()
+---------+----------+
| features|prediction|
+---------+----------+
|[1.0,0.0]| 1.0|
|[0.0,0.0]| 0.0|
+---------+----------+
...
>>> mlp_path = temp_path + "/mlp"
>>> mlp.save(mlp_path)
>>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
>>> mlp2.getBlockSize()
1
>>> model_path = temp_path + "/mlp_model"
>>> model.save(model_path)
>>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
>>> model.getLayers() == model2.getLayers()
True
>>> model.weights == model2.weights
True
>>> model.transform(testDF).take(1) == model2.transform(testDF).take(1)
True
>>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
>>> model3 = mlp2.fit(df)
>>> model3.weights != model2.weights
True
>>> model3.getLayers() == model.getLayers()
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None, probabilityCol="probability",
rawPredictionCol="rawPrediction"):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction")
"""
super(MultilayerPerceptronClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None, probabilityCol="probability",
rawPredictionCol="rawPrediction"):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction"):
Sets params for MultilayerPerceptronClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MultilayerPerceptronClassificationModel(java_model)
@since("1.6.0")
def setLayers(self, value):
"""
Sets the value of :py:attr:`layers`.
"""
return self._set(layers=value)
@since("1.6.0")
def setBlockSize(self, value):
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
@since("2.0.0")
def setInitialWeights(self, value):
"""
Sets the value of :py:attr:`initialWeights`.
"""
return self._set(initialWeights=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
class MultilayerPerceptronClassificationModel(_JavaProbabilisticClassificationModel,
_MultilayerPerceptronParams, JavaMLWritable,
JavaMLReadable, HasTrainingSummary):
"""
Model fitted by MultilayerPerceptronClassifier.
.. versionadded:: 1.6.0
"""
@property
@since("2.0.0")
def weights(self):
"""
the weights of layers.
"""
return self._call_java("weights")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return MultilayerPerceptronClassificationTrainingSummary(
super(MultilayerPerceptronClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_mlp_summary = self._call_java("evaluate", dataset)
return MultilayerPerceptronClassificationSummary(java_mlp_summary)
class MultilayerPerceptronClassificationSummary(_ClassificationSummary):
"""
Abstraction for MultilayerPerceptronClassifier results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class MultilayerPerceptronClassificationTrainingSummary(MultilayerPerceptronClassificationSummary,
_TrainingSummary):
"""
Abstraction for MultilayerPerceptronClassifier training results.
.. versionadded:: 3.1.0
"""
pass
class _OneVsRestParams(_ClassifierParams, HasWeightCol):
"""
Params for :py:class:`OneVsRest` and :py:class:`OneVsRestModel`.
"""
classifier = Param(Params._dummy(), "classifier", "base binary classifier")
@since("2.0.0")
def getClassifier(self):
"""
Gets the value of classifier or its default value.
"""
return self.getOrDefault(self.classifier)
@inherit_doc
class OneVsRest(Estimator, _OneVsRestParams, HasParallelism, MLReadable, MLWritable):
"""
Reduction of Multiclass Classification to Binary Classification.
Performs reduction using the one-against-all strategy.
For a multiclass classification with k classes, train k models (one per class).
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
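Conceptually, prediction reduces to an argmax over the per-class binary scores,
as in this illustrative sketch (``binary_models`` and ``features`` are assumed
names, not part of this API)::

    # ``binary_models``: assumed list of fitted per-class binary classification models
    # ``features``: assumed feature vector for one example
    scores = [m.predictRaw(features)[1] for m in binary_models]
    prediction = float(scores.index(max(scores)))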
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> df = spark.read.format("libsvm").load(data_path)
>>> lr = LogisticRegression(regParam=0.01)
>>> ovr = OneVsRest(classifier=lr)
>>> ovr.getRawPredictionCol()
'rawPrediction'
>>> ovr.setPredictionCol("newPrediction")
OneVsRest...
>>> model = ovr.fit(df)
>>> model.models[0].coefficients
DenseVector([0.5..., -1.0..., 3.4..., 4.2...])
>>> model.models[1].coefficients
DenseVector([-2.1..., 3.1..., -2.6..., -2.3...])
>>> model.models[2].coefficients
DenseVector([0.3..., -3.4..., 1.0..., -1.1...])
>>> [x.intercept for x in model.models]
[-2.7..., -2.5..., -1.3...]
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0, 1.0, 1.0))]).toDF()
>>> model.transform(test0).head().newPrediction
0.0
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(4, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().newPrediction
2.0
>>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4, 0.3, 0.2))]).toDF()
>>> model.transform(test2).head().newPrediction
0.0
>>> model_path = temp_path + "/ovr_model"
>>> model.save(model_path)
>>> model2 = OneVsRestModel.load(model_path)
>>> model2.transform(test0).head().newPrediction
0.0
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> model.transform(test2).columns
['features', 'rawPrediction', 'newPrediction']
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
super(OneVsRest, self).__init__()
self._setDefault(parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
Sets params for OneVsRest.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setClassifier(self, value):
"""
Sets the value of :py:attr:`classifier`.
"""
return self._set(classifier=value)
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def _fit(self, dataset):
labelCol = self.getLabelCol()
featuresCol = self.getFeaturesCol()
predictionCol = self.getPredictionCol()
classifier = self.getClassifier()
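# Labels are assumed to be indexed 0, 1, ..., k-1, so the number of classes
# is derived as the maximum label value plus one.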
numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
weightCol = None
if (self.isDefined(self.weightCol) and self.getWeightCol()):
if isinstance(classifier, HasWeightCol):
weightCol = self.getWeightCol()
else:
warnings.warn("weightCol is ignored, "
"as it is not supported by {} now.".format(classifier))
if weightCol:
multiclassLabeled = dataset.select(labelCol, featuresCol, weightCol)
else:
multiclassLabeled = dataset.select(labelCol, featuresCol)
# persist if underlying dataset is not persistent.
handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
if handlePersistence:
multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)
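# For each class index, relabel the dataset as a binary problem (1.0 for the
# current class, 0.0 for all others) and fit one copy of the base classifier.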
def trainSingleClass(index):
binaryLabelCol = "mc2b$" + str(index)
trainingDataset = multiclassLabeled.withColumn(
binaryLabelCol,
when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
paramMap = dict([(classifier.labelCol, binaryLabelCol),
(classifier.featuresCol, featuresCol),
(classifier.predictionCol, predictionCol)])
if weightCol:
paramMap[classifier.weightCol] = weightCol
return classifier.fit(trainingDataset, paramMap)
pool = ThreadPool(processes=min(self.getParallelism(), numClasses))
models = pool.map(trainSingleClass, range(numClasses))
if handlePersistence:
multiclassLabeled.unpersist()
return self._copyValues(OneVsRestModel(models=models))
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
.. versionadded:: 2.0.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`OneVsRest`
Copy of this instance
"""
if extra is None:
extra = dict()
newOvr = Params.copy(self, extra)
if self.isSet(self.classifier):
newOvr.setClassifier(self.getClassifier().copy(extra))
return newOvr
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRest, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
rawPredictionCol = java_stage.getRawPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
parallelism = java_stage.getParallelism()
py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
rawPredictionCol=rawPredictionCol, classifier=classifier,
parallelism=parallelism)
if java_stage.isDefined(java_stage.getParam("weightCol")):
py_stage.setWeightCol(java_stage.getWeightCol())
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRest. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
self.uid)
_java_obj.setClassifier(self.getClassifier()._to_java())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setFeaturesCol(self.getFeaturesCol())
_java_obj.setLabelCol(self.getLabelCol())
_java_obj.setPredictionCol(self.getPredictionCol())
if (self.isDefined(self.weightCol) and self.getWeightCol()):
_java_obj.setWeightCol(self.getWeightCol())
_java_obj.setRawPredictionCol(self.getRawPredictionCol())
return _java_obj
@classmethod
def read(cls):
return OneVsRestReader(cls)
def write(self):
if isinstance(self.getClassifier(), JavaMLWritable):
return JavaMLWriter(self)
else:
return OneVsRestWriter(self)
class _OneVsRestSharedReadWrite:
@staticmethod
def saveImpl(instance, sc, path, extraMetadata=None):
skipParams = ['classifier']
jsonParams = DefaultParamsWriter.extractJsonParams(instance, skipParams)
DefaultParamsWriter.saveMetadata(instance, path, sc, paramMap=jsonParams,
extraMetadata=extraMetadata)
classifierPath = os.path.join(path, 'classifier')
instance.getClassifier().save(classifierPath)
@staticmethod
def loadClassifier(path, sc):
classifierPath = os.path.join(path, 'classifier')
return DefaultParamsReader.loadParamsInstance(classifierPath, sc)
@staticmethod
def validateParams(instance):
elems_to_check = [instance.getClassifier()]
if isinstance(instance, OneVsRestModel):
elems_to_check.extend(instance.models)
for elem in elems_to_check:
if not isinstance(elem, MLWritable):
raise ValueError(f'OneVsRest write will fail because it contains {elem.uid} '
f'which is not writable.')
@inherit_doc
class OneVsRestReader(MLReader):
def __init__(self, cls):
super(OneVsRestReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
ova = OneVsRest(classifier=classifier)._resetUid(metadata['uid'])
DefaultParamsReader.getAndSetParams(ova, metadata, skipParams=['classifier'])
return ova
@inherit_doc
class OneVsRestWriter(MLWriter):
def __init__(self, instance):
super(OneVsRestWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_OneVsRestSharedReadWrite.validateParams(self.instance)
_OneVsRestSharedReadWrite.saveImpl(self.instance, self.sc, path)
class OneVsRestModel(Model, _OneVsRestParams, MLReadable, MLWritable):
"""
Model fitted by OneVsRest.
This stores the models resulting from training k binary classifiers: one for each class.
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
.. versionadded:: 2.0.0
"""
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
def __init__(self, models):
super(OneVsRestModel, self).__init__()
self.models = models
if not isinstance(models[0], JavaMLWritable):
return
# set java instance
java_models = [model._to_java() for model in self.models]
sc = SparkContext._active_spark_context
java_models_array = JavaWrapper._new_java_array(java_models,
sc._gateway.jvm.org.apache.spark.ml
.classification.ClassificationModel)
# TODO: need to set metadata
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
self._java_obj = \
JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
def _transform(self, dataset):
# determine the input columns: these need to be passed through
origCols = dataset.columns
# add an accumulator column to store predictions of all the models
accColName = "mbc$acc" + str(uuid.uuid4())
initUDF = udf(lambda _: [], ArrayType(DoubleType()))
newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
# persist if underlying dataset is not persistent.
handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
if handlePersistence:
newDataset.persist(StorageLevel.MEMORY_AND_DISK)
# update the accumulator column with the result of prediction of models
aggregatedDataset = newDataset
for index, model in enumerate(self.models):
rawPredictionCol = self.getRawPredictionCol()
columns = origCols + [rawPredictionCol, accColName]
# add temporary column to store intermediate scores and update
tmpColName = "mbc$tmp" + str(uuid.uuid4())
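# Append this model's positive-class raw score (element 1 of its
# rawPrediction vector) to the accumulator column.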
updateUDF = udf(
lambda predictions, prediction: predictions + [prediction.tolist()[1]],
ArrayType(DoubleType()))
transformedDataset = model.transform(aggregatedDataset).select(*columns)
updatedDataset = transformedDataset.withColumn(
tmpColName,
updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
newColumns = origCols + [tmpColName]
# switch out the intermediate column with the accumulator column
aggregatedDataset = updatedDataset\
.select(*newColumns).withColumnRenamed(tmpColName, accColName)
if handlePersistence:
newDataset.unpersist()
if self.getRawPredictionCol():
def func(predictions):
    # collect the accumulated per-class scores into a dense vector
    return Vectors.dense(list(predictions))
rawPredictionUDF = udf(func, VectorUDT())
aggregatedDataset = aggregatedDataset.withColumn(
self.getRawPredictionCol(), rawPredictionUDF(aggregatedDataset[accColName]))
if self.getPredictionCol():
# output the index of the classifier with highest confidence as prediction
labelUDF = udf(lambda predictions: float(max(enumerate(predictions),
key=operator.itemgetter(1))[0]), DoubleType())
aggregatedDataset = aggregatedDataset.withColumn(
self.getPredictionCol(), labelUDF(aggregatedDataset[accColName]))
return aggregatedDataset.drop(accColName)
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
.. versionadded:: 2.0.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`OneVsRestModel`
Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol)\
.setFeaturesCol(featuresCol)
py_stage._set(labelCol=labelCol)
if java_stage.isDefined(java_stage.getParam("weightCol")):
py_stage._set(weightCol=java_stage.getWeightCol())
py_stage._set(classifier=classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
if (self.isDefined(self.weightCol) and self.getWeightCol()):
_java_obj.set("weightCol", self.getWeightCol())
return _java_obj
@classmethod
def read(cls):
return OneVsRestModelReader(cls)
def write(self):
if all(map(lambda elem: isinstance(elem, JavaMLWritable),
[self.getClassifier()] + self.models)):
return JavaMLWriter(self)
else:
return OneVsRestModelWriter(self)
@inherit_doc
class OneVsRestModelReader(MLReader):
def __init__(self, cls):
super(OneVsRestModelReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
numClasses = metadata['numClasses']
subModels = [None] * numClasses
for idx in range(numClasses):
subModelPath = os.path.join(path, f'model_{idx}')
subModels[idx] = DefaultParamsReader.loadParamsInstance(subModelPath, self.sc)
ovaModel = OneVsRestModel(subModels)._resetUid(metadata['uid'])
ovaModel.set(ovaModel.classifier, classifier)
DefaultParamsReader.getAndSetParams(ovaModel, metadata, skipParams=['classifier'])
return ovaModel
@inherit_doc
class OneVsRestModelWriter(MLWriter):
def __init__(self, instance):
super(OneVsRestModelWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_OneVsRestSharedReadWrite.validateParams(self.instance)
instance = self.instance
numClasses = len(instance.models)
extraMetadata = {'numClasses': numClasses}
_OneVsRestSharedReadWrite.saveImpl(instance, self.sc, path, extraMetadata=extraMetadata)
for idx in range(numClasses):
subModelPath = os.path.join(path, f'model_{idx}')
instance.models[idx].save(subModelPath)
@inherit_doc
class FMClassifier(_JavaProbabilisticClassifier, _FactorizationMachinesParams, JavaMLWritable,
JavaMLReadable):
"""
Factorization Machines learning algorithm for classification.
Solver supports:
* gd (normal mini-batch gradient descent)
* adamW (default)
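The raw score follows the standard second-order factorization machines model,
shown here only as a sketch (the fitted terms are exposed on the model via
``intercept``, ``linear`` and ``factors``)::

    s(x) = w0 + sum_i w_i * x_i + sum_{i<j} <v_i, v_j> * x_i * x_j

The class probability is obtained by applying the logistic function to ``s(x)``.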
.. versionadded:: 3.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.classification import FMClassifier
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> fm = FMClassifier(factorSize=2)
>>> fm.setSeed(11)
FMClassifier...
>>> model = fm.fit(df)
>>> model.getMaxIter()
100
>>> test0 = spark.createDataFrame([
... (Vectors.dense(-1.0),),
... (Vectors.dense(0.5),),
... (Vectors.dense(1.0),),
... (Vectors.dense(2.0),)], ["features"])
>>> model.predictRaw(test0.head().features)
DenseVector([22.13..., -22.13...])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> model.transform(test0).select("features", "probability").show(10, False)
+--------+------------------------------------------+
|features|probability |
+--------+------------------------------------------+
|[-1.0] |[0.9999999997574736,2.425264676902229E-10]|
|[0.5] |[0.47627851732981163,0.5237214826701884] |
|[1.0] |[5.491554426243495E-4,0.9994508445573757] |
|[2.0] |[2.005766663870645E-10,0.9999999997994233]|
+--------+------------------------------------------+
...
>>> model.intercept
-7.316665276826291
>>> model.linear
DenseVector([14.8232])
>>> model.factors
DenseMatrix(1, 2, [0.0163, -0.0051], 1)
>>> model_path = temp_path + "/fm_model"
>>> model.save(model_path)
>>> model2 = FMClassificationModel.load(model_path)
>>> model2.intercept
-7.316665276826291
>>> model2.linear
DenseVector([14.8232])
>>> model2.factors
DenseMatrix(1, 2, [0.0163, -0.0051], 1)
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", thresholds=None, seed=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", thresholds=None, seed=None)
"""
super(FMClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.FMClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.0.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", thresholds=None, seed=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", thresholds=None, seed=None)
Sets Params for FMClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return FMClassificationModel(java_model)
@since("3.0.0")
def setFactorSize(self, value):
"""
Sets the value of :py:attr:`factorSize`.
"""
return self._set(factorSize=value)
@since("3.0.0")
def setFitLinear(self, value):
"""
Sets the value of :py:attr:`fitLinear`.
"""
return self._set(fitLinear=value)
@since("3.0.0")
def setMiniBatchFraction(self, value):
"""
Sets the value of :py:attr:`miniBatchFraction`.
"""
return self._set(miniBatchFraction=value)
@since("3.0.0")
def setInitStd(self, value):
"""
Sets the value of :py:attr:`initStd`.
"""
return self._set(initStd=value)
@since("3.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("3.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("3.0.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
class FMClassificationModel(_JavaProbabilisticClassificationModel, _FactorizationMachinesParams,
JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by :class:`FMClassifier`.
.. versionadded:: 3.0.0
"""
@property
@since("3.0.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("3.0.0")
def linear(self):
"""
Model linear term.
"""
return self._call_java("linear")
@property
@since("3.0.0")
def factors(self):
"""
Model factor term.
"""
return self._call_java("factors")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return FMClassificationTrainingSummary(super(FMClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_fm_summary = self._call_java("evaluate", dataset)
return FMClassificationSummary(java_fm_summary)
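# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): inspecting the
# training summary and evaluating on held-out data. `model` and `test_df` are
# assumed to exist; note that summary() is a plain method here, not a property.
#
#   if model.hasSummary:
#       print(model.summary().areaUnderROC)   # FMClassificationTrainingSummary
#   test_summary = model.evaluate(test_df)    # FMClassificationSummary
#   print(test_summary.accuracy)
# ---------------------------------------------------------------------------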
class FMClassificationSummary(_BinaryClassificationSummary):
"""
Abstraction for FMClassifier Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class FMClassificationTrainingSummary(FMClassificationSummary, _TrainingSummary):
"""
Abstraction for FMClassifier Training results.
.. versionadded:: 3.1.0
"""
pass
if __name__ == "__main__":
import doctest
import pyspark.ml.classification
from pyspark.sql import SparkSession
globs = pyspark.ml.classification.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.classification tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| BryanCutler/spark | python/pyspark/ml/classification.py | Python | apache-2.0 | 126,641 | 0.002953 |
import pytest
import chainerx
def test_name_native():
backend = chainerx.get_global_default_context().get_backend('native')
assert 'native' == backend.name
def test_get_device_native():
backend = chainerx.get_global_default_context().get_backend('native')
device = backend.get_device(0)
assert 0 == device.index
assert 'native:0' == device.name
assert device is backend.get_device(0)
def test_get_device_count_native():
backend = chainerx.get_global_default_context().get_backend('native')
assert backend.get_device_count() > 0
@pytest.mark.cuda
def test_name_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
assert 'cuda' == backend.name
@pytest.mark.cuda
def test_get_device_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
device = backend.get_device(0)
assert 0 == device.index
assert 'cuda:0' == device.name
assert device is backend.get_device(0)
@pytest.mark.cuda
def test_get_device_count_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
assert backend.get_device_count() > 0
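# Illustrative sketch (not one of the test cases): the same lookups written as
# they might appear in application code, using only the APIs exercised above.
#
#   ctx = chainerx.get_global_default_context()
#   backend = ctx.get_backend('native')
#   device = backend.get_device(0)
#   print(device.name, device.index, backend.get_device_count())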
| okuta/chainer | tests/chainerx_tests/unit_tests/test_backend.py | Python | mit | 1,149 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pysia documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this file.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pysia
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySia'
copyright = u"2017, Jeffrey McLarty"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pysia.__version__
# The full version, including alpha/beta/rc tags.
release = pysia.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysiadoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pysia.tex',
u'PySia Documentation',
u'Jeffrey McLarty', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysia',
u'PySia Documentation',
[u'Jeffrey McLarty'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysia',
u'PySia Documentation',
u'Jeffrey McLarty',
'pysia',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| jnmclarty/pysia | docs/conf.py | Python | mit | 8,373 | 0.005374 |
# -*- coding: utf-8 -*-
# TODO:
# * Move/rename namespace polluting attributes
# * Documentation
# * Make backends optional: Meta.backends = (path, modelinstance/model, view)
import hashlib
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import SortedDict
from django.utils.functional import curry
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.safestring import mark_safe
from django.core.cache import cache
from django.utils.encoding import iri_to_uri
from rollyourown.seo.utils import NotSet, Literal
from rollyourown.seo.options import Options
from rollyourown.seo.fields import MetadataField, Tag, MetaTag, KeywordTag, Raw
from rollyourown.seo.backends import backend_registry, RESERVED_FIELD_NAMES
registry = SortedDict()
class FormattedMetadata(object):
""" Allows convenient access to selected metadata.
Metadata for each field may be sourced from any one of the relevant instances passed.
"""
def __init__(self, metadata, instances, path, site=None, language=None):
self.__metadata = metadata
if metadata._meta.use_cache:
if metadata._meta.use_sites and site:
hexpath = hashlib.md5(iri_to_uri(site.domain+path)).hexdigest()
else:
hexpath = hashlib.md5(iri_to_uri(path)).hexdigest()
if metadata._meta.use_i18n:
self.__cache_prefix = 'rollyourown.seo.%s.%s.%s' % (self.__metadata.__class__.__name__, hexpath, language)
else:
self.__cache_prefix = 'rollyourown.seo.%s.%s' % (self.__metadata.__class__.__name__, hexpath)
else:
self.__cache_prefix = None
self.__instances_original = instances
self.__instances_cache = []
def __instances(self):
""" Cache instances, allowing generators to be used and reused.
This fills a cache as the generator gets emptied, eventually
reading exclusively from the cache.
"""
for instance in self.__instances_cache:
yield instance
for instance in self.__instances_original:
self.__instances_cache.append(instance)
yield instance
def _resolve_value(self, name):
""" Returns an appropriate value for the given name.
This simply asks each of the instances for a value.
"""
for instance in self.__instances():
value = instance._resolve_value(name)
if value:
return value
# Otherwise, return an appropriate default value (populate_from)
# TODO: This is duplicated in meta_models. Move this to a common home.
if name in self.__metadata._meta.elements:
populate_from = self.__metadata._meta.elements[name].populate_from
if callable(populate_from):
return populate_from(None)
elif isinstance(populate_from, Literal):
return populate_from.value
elif populate_from is not NotSet:
return self._resolve_value(populate_from)
def __getattr__(self, name):
# If caching is enabled, work out a key
if self.__cache_prefix:
cache_key = '%s.%s' % (self.__cache_prefix, name)
value = cache.get(cache_key)
else:
cache_key = None
value = None
# Look for a group called "name"
if name in self.__metadata._meta.groups:
if value is not None:
return value or None
value = '\n'.join(unicode(BoundMetadataField(self.__metadata._meta.elements[f], self._resolve_value(f))) for f in self.__metadata._meta.groups[name]).strip()
# Look for an element called "name"
elif name in self.__metadata._meta.elements:
if value is not None:
return BoundMetadataField(self.__metadata._meta.elements[name], value or None)
value = self._resolve_value(name)
if cache_key is not None:
cache.set(cache_key, value or '')
return BoundMetadataField(self.__metadata._meta.elements[name], value)
else:
raise AttributeError
if cache_key is not None:
cache.set(cache_key, value or '')
return value or None
def __unicode__(self):
""" String version of this object is the html output of head elements. """
if self.__cache_prefix is not None:
value = cache.get(self.__cache_prefix)
else:
value = None
if value is None:
value = mark_safe(u'\n'.join(unicode(getattr(self, f)) for f,e in self.__metadata._meta.elements.items() if e.head))
if self.__cache_prefix is not None:
cache.set(self.__cache_prefix, value or '')
return value
class BoundMetadataField(object):
""" An object to help provide templates with access to a "bound" metadata field. """
def __init__(self, field, value):
self.field = field
if value:
self.value = field.clean(value)
else:
self.value = None
def __unicode__(self):
if self.value:
return mark_safe(self.field.render(self.value))
else:
return u""
def __str__(self):
return self.__unicode__().encode("ascii", "ignore")
class MetadataBase(type):
def __new__(cls, name, bases, attrs):
# TODO: Think of a better test to avoid processing Metadata parent class
if bases == (object,):
return type.__new__(cls, name, bases, attrs)
# Save options as a dict for now (we will be editing them)
# TODO: Is this necessary, should we bother relaying Django Meta options?
Meta = attrs.pop('Meta', {})
if Meta:
Meta = Meta.__dict__.copy()
# Remove our options from Meta, so Django won't complain
help_text = attrs.pop('HelpText', {})
# TODO: Is this necessary
if help_text:
help_text = help_text.__dict__.copy()
options = Options(Meta, help_text)
# Collect and sort our elements
elements = [(key, attrs.pop(key)) for key, obj in attrs.items()
if isinstance(obj, MetadataField)]
elements.sort(lambda x, y: cmp(x[1].creation_counter,
y[1].creation_counter))
elements = SortedDict(elements)
# Validation:
# TODO: Write a test framework for seo.Metadata validation
# Check that no group names clash with element names
for key,members in options.groups.items():
assert key not in elements, "Group name '%s' clashes with field name" % key
for member in members:
assert member in elements, "Group member '%s' is not a valid field" % member
# Check that the names of the elements are not going to clash with a model field
for key in elements:
assert key not in RESERVED_FIELD_NAMES, "Field name '%s' is not allowed" % key
# Preprocessing complete, here is the new class
new_class = type.__new__(cls, name, bases, attrs)
options.metadata = new_class
new_class._meta = options
# Some useful attributes
options._update_from_name(name)
options._register_elements(elements)
try:
for backend_name in options.backends:
new_class._meta._add_backend(backend_registry[backend_name])
for backend_name in options.backends:
backend_registry[backend_name].validate(options)
except KeyError:
raise Exception('Metadata backend "%s" is not installed.' % backend_name)
#new_class._meta._add_backend(PathBackend)
#new_class._meta._add_backend(ModelInstanceBackend)
#new_class._meta._add_backend(ModelBackend)
#new_class._meta._add_backend(ViewBackend)
registry[name] = new_class
return new_class
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
def _get_formatted_data(cls, path, context=None, site=None, language=None):
""" Return an object to conveniently access the appropriate values. """
return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language)
# TODO: Move this function out of the way (subclasses will want to define their own attributes)
def _get_instances(cls, path, context=None, site=None, language=None):
""" A sequence of instances to discover metadata.
Each instance from each backend is looked up when possible/necessary.
This is a generator to eliminate unnecessary queries.
"""
backend_context = {'view_context': context }
for model in cls._meta.models.values():
for instance in model.objects.get_instances(path, site, language, backend_context) or []:
if hasattr(instance, '_process_context'):
instance._process_context(backend_context)
yield instance
class Metadata(object):
__metaclass__ = MetadataBase
def _get_metadata_model(name=None):
# Find registered Metadata object
if name is not None:
try:
return registry[name]
except KeyError:
if len(registry) == 1:
valid_names = u'Try using the name "%s" or simply leaving it out altogether.'% registry.keys()[0]
else:
valid_names = u"Valid names are " + u", ".join(u'"%s"' % k for k in registry.keys())
raise Exception(u"Metadata definition with name \"%s\" does not exist.\n%s" % (name, valid_names))
else:
assert len(registry) == 1, "You must have exactly one Metadata class, if using get_metadata() without a 'name' parameter."
return registry.values()[0]
def get_metadata(path, name=None, context=None, site=None, language=None):
metadata = _get_metadata_model(name)
return metadata._get_formatted_data(path, context, site, language)
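# Illustrative usage sketch (not part of the original module): a view could
# resolve metadata for the current path and pass it to a template. The view,
# template and field names below are assumptions for the example.
#
#   from django.shortcuts import render
#
#   def page_view(request):
#       seo = get_metadata(request.path, context={'request': request})
#       return render(request, 'page.html', {'seo': seo})
#
#   # in page.html: {{ seo }} renders all head elements, {{ seo.title }} one field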
def get_linked_metadata(obj, name=None, context=None, site=None, language=None):
""" Gets metadata linked from the given object. """
# XXX Check that 'modelinstance' and 'model' metadata are installed in backends
# I believe that get_model() would return None if not
Metadata = _get_metadata_model(name)
InstanceMetadata = Metadata._meta.get_model('modelinstance')
ModelMetadata = Metadata._meta.get_model('model')
content_type = ContentType.objects.get_for_model(obj)
instances = []
if InstanceMetadata is not None:
try:
instance_md = InstanceMetadata.objects.get(_content_type=content_type, _object_id=obj.pk)
except InstanceMetadata.DoesNotExist:
instance_md = InstanceMetadata(_content_object=obj)
instances.append(instance_md)
if ModelMetadata is not None:
try:
model_md = ModelMetadata.objects.get(_content_type=content_type)
except ModelMetadata.DoesNotExist:
model_md = ModelMetadata(_content_type=content_type)
instances.append(model_md)
return FormattedMetadata(Metadata, instances, '', site, language)
def create_metadata_instance(metadata_class, instance):
# If this instance is marked as handled, don't do anything
# This typically means that the django admin will add metadata
# using eg an inline.
if getattr(instance, '_MetadataFormset__seo_metadata_handled', False):
return
metadata = None
content_type = ContentType.objects.get_for_model(instance)
# If this object does not define a path, don't worry about automatic update
try:
path = instance.get_absolute_url()
except AttributeError:
return
# Look for an existing object with this path
language = getattr(instance, '_language', None)
site = getattr(instance, '_site', None)
for md in metadata_class.objects.get_instances(path, site, language):
# If another object has the same path, remove the path.
# It's harsh, but we need a unique path and will assume the other
# link is outdated.
if md._content_type != content_type or md._object_id != instance.pk:
md._path = md._content_object.get_absolute_url()
md.save()
# Move on, this metadata instance isn't for us
md = None
else:
# This is our instance!
metadata = md
# If the path-based search didn't work, look for (or create) an existing
# instance linked to this object.
if not metadata:
metadata, md_created = metadata_class.objects.get_or_create(_content_type=content_type, _object_id=instance.pk)
metadata._path = path
metadata.save()
def populate_metadata(model, MetadataClass):
""" For a given model and metadata class, ensure there is metadata for every instance.
"""
content_type = ContentType.objects.get_for_model(model)
for instance in model.objects.all():
create_metadata_instance(MetadataClass, instance)
def _update_callback(model_class, sender, instance, created, **kwargs):
""" Callback to be attached to a post_save signal, updating the relevant
metadata, or just creating an entry.
NB:
It is theoretically possible that this code will lead to two instances
with the same generic foreign key. If you have non-overlapping URLs,
then this shouldn't happen.
I've held it to be more important to avoid double path entries.
"""
create_metadata_instance(model_class, instance)
def _delete_callback(model_class, sender, instance, **kwargs):
content_type = ContentType.objects.get_for_model(instance)
model_class.objects.filter(_content_type=content_type, _object_id=instance.pk).delete()
def register_signals():
for metadata_class in registry.values():
model_instance = metadata_class._meta.get_model('modelinstance')
if model_instance is not None:
update_callback = curry(_update_callback, model_class=model_instance)
delete_callback = curry(_delete_callback, model_class=model_instance)
## Connect the models listed in settings to the update callback.
for model in metadata_class._meta.seo_models:
models.signals.post_save.connect(update_callback, sender=model, weak=False)
models.signals.pre_delete.connect(delete_callback, sender=model, weak=False)
| shirishagaddi/django-seo | rollyourown/seo/base.py | Python | bsd-3-clause | 14,784 | 0.004532 |
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from flask import url_for
from myflaskapp.models.user import User
from .factories import UserFactory
class TestLoggingIn:
def test_can_log_in_returns_200(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert "Invalid password" in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert "Unknown user" in res
class TestRegistering:
def test_can_register(self, user, testapp):
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get("/")
# Clicks Create Account button
res = res.click("Create account")
# Fills out the form
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but passwords don't match
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert "Passwords must match" in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but username is already registered
form = res.forms["registerForm"]
form['username'] = user.username
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert "Username already registered" in res
| wdm0006/myflaskapp | tests/test_functional.py | Python | bsd-3-clause | 3,662 | 0.000273 |
"""
App wide event registry
Everything in the application is communicated via pubsub. These are the events
that tie everything together.
"""
import wx # type: ignore
WINDOW_STOP = wx.Window.NewControlId()
WINDOW_CANCEL = wx.Window.NewControlId()
WINDOW_CLOSE = wx.Window.NewControlId()
WINDOW_START = wx.Window.NewControlId()
WINDOW_RESTART = wx.Window.NewControlId()
WINDOW_EDIT = wx.Window.NewControlId()
WINDOW_CHANGE = wx.Window.NewControlId()
PANEL_CHANGE = wx.Window.NewControlId()
LIST_BOX = wx.Window.NewControlId()
CONSOLE_UPDATE = wx.Window.NewControlId()
EXECUTION_COMPLETE = wx.Window.NewControlId()
PROGRESS_UPDATE = wx.Window.NewControlId()
TIME_UPDATE = wx.Window.NewControlId()
USER_INPUT = wx.Window.NewControlId()
LEFT_DOWN = wx.Window.NewControlId()
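# Illustrative sketch (assumption, not part of Gooey): because these are plain
# wx control IDs, a widget can be bound to them with ordinary wxPython event
# handling. The panel and handler names below are made up for the example.
#
#   class ExamplePanel(wx.Panel):
#       def __init__(self, parent):
#           super().__init__(parent)
#           wx.Button(self, id=WINDOW_START, label='Start')
#           self.Bind(wx.EVT_BUTTON, self.on_start, id=WINDOW_START)
#
#       def on_start(self, event):
#           print('start requested')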
| chriskiehl/Gooey | gooey/gui/events.py | Python | mit | 840 | 0.014286 |
from pixie.vm.reader import read, StringReader
from pixie.vm.object import Object
from pixie.vm.cons import Cons
from pixie.vm.numbers import Integer
from pixie.vm.symbol import symbol, Symbol
from pixie.vm.persistent_vector import PersistentVector
import pixie.vm.rt as rt
import unittest
data = {u"(1 2)": (1, 2,),
u"(foo)": (symbol(u"foo"),),
u"foo": symbol(u"foo"),
u"1": 1,
u"2": 2,
u"((42))": ((42,),),
u"(platform+ 1 2)": (symbol(u"platform+"), 1, 2),
u"[42 43 44]": [42, 43, 44],
u"(1 2 ; 7 8 9\n3)": (1, 2, 3,),
u"(1 2 ; 7 8 9\r\n3)": (1, 2, 3,)}
class TestReader(unittest.TestCase):
def _compare(self, frm, to):
if isinstance(to, tuple):
assert isinstance(frm, Cons)
for x in to:
self._compare(frm.first(), x)
frm = frm.next()
elif isinstance(to, int):
assert isinstance(frm, Integer)
assert frm._int_val == to
elif isinstance(to, Symbol):
assert isinstance(frm, Symbol)
assert frm._str == to._str
elif isinstance(to, list):
assert isinstance(frm, PersistentVector)
for x in range(len(to)):
self._compare(rt.nth(frm, rt.wrap(x)), to[x])
else:
raise Exception("Don't know how to handle " + str(type(to)))
def test_forms(self):
for s in data:
tst = data[s]
result = read(StringReader(s), True)
assert isinstance(result, Object)
self._compare(result, tst)
| andrewchambers/pixie | pixie/vm/test/test_reader.py | Python | gpl-3.0 | 1,607 | 0.001245 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serves the TensorFlow Profiler UI."""
import os
import threading
import webbrowser
import flask
from .route_handlers import handle_home_page
from .route_handlers import handle_loading_page
from .route_handlers import handle_profile_api
from .utils import prepare_tmp_dir
def start_server(port, open_browser):
"""Starts Flask web server."""
# Define and prepare directories.
resources_dir = os.path.dirname(os.path.realpath(__file__))
static_dir = os.path.join(resources_dir, 'static')
templates_dir = os.path.join(resources_dir, 'templates')
prepare_tmp_dir()
# Create Flask app.
app = flask.Flask(
__name__, static_folder=static_dir, template_folder=templates_dir)
# Enable verbose error messages.
app.config['PROPAGATE_EXCEPTIONS'] = True
# Disable HTML caching.
app.config['TEMPLATES_AUTO_RELOAD'] = True
# Define routes.
@app.route('/')
def home():
"""Responds to request for home page."""
return handle_home_page()
@app.route('/profile')
def profile():
"""Responds to request for profile API."""
# Build options.
return handle_profile_api()
@app.route('/loading')
def loading():
"""Responds to request for loading page."""
return handle_loading_page()
# Define URL.
host = '0.0.0.0'
url = 'http://localhost:{}'.format(port)
if open_browser:
# Open new browser window after short delay.
threading.Timer(1, lambda: webbrowser.open(url)).start()
  # Start the server; if requested, the browser opens after the short delay scheduled above.
app.run(host, port, threaded=True)
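# Minimal usage sketch (assumption, not part of the original module): a CLI
# entry point would call start_server() with a port and a flag controlling
# whether a browser tab is opened automatically.
#
#   start_server(port=7006, open_browser=False)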
| tensorflow/profiler-ui | server/server.py | Python | apache-2.0 | 2,149 | 0.011633 |
# -*- coding: utf-8 -*-
#
# django-bulbs documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 18 16:55:34 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
print(sys.path)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-bulbs'
copyright = u'2013, Onion Tech Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-bulbsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-bulbs.tex', u'django-bulbs Documentation',
u'Onion Tech Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-bulbs', u'django-bulbs Documentation',
[u'Onion Tech Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-bulbs', u'django-bulbs Documentation',
u'Onion Tech Team', 'django-bulbs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
#'http://docs.python.org/': None,
'https://docs.djangoproject.com/en/dev': 'https://docs.djangoproject.com/en/dev/_objects',
}
| pombredanne/django-bulbs | docs/conf.py | Python | mit | 8,285 | 0.007483 |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, absolute_import, print_function
import json
import copy
import sys
"""
Limitations of the sequence_set mode:
1) OCCMATRIX is not copied to the child sets
2) The U-ramping mode is switched off for the child sets
3) There are also the afm_ordering mode and, possibly, neb
4) the kpoints file is used only for the first set
5) u-ramping inherit_xred - there may be problems with more than two sets
TODO:
ngkpt_dict_for_kspacings - there could be problems when ngkpt is used, please test.
"""
from siman import header
from siman.header import print_and_log, printlog
from siman.small_functions import is_list_like, red_prec
from siman.functions import invert
#Vasp keys
vasp_electronic_keys = [
'ALGO',
'PREC',
'LREAL',
'ENCUT',
'ENAUG',
'ISMEAR',
'SIGMA',
'EDIFF',
'NELM',
'NELMIN',
'NELMDL',
'MAXMIX',
'NELECT'
]
vasp_ionic_keys = [
'IBRION',
'ISIF',
'NSW',
'EDIFFG',
'POTIM',
'POMASS',
'ZVAL',
'SMASS'
]
vasp_other_keys = [
'AGGAC',
'LUSE_VDW',
'PARAM1',
'PARAM2',
'LVDW',
'LVHAR',
'LCALCPOL',
'EFIELD',
'VDW_RADIUS',
'VDW_SCALING',
'VDW_CNRADIUS',
'LWANNIER90_RUN',
'IVDW',
'VDW_D',
'MDALGO',
'TEBEG',
'TEEND',
'SYSTEM',
'ISTART',
'ICHARG',
'KGAMMA',
'KSPACING',
'EFIELD_PEAD',
'LPLANE',
'LSEPC',
'LSEPB',
'OMEGAMAX',
'ENCUTGW',
'NBANDSGW',
'NBANDSO',
'NBANDSV',
'ANTIRES',
'NOMEGA',
'OMEGATL',
'NCORE',
'NPAR',
'LSCALU',
'NSIM',
'ISYM',
'SYMPREC',
'LORBIT',
'EMIN',
'EMAX',
'NEDOS',
'LAECHG',
'LSORBIT',
'SAXIS',
'ISPIN',
'NBANDS',
'PSTRESS',
'ADDGRID',
'MAGMOM',
'GGA_COMPAT',
'IMAGES',
'LDAU',
'LDAUTYPE',
'LDAUL',
'LDAUU',
'LDAUJ',
'LDAUPRINT',
'LASPH',
'LMAXMIX',
'NFREE',
'AMIX',
'BMIX',
'AMIX_MAG',
'BMIX_MAG',
'WC',
'MAXMIX',
'OCCDIR1',
'OCCEXT',
'LHFCALC',
'HFSCREEN',
'TIME',
'PRECFOCK',
'NKRED',
'NGX',
'NGY',
'NGZ',
'NBMOD',
'LPARD',
'EINT',
'LWAVE',
'GGA',
'IALGO',
'LSCALAPACK',
'AMIN',
'IDIPOL',
'LDIPOL',
'DIPOL',
'LVTOT',
'AEXX',
'LDIAG',
'METAGGA',
'CMBJB',
'CMBJA',
'IMIX',
'LPEAD',
'LEPSILON',
'LCALCEPS',
'CSHIFT',
'LOPTICS',
'LRPA',
'LSPECTRAL',
'LCHARG',
'LELF',
'RWIGS',
'NUPDOWN',
'ALDAC',
'LAMBDA',
'SUBATOM',
'KPPRA',
'LAMBDA_D_K',
'USEPOT',
'M_CONSTR',
'I_CONSTRAINED_M',
'CORE_C',
'EB_K',
'LVDW_EWALD',
]
vasp_keys = vasp_electronic_keys+vasp_ionic_keys+vasp_other_keys
siman_keys = [
'universal', # universal parameter with any content
'u_ramping_region', #deprecated
'u_ramping_nstep', #number of u ramping steps
'magnetic_moments',
'afm_ordering',
'set_sequence',# sequence of sets
'savefile', #additional keys pointing which files should be saved
'k_band_structure', # list, first position is number of points, then high-symmetry k-points in the form ['G', 0, 0, 0] in reciprocal space for calculating band structure
'path2pot', # path to folder with potentials - used with potdir; if not provided, header.path2potentials is used
'path_to_potcar', # explicit path to potential - depreacated
'periodic', # 1 or 0, periodic boundary conditions or not; by default considered periodic
]
aims_keys = [
'k_grid',
'default_initial_moment',
'spin',
]
def read_vasp_sets(user_vasp_sets, override_global = False):
"""
Read user sets and add them to project database
Now for VASP
###INPUT:
- varset (dict) - database dict with all sets of a project
- user_vasp_sets (list) - list of user sets that describes creation of new sets based on inheritance
    - override - allows to recreate all sets; can be useful when you want to add some new property to all your sets - very dangerous to do!
###RETURN:
- user_vasp_sets (list)
"""
varset = header.varset
bfolder = '' #by default no blockfolder
for l in user_vasp_sets:
if override_global or 'over' in l[-1]:
override = True
else:
override = False
if override or l[0] not in varset:
# print override, 'override'
param = l[2]
if 'bfolder' in param:
bfolder = param['bfolder']
else:
bfolder = None
s = inherit_iset(l[0], l[1], varset, override = override, newblockfolder = bfolder)
# print ('param', param,)
s.load(param, inplace = True)
header.varset = varset
return varset
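# Illustrative usage sketch (not from the original file): each entry of
# user_vasp_sets is (new_set_name, parent_set_name, params_dict[, flags]),
# where 'over' in the last element forces the set to be recreated. The set
# names and parameter values below are made up.
#
#   sets = [
#       ('opt_fine', 'opt', {'ENCUT': 520, 'KSPACING': 0.2}, 'over'),
#       ('static_u', 'static', {'LDAUU': {'Fe': 4.0}, 'ISMEAR': 0, 'SIGMA': 0.1}),
#   ]
#   varset = read_vasp_sets(sets)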
class InputSet():
"""docstring for InputSet
The second important class which is used to store
parameters of calculation
For VASP parameters *self.vasp_params* dict is used;
usually it contains the parameters in the same format as INCAR file.
However, several exceptions are:
for 'LDAUU', 'LDAUJ', 'LDAUL' you should provide
    dictionaries with corresponding values for each element in the form: {'Co':3.4,}.
self.potdir (dict) - name of POTCAR folder for each element, for example {3:'Li', 8:'O'}
self.blockfolder (str) - additional subfolder will be created calculation with this set
self.save_last_wave (bool) - set True to save last WAVECAR in u-ramping mode
self.kpoints_file - if True, k-points file is created, if string then it is considered as path to external kpoints file
self.path_to_potcar (str) - explicit path to potcar, can be used instead of self.potdir
self.set_sequence (list) - list of InputSet() objects to make multiset runs. The current set is used as a first one.
TODO
Describe the difference between update() and load() methods !
"""
def __init__(self, ise = None, path_to_potcar = None):
#super(InputSet, self).__init__()
self.ise = ise
self.name = ise
self.des = "" # description
self.potdir = {}
self.units = "vasp"
self.vasp_params = {}
self.params = self.vasp_params # params for any code!
self.mul_enaug = 1
self.history = "Here is my uneasy history( :\n"
self.tsmear = None
self.tolmxf = None
self.ngkpt = None
self.blockfolder = ''
self.set_sequence = None
self.kpoints_file = None # can be path to external file
self.save_last_wave = None #if True than do no remove last wavefunction
self.periodic = 1 # PBC
# self.use_ngkpt = False
if path_to_potcar:
self.path_to_potcar = path_to_potcar
else:
self.path_to_potcar = None
#Code scpecific parameters, now only for Vasp
for key in vasp_electronic_keys:
self.vasp_params[key] = None
for key in vasp_ionic_keys:
self.vasp_params[key] = None
for key in vasp_other_keys:
self.vasp_params[key] = None
for key in aims_keys:
self.params[key] = None
#add to varset
# if ise not in header.varset:
header.varset[ise] = self
def printme(self):
for key in self.vasp_params:
if self.vasp_params[key] == None: continue
print_and_log( "{:30s} = {:s} ".format("s.vasp_params['"+key+"']", str(self.vasp_params[key]) ), imp = 'Y', end = '\n' )
printlog('ngkpt:', self.ngkpt, imp = 'Y')
printlog('POTDIR:', self.potdir, imp = 'Y', end = '\n' )
def update(self):
#deprecated, but still can be usefull
# print_and_log('Updating set ...\n')
# c1 = 1; c2 = 1
# if self.units == "abinit":
# c1 = to_eV
# c2 = Ha_Bohr_to_eV_A
# #Update Vasp parameters
# if self.units == "vasp":
# c1 = 1
# c2 = 1
# if self.ecut == None:
# self.vasp_params['ENCUT'] = None
# self.vasp_params['ENAUG'] = None
# else:
# self.vasp_params['ENCUT'] = self.ecut * c1* self.dilatmx * self.dilatmx
# self.vasp_params['ENAUG'] = self.mul_enaug * self.vasp_params['ENCUT']
# self.vasp_params['SIGMA'] = self.tsmear * c1
vp = self.vasp_params
self.tsmear = vp.get('SIGMA')
self.tolmxf = vp.get('EDIFFG')
if self.tolmxf and self.tolmxf < 0:
self.tolmxf*=-1
self.toldfe = vp.get('EDIFF')
# self.vasp_params['EDIFF'] = self.toldfe * c1
# self.vasp_params['NELM'] = self.nstep
# self.vasp_params['NSW'] = self.ntime
# self.vasp_params['EDIFFG'] = -self.tolmxf * c2
self.kspacing = vp.get('KSPACING')
self.ecut = vp.get('ENCUT')
# print (self.vasp_params)
if 'LDAUU' in self.vasp_params and self.vasp_params['LDAUU']:
self.dftu = True
else:
self.dftu = False
if 'ISPIN' in self.vasp_params and self.vasp_params['ISPIN'] == 2:
self.spin_polarized = True
else:
self.spin_polarized = False
def load(self,param, inplace = False):
"""
Update parameters of set from dict param
"""
# print(param)
if inplace:
s = self
else:
s = copy.deepcopy(self)
for key in param:
if key in vasp_keys:
s.set_vaspp(key, param[key])
elif key == 'set_potential':
for key2 in param[key]:
# print key2, 'key2'
s.set_potential(key2, param[key][key2])
elif key == 'add_nbands':
# print param[key]
s.set_add_nbands(param[key])
elif key == 'ngkpt':
s.set_ngkpt(param[key])
elif key == 'kpoints_file':
if param[key]:
s.kpoints_file = True
else:
''
s.kpoints_file = False
elif key == 'bfolder':
print_and_log( 'New blockfolder', param[key])
elif key in siman_keys:
s.set_attrp(key, param[key] )
elif key in aims_keys:
s.set_vaspp(key, param[key] )
else:
print_and_log('Error! Unknown key: '+key)
raise RuntimeError
if key == 'set_sequence':
sets = []
for se in s.set_sequence:
sets.append(copy.deepcopy(header.varset[se]))
s.set_sequence = sets #put objects instead of names
# if hasattr(s, 'set_sequence') and s.set_sequence:
# sets = []
# for se in s.set_sequence:
# if type(se) == str:
# sets.append(copy.deepcopy(varset[se]))
# else:
# sets.append(copy.deepcopy(se))
# s.set_sequence = sets #put objects instead of names
return s
def read_universal(self, filename):
#read any file to univeral parameter
with open(filename, 'r') as f:
fil = f.read()
self.params['universal'] = fil
def read_incar(self, filename):
with open(filename, 'r') as f:
fil = f.read()
fil = fil.replace(';','\n').splitlines()
for l in fil:
if '=' in l:
(token, value) = l.split('=')
value = value.strip()
try:
if '.' in value:
value = float(value)
else:
value = int(value)
except:
pass
self.vasp_params[token.strip()] = value
# self.update()
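    # Illustrative usage sketch (not from the original file): building a set
    # from an existing INCAR file and then adjusting individual parameters.
    # The path and values below are made up.
    #
    #   s = InputSet('from_incar')
    #   s.read_incar('calc/INCAR')       # fills s.vasp_params from the file
    #   s.set_vaspp('ENCUT', 500)
    #   s.set_potential(3, 'Li_sv')      # POTCAR folder for Z = 3 (Li)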
def add_conv_kpoint(self,arg):
if type(arg) is not str:
sys.exit("\nadd_conv_kpoint error\n")
if arg in self.conv_kpoint:
print_and_log( "Warning! You already have this name in list")
return
self.conv_kpoint.append(arg)
self.history += "Name "+arg+" was added to self.conv_kpoint\n"
self.update()
def add_conv_tsmear(self,arg):
if type(arg) is not str:
sys.exit("\nadd_conv_tsmear type error\n")
try:
self.conv_tsmear[0]
except AttributeError:
print_and_log( "Error! Set "+self.ise+" does not have conv_tsmear, I create new\n")
self.conv_tsmear = []
if arg in self.conv_tsmear:
print_and_log( "Warning! You already have this name in list", imp = 'y')
return
self.conv_tsmear.append(arg)
self.history += "Name "+arg+" was added to self.conv_tsmear\n"
self.update()
def add_conv(self,arg,type_of_conv):
if type(arg) is not str:
raise TypeError
if type_of_conv not in ["kpoint_conv","tsmear_conv","ecut_conv","nband_conv","npar_conv"]:
raise TypeError
try:
self.conv[type_of_conv][0]
except AttributeError:
print_and_log( "Warning! Set "+self.ise+" does not have conv, I create new\n")
self.conv = {}
except KeyError:
print_and_log( "Warning! Set "+self.ise+" does not have list for this key in conv, I add new\n")
self.conv[type_of_conv] = []
except IndexError:
pass
if arg in self.conv[type_of_conv]:
print_and_log( "Warning! You already have name %s in list of conv %s. Nothing done.\n" % \
(str(arg), str(self.conv[type_of_conv]) ) )
return
self.conv[type_of_conv].append(arg)
self.history += "Name "+arg+" was added to self.conv["+type_of_conv+"]\n"
print_and_log( "Name "+arg+" was added to self.conv["+type_of_conv+"] of set "+self.ise+" \n")
self.update()
def set_compare_with(self,arg):
if type(arg) is not str:
raise TypeError ("\nset_compare_with error\n")
self.compare_with += arg+" "
def set_potential(self,znucl, arg = ''):
# print arg
if not arg:
arg = header.PATH2POTENTIALS+'/'+invert(znucl)
            printlog('Attention! Default potential is chosen from ',header.PATH2POTENTIALS, 'for',invert(znucl) , imp ='Y')
if type(arg) not in (str,):
# sys.exit("\nset_potential error\n")
raise RuntimeError
if znucl in self.potdir:
if arg == self.potdir[znucl]:
print_and_log( "Warning! You already have the same potential for "+str(znucl)+" element\n" )
# print type(self.potdir)
self.potdir[znucl] = arg
self.history += "Potential for "+str(znucl)+" was changed to "+arg+"\n"
print_and_log( "Potential for "+str(znucl)+" was changed to "+arg+"\n" )
# self.update()
return
def set_relaxation_type(self,type_of_relaxation):
name = "Type of relaxation ISIF"
if type(type_of_relaxation) not in [str, ]:
raise TypeError
old = self.vasp_params["ISIF"]
if "ions" == type_of_relaxation:
if int(self.ise[0]) != 9:
print_and_log("Warning! The name of set is uncostintent with relaxation type\n")
raise TypeError
self.vasp_params["ISIF"] = 2
# self.set_nmdsteps(200)
elif type_of_relaxation == "full":
if int(self.ise[0]) != 2:
print_and_log("Warning! The name of set is uncostintent with relaxation type\n")
raise TypeError
self.vasp_params["ISIF"] = 3
else:
print_and_log("Error! Uncorrect type of relaxation\n")
raise TypeError
arg = self.vasp_params["ISIF"]
if old == arg:
print_and_log("Warning! You did not change "+name+" in "+self.ise+" set\n")
return
self.history += " "+name+" was changed from "+str(old)+" to "+str(arg) + "\n"
print_and_log(name+" was changed from "+str(old)+" to "+str(arg) + " in set "+self.ise+" \n")
self.update()
#print self.history
return
def set_add_nbands(self,arg):
name = "add_nbands"
# print(type(arg))
if type(arg) not in [float, int, type(None) ]:
raise TypeError
try:
self.add_nbands
except AttributeError:
self.add_nbands = None
old = self.add_nbands
self.add_nbands = arg
if old == arg:
print_and_log("Warning! You did not change "+name+" in "+self.ise+" set\n")
return
self.history += " "+name+" was changed from "+str(old)+" to "+str(arg) + "\n"
print_and_log(" "+name+" was changed from "+str(old)+" to "+str(arg) + " in set "+self.ise+" \n")
return #ISTAR
def set_ngkpt(self,arg):
if not is_list_like(arg):
printlog("Error! set_ngkpt type error")
old = copy.copy(self.ngkpt)
self.ngkpt = copy.copy(arg)
self.kpoints_file = True
self.vasp_params['KSPACING'] = None
if old == arg:
print_and_log( "Warning! You did not change one of your parameters in new set", imp = 'y')
return
self.history += "ngkpt was changed from "+str(old)+" to "+str(arg) + " and KPOINTS file was swithed on\n"
return
def set_vaspp(self, token, arg, des = "see manual"):
"""
Used for setting vasp parameters.
"""
# print(token, arg)
if token in ("ISMEAR",):
if type(arg) not in [int, type(None), ]:
raise TypeError
if token in ("KSPACING",):
# print(type(arg))
if type(arg) not in [float, type(None), ]:
raise TypeError
old = self.vasp_params.get(token)
self.vasp_params[token] = arg
if old == arg:
print_and_log("Warning! You did not change "+token+" in "+self.ise+" set\n")
else:
self.history += " "+token+" was changed from "+str(old)+" to "+str(arg) + "\n"
print_and_log(token+" was changed from "+str(old)+" to "+str(arg) +" - "+ des+" in set "+self.ise+" \n")
self.update()
return
def set_attrp(self, token, arg, des = "see manual"):
"""
set any attribute.
"""
# print (token)
if hasattr(self, token):
old = getattr(self, token)
if old == arg:
print_and_log("Warning! You did not change "+token+" in "+self.ise+" set\n")
else:
setattr(self, token, arg)
self.history += " "+token+" was changed from "+str(old)+" to "+str(arg) + "\n"
print_and_log(token+" was changed from "+str(old)+" to "+str(arg) +" - "+ des+" in set "+self.ise+" \n")
else:
setattr(self, token, arg)
print_and_log("New attribute "+token+" added to "+self.ise+" set\n")
self.history += " "+token+" was added as a new attr with "+str(arg) + " value \n"
return
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def toabinit(self, st):
"""
Convert from VASP (add more codes in the future) to Abinit
"""
        def special_convert(vasp_param, vasp_dic, abi_dic):
            'placeholder for parameters that need special handling; currently a no-op'
            return abi_dic
special = {'EDIFFG', 'IBRION', 'ISIF', 'KSPACING', 'KGAMMA', 'ISMEAR', 'LDAU', 'LDAUL','LDAUU','LDAUJ',}
skip = {'PREC', 'ALGO', 'POTIM'}
VASP2Abi = {
'ENCUT':'ecut',
# 'ENAUG':'pawecutdg',
'EDIFF':'toldfe',
'EDIFFG':'tolmxf',
'NELM':'nstep',
'NSW':'ntime',
# 'IBRION':'ionmov',
# 'ISIF':'optcell',
# 'PREC':['ngfft', 'boxcutmin',
# 'ALGO':'iscf',
# 'KSPACING':'ngkpt',
# 'KGAMMA':'shiftk', #nshiftk
'LREAL':None,
'ISMEAR':'occopt',
'SIGMA':'tsmear',
'LPLANE':None,
# 'POTIM':'dtion',
'LORBIT':None,
'ISPIN':'nsppol',
'LDAU':'usepawu',
'LDAUTYPE':None,
'LDAUL':'lpawu',
'LDAUU':'upawu',
'LDAUJ':'jpawu',
'LASPH':None,
'LMAXMIX':None,
}
abi_dic = {}
vp = self.vasp_params
en = 1/header.to_eV
fo = 1/header.Ha_Bohr_to_eV_A
le = 1/header.to_ang
for p in vp:
''
if p in skip or p not in VASP2Abi:
continue
if VASP2Abi[p] is None:
continue
v = vp[p]
abinam = VASP2Abi[p]
if p == 'EDIFFG':
aval = red_prec(v*-1*fo)
elif p in ['ENCUT', 'EDIFF', 'ENAUG', 'SIGMA']:
aval = red_prec(v*en )
elif p in ['LDAU']:
if 'T' in v:
aval = 1
else:
aval = 0
elif p == 'LDAUL':
aval = 2 # d metals
elif p == 'ISMEAR':
if v == 0:
#Gaussian
aval =7
elif v == -5:
aval = 7 # still gauss !
else:
aval = vp[p]
abi_dic[abinam] = aval
for p in abi_dic:
print(p, abi_dic[p])
print('autoparal 1')
print('boxcutmin 1.5') # prec normal
print('pawecutdg', abi_dic['ecut']*2) # fine mesh
print('ngkpt ','put here' )
from textwrap import wrap
import numpy as np
mag_str = '0 0 '+' 0 0 '.join(np.array(st.magmom).astype(str))
print('spinat', '\n'.join(wrap(mag_str)) )
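# Hypothetical usage sketch (editor's illustration, not part of the original module):
# print an Abinit-style parameter block derived from the default 'static' set for a
# structure `st` that is assumed to carry per-atom magnetic moments in st.magmom.
#
#   init_default_sets()
#   header.varset['static'].toabinit(st)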
def inherit_iset(ise_new, ise_from, varset, override = False, newblockfolder = None):
""" Create new set copying from existing and update some fields. If ise_from does not exist create new"""
ise_new = ise_new.strip()
ise_from = ise_from.strip()
if ise_from not in varset:
printlog( "\nWarning! Set "+ise_from+" does not exist. I return new empty set\n")
return InputSet(ise_new)
old = varset[ise_from]
for key in vasp_electronic_keys+vasp_ionic_keys+vasp_other_keys: #check if new keys was added
if key not in old.vasp_params:
old.vasp_params[key] = None
if override:
print_and_log( "\nAttention! You have chosen to override set "+ise_new+"\n")
elif ise_new in varset:
print_and_log( "\nSet "+ise_new+" already exists. I return it without changes. Be carefull not to spoil it\n")
return varset[ise_new]
new = copy.deepcopy( old )
new.ise = ise_new
new.compare_with = ise_from+" "
new.des = "no description for these set, see history"
new.conv = {}
print_and_log( "New set "+ise_new+" was inherited from set "+ise_from+"\n")
new.history = old.history + "\nSet "+ise_new+" was inherited from: "+ ise_from +"\n"
if newblockfolder:
new.history += 'blockfolder changed from '+new.blockfolder+' to '+newblockfolder+'\n'
new.blockfolder = newblockfolder
varset[ise_new] = new
return new
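def _example_inherit_iset():
    '''Hypothetical usage sketch (editor's illustration, not part of the original module):
    derive a new set from the default 'static' set and tweak a couple of parameters.
    The set name and blockfolder below are made up; assumes init_default_sets() defined
    further down has populated header.varset.'''
    init_default_sets()
    s = inherit_iset('static_500', 'static', header.varset, newblockfolder='encut500')
    s.set_vaspp('ENCUT', 500, 'plane-wave cutoff, eV')
    s.set_add_nbands(1.75)
    return s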
def make_sets_for_conv(isefrom,conv,list_of_parameters,varset):
varset[isefrom].add_conv( isefrom, conv ); i = len(varset[isefrom].conv[conv])
#print varset[isefrom].conv[conv]
for param in list_of_parameters:
newise = isefrom+conv[0:2]+str(i) ; i+=1
if newise in varset:
print_and_log("Set %s already in varset; continue\n" %( str(newise) ) )
continue
if conv == "kpoint_conv":
for key in varset[isefrom].conv[conv]:
if varset[key].ngkpt == param:
print_and_log( "Set %s already contains param %s; please check; return; \n" %( str(key), str(param) ) )
return
#print newise
s = inherit_iset(newise, isefrom, varset,newblockfolder = conv)
s.set_ngkpt(param)
#print s
elif conv == "tsmear_conv":
for key in varset[isefrom].conv[conv]:
if varset[key].tsmear == param:
print_and_log( "Set %s already contains param %s; please check; return; \n" %( str(key), str(param) ) )
return
s = inherit_iset(newise, isefrom, varset,newblockfolder = conv)
s.set_tsmear(param)
elif conv == "ecut_conv":
#automatically set dilatmx == 1
for key in varset[isefrom].conv[conv]:
if varset[key].vasp_params["ENCUT"] == param:
print_and_log( "Set %s already contains param %s; please check; return; \n" %( str(key), str(param) ) )
return
s = inherit_iset(newise, isefrom, varset,newblockfolder = conv)
s.set_dilatmx(1.)
s.set_ecut(param)
else:
print_and_log( "Warning! Unknown type of conv; return\n")
return
varset[isefrom].add_conv( newise, conv )
print_and_log( "The following sets are in varset[%s].conv %s \n"%(str(isefrom),str(varset[isefrom].conv) ) )
return
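def _example_kpoint_convergence(varset):
    '''Hypothetical usage sketch (editor's illustration, not part of the original module):
    build a family of k-point convergence sets derived from an existing set named
    'static', which is assumed to be present in varset (see init_default_sets below).'''
    make_sets_for_conv('static', 'kpoint_conv',
                       [[6, 6, 6], [8, 8, 8], [10, 10, 10]], varset)
    # each generated set inherits 'static', receives its own ngkpt via set_ngkpt(),
    # and is registered in varset['static'].conv['kpoint_conv']
    return varset['static'].conv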
def init_default_sets(init = 0):
"""
Pre-defined sets for Vasp
"""
varset = header.varset
setname = 'aks'
if init or setname not in varset: #init only once
s = InputSet(setname) #default starting set without relaxation
s.kpoints_file = True
s.add_nbands = 1.25
s.vasp_params = {
'NELM' : 50,
'IBRION' : 1,
'KGAMMA' : ".TRUE.",
'ENCUT' : 441.0,
'EDIFFG' : 0,
'SIGMA' : 0.2,
'NELMIN' : 4,
'ISTART' : 0,
'LSCALU' : ".FALSE.",
'MAXMIX' : 40,
'NSIM' : 4,
'ISIF' : 2,
'EDIFF' : 6e-06,
'ENAUG' : 776.16,
'NSW' : 0,
'LPLANE' : ".TRUE.",
'LREAL' : "Auto",
'ISMEAR' : 2,
'NPAR' : 1,
'ALGO' : "Normal",
'PREC' : "Normal",
'KSPACING' : 0.235,
}
s.potdir = copy.deepcopy(header.nu_dict)
s.update()
header.varset[setname] = copy.deepcopy(s)
setname = 'static'
if init or setname not in varset: #init only once
s = InputSet(setname) #default starting set without relaxation
s.kpoints_file = True
s.add_nbands = 1.5
s.vasp_params = {
'ISTART' : 0,
'NELM' : 50,
'EDIFF' : 1e-05,
'NSW' : 0,
'EDIFFG' : 0,
'IBRION' : 1,
'ISIF' : 2,
'PREC' : "Normal",
'ALGO' : "Normal",
'ENCUT' : 400,
'ENAUG' : 400*1.75,
'KSPACING' : 0.2,
'KGAMMA' : ".TRUE.",
'LREAL' : "Auto",
'ISMEAR' : 0,
'SIGMA' : 0.1,
'LPLANE' : ".TRUE.",
'NPAR' : 1,
}
s.potdir = copy.deepcopy(header.nu_dict)
s.update()
header.varset[setname] = copy.deepcopy(s)
setname = 'opt'
if init or setname not in varset: #init only once
# sys.exit()
s = InputSet(setname)
s.kpoints_file = True
s.add_nbands = 1.5
s.vasp_params = {
'IBRION' : 1,
'ENCUT' : 150,
'EDIFFG' : -0.05,
'SIGMA' : 0.2,
'ISIF' : 2,
'EDIFF' : 1e-05,
'NSW' : 20,
'ISMEAR' : 2,
'KSPACING' : 0.2,
}
s.potdir = copy.deepcopy(header.nu_dict)
s.update()
header.varset[setname] = copy.deepcopy(s)
# print(header.varset[setname], setname)
return | dimonaks/siman | siman/set_functions.py | Python | gpl-2.0 | 28,264 | 0.022053 |
import sys
import copy
import json
import os.path
import unittest
from hashlib import sha256
from io import BytesIO
from metacore import storj
from metacore.database import files
from metacore.error_codes import *
from metacore.tests import *
if sys.version_info.major == 3:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
__author__ = 'karatel'
class UploadFileCase(unittest.TestCase):
"""
Test uploading files to the Node.
"""
url = '/api/files/'
def setUp(self):
"""
Switch to test config.
Remember initial records in the 'files' table.
Remember initial files set in the Upload Dir.
Remember initial blacklist content.
"""
self.app = storj.app
self.app.config['TESTING'] = True
self.files = set(tuple(_) for _ in files.select().execute())
self.stored_files = set(os.listdir(self.app.config['UPLOAD_FOLDER']))
self.file_data = b'some data'
valid_hash = sha256(self.file_data).hexdigest()
valid_signature = test_btctx_api.sign_unicode(test_owner_wif,
valid_hash)
self.blocked_data = b'blocked_data'
self.blocked_hash = sha256(self.blocked_data).hexdigest()
with open(self.app.config['BLACKLIST_FILE'], 'r+') as fp:
self.initial_blacklist = fp.read()
fp.writelines((self.blocked_hash + '\n',))
self.send_data = {
'data_hash': valid_hash,
'file_data': (BytesIO(self.file_data), 'test_file'),
'file_role': '000'
}
self.headers = {
'sender_address': test_owner_address,
'signature': valid_signature
}
self.patcher = patch('metacore.processor.BTCTX_API', test_btctx_api)
self.patcher.start()
def tearDown(self):
"""
Switch off some test configs.
Remove new records form the 'files' table.
Remove new files from Upload Dir.
Return initial blacklist content.
"""
self.patcher.stop()
        files.delete().where(
            # remove only the records added during this test (SQLAlchemy NOT IN clause)
            ~files.c.hash.in_([_[0] for _ in self.files])
        ).execute()
added_files = set(
os.listdir(self.app.config['UPLOAD_FOLDER'])
) - self.stored_files
for filename in added_files:
os.unlink(os.path.join(self.app.config['UPLOAD_FOLDER'], filename))
with open(self.app.config['BLACKLIST_FILE'], 'w') as fp:
fp.write(self.initial_blacklist)
def _get_saved_file_path(self):
"""
Generate path for file which is expected to be saved.
:return: file path
:rtype: str
"""
return os.path.join(self.app.config['UPLOAD_FOLDER'],
self.send_data['data_hash'])
def _make_request(self, data, headers=None):
"""
Make a common request for this Test Case. Get a response.
:return: Response
"""
if headers is None:
headers = self.headers
with self.app.test_client() as c:
response = c.post(
path=self.url,
data=data,
content_type='multipart/form-data',
headers=headers
)
return response
def test_success_upload(self):
"""
Upload file with all valid data.
"""
response = self._make_request(self.send_data)
self.assertEqual(201, response.status_code,
"'Created' status code is expected.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'data_hash': self.send_data['data_hash'],
'file_role': self.send_data['file_role']},
json.loads(response.data.decode()),
"Unexpected response data."
)
uploaded_file_record = files.select(
files.c.hash == self.send_data['data_hash']
).execute().first()
self.assertIsNotNone(uploaded_file_record,
"File record does not exist in the table.")
self.assertEqual(self.headers['sender_address'],
uploaded_file_record.owner,
"Sender address has to be stored into 'owner' field.")
self.assertSetEqual(
self.files | {tuple(uploaded_file_record)},
set(tuple(_) for _ in files.select().execute()),
"Only new record has to be inserted in the database. "
"No other changes."
)
try:
with open(self._get_saved_file_path(), 'rb') as stored_file:
self.assertEqual(
self.file_data, stored_file.read(),
"Stored file data does not match with uploaded one."
)
except OSError:
self.assertTrue(False, 'Uploaded file is not saved.')
def test_invalid_hash(self):
"""
Try to upload file with invalid SHA-256 hash.
"""
self.send_data['data_hash'] = 'invalid hash'
self.headers['signature'] = test_btctx_api.sign_unicode(
test_owner_wif,
self.send_data['data_hash']
)
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['INVALID_HASH']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_blocked_hash(self):
"""
Try to upload file with blacklisted SHA-256 hash.
"""
self.send_data.update({
'file_data': (BytesIO(self.blocked_data), 'test_file'),
'data_hash': self.blocked_hash
})
self.headers['signature'] = test_btctx_api.sign_unicode(
test_owner_wif,
self.send_data['data_hash']
)
response = self._make_request(self.send_data)
self.assertEqual(404, response.status_code,
"'Not Found' status code is expected.")
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_invalid_signature(self):
"""
Try to upload file with invalid signature.
"""
self.headers['signature'] = self.headers['signature'].swapcase()
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['INVALID_SIGNATURE']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_mismatched_hash(self):
"""
Try to upload file with mismatched SHA-256 hash.
"""
self.send_data['data_hash'] = sha256(self.file_data + b'_').hexdigest()
self.headers['signature'] = test_btctx_api.sign_unicode(
test_owner_wif, self.send_data['data_hash']
)
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['MISMATCHED_HASH']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_huge_file(self):
"""
Try to upload too big file.
"""
mock_config = copy.deepcopy(self.app.config)
mock_config['MAX_FILE_SIZE'] = len(self.file_data) - 1
with patch('metacore.storj.app.config', mock_config):
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['HUGE_FILE']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_full_disk(self):
"""
Try to upload file with no enough space on disk.
"""
mock_config = copy.deepcopy(self.app.config)
mock_config['NODE'] = Mock(capacity=1)
with patch('metacore.storj.app.config', mock_config):
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['FULL_DISK']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_reached_limit(self):
"""
Try to upload file with bandwidth limit reached.
"""
mock_config = copy.deepcopy(self.app.config)
mock_config['NODE'].set_limits(incoming=1)
with patch('metacore.storj.app.config', mock_config):
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['LIMIT_REACHED']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
if __name__ == '__main__':
unittest.main()
| Storj/metacore | metacore/tests/test_upload.py | Python | agpl-3.0 | 12,336 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cubieboard IP module, which shows the IP
"""
__author__ = "Dingo"
__copyright__ = "Copyright 2013, Cubieboard Player Project"
__credits__ = ["PySUNXI project"]
__license__ = "GPL"
__version__ = "0.0.2"
__maintainer__= "Dingo"
__email__ = "btrfs@sina.com"
import os, sys, time
import cbtask
import pdb
class IPTask(cbtask.CbPlayerTask):
"""
do IP playing
"""
def __init__(self, name):
cbtask.CbPlayerTask.__init__(self, name)
    def main(self): ## task entry
        ret = 0
        return ret
def handle_enter_key(self): ##
#TODO
pass
def handle_exit_key(self): ##
#TODO
pass
def handle_left_key(self): ##
#TODO
pass
def handle_right_key(self): ##
#TODO
pass
if __name__ == "__main__" :
pass
| netdingo/cbplayer | disk.py | Python | gpl-2.0 | 882 | 0.037415 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
"""A Transform that parses serialized `tensorflow.Example` protos."""
def __init__(self, features):
"""Initialize `ExampleParser`.
The `features` argument must be an object that can be converted to an
`OrderedDict`. The keys should be strings and will be used to name the
output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
`features` is a dict, it will be sorted by key.
Args:
features: An object that can be converted to an `OrderedDict` mapping
column names to feature definitions.
"""
super(ExampleParser, self).__init__()
if isinstance(features, dict):
self._ordered_features = collections.OrderedDict(sorted(features.items(
), key=lambda f: f[0]))
else:
self._ordered_features = collections.OrderedDict(features)
@property
def name(self):
return "ExampleParser"
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return list(self._ordered_features.keys())
@transform._parameter # pylint: disable=protected-access
def feature_definitions(self):
return self._ordered_features
def _apply_transform(self, input_tensors, **kwargs):
parsed_values = parsing_ops.parse_example(input_tensors[0],
features=self._ordered_features)
# pylint: disable=not-callable
return self.return_type(**parsed_values)
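# Hypothetical usage sketch (editor's illustration, not part of the original module):
# build an ExampleParser from an assumed feature spec.  The feature names and types
# below are illustrative only; because a plain dict is passed, the keys are sorted,
# so the output columns come out as ("age", "tags").
def _example_parser_usage():
  from tensorflow.python.framework import dtypes
  features = {
      "tags": parsing_ops.VarLenFeature(dtype=dtypes.string),
      "age": parsing_ops.FixedLenFeature([1], dtype=dtypes.int64),
  }
  parser = ExampleParser(features)
  return parser.name, parser.input_valency, parser._output_names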
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/learn/python/learn/dataframe/transforms/example_parser.py | Python | apache-2.0 | 2,407 | 0.005401 |
import pymongo
import json
cache = {}
client = pymongo.MongoClient()
try:
dbConf = json.load(open("./model/db.json","r"))
except:
try:
dbConf = json.load(open("./db.json","r"))
except:
dbConf = {"dbname": "voteview"}
db = client[dbConf["dbname"]]
def metaLookup(api = ""):
if not api:
returnDict = {"loyalty_counts": 0}
elif api == "Web_Members":
returnDict = {"nominate": 0}
for m in db.voteview_metadata.find({}, returnDict).sort('time', -1).limit(1):
meta = m
return meta
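def latestMetaField(field, api="Web_Members"):
	'''Hypothetical usage sketch (editor's illustration, not part of the original
	module): fetch the most recent metadata document and read a single field from
	it; returns None when the collection is empty or the field is absent.'''
	meta = metaLookup(api=api)
	if meta is None:
		return None
	return meta.get(field)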
| JeffreyBLewis/WebVoteView | model/searchMeta.py | Python | apache-2.0 | 568 | 0.03169 |
# encoding=utf-8
'''
Created on 2013-9-8
@author: gudh
'''
start_pos = (5, 222)  # starting position of the board
block_size = (67, 67) # size of a single block
rel_pos = (33, 28)    # sample-point offset relative to the block origin
colors = (
          (255, 255, 255), # white
          (164, 130, 213), # purple
          (247, 214, 82),  # yellow
          (244, 160, 90),  # earth/brown
          (90, 186, 238),  # blue
          (247, 69, 95),   # red
          (173, 235, 82)   # green
          )
colornames = (u'ba', u'zh', u'hu', u'tu', u'la', u'ho', u'lv')
ax = (35, 35, 35) # allowed per-channel color tolerance
def get_pix(img):
    '''Debug helper: sample block colors to verify the start position and block size'''
m = 5
n = 222 + 67
x = 67
for i in range(7):
print "c%d = %s" % (i + 1, get_color(img, m + i * x + 33, n + 20)[0:3])
def get_pos(i, j):
    '''Get the sample point inside block (i, j)'''
x = start_pos[0] + i * block_size[0] + rel_pos[0]
y = start_pos[1] + j * block_size[1] + rel_pos[1]
return (x, y)
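# Worked example (editor's illustration): for block (i=2, j=3) the sample point is
#   x = 5 + 2*67 + 33 = 172
#   y = 222 + 3*67 + 28 = 451
# so get_pos(2, 3) == (172, 451).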
def get_rc_pos(rc):
    '''Get the sample point for a (row, col) pair; note that row and column are swapped'''
x = start_pos[0] + rc[1] * block_size[0] + rel_pos[0]
y = start_pos[1] + rc[0] * block_size[1] + rel_pos[1]
return (x, y)
def get_block(i, j):
    '''Get the bounding box of block (i, j)'''
x = start_pos[0] + i * block_size[0]
y = start_pos[1] + j * block_size[1]
w = x + block_size[0]
h = y + block_size[1]
return (x, y, w, h)
def similar_color(p, color):
    '''Check whether pixel p is similar to the given color'''
#print p, color
for i in range(3):
if abs(p[i] - color[i]) >= ax [i]:
return False
return True
def get_color(img, i, j):
    '''Get the color index of the sample point in block (i, j)'''
p = get_pos(i, j)
#print p
index = 0
color = img.getRawPixel(p[0], p[1])[1:]
#color = img.getpixel(p)
for index in range(len(colors)):
if similar_color(color, colors[index]):
return index
return -1
def get_pic_info(img):
    '''Get the color-index matrix of the whole board'''
mat = []
blank_c = 0
for j in range(7):
mx = []
for i in range(7):
c = get_color(img, i, j)
mx.append(c)
if c == -1:
blank_c += 1
mat.append(mx)
print_mat(mat)
if(blank_c > 7):
print "blank is %d, return None" % blank_c
mat = None
return mat
def cut_all(img):
    '''Crop every individual block and save it as an image'''
for j in range(7):
for i in range(7):
b = get_block(i, j)
im = img.crop(b)
im.save("c:/m/%d%d.jpg" % (i, j), "JPEG")
def print_mat(mat):
    '''Print the result matrix'''
print ".", "|", "0 1 2 3 4 5 6"
i = 0
for m in mat:
print i,"|",
i += 1
for n in m:
if n < 0:
print "No",
else:
print colornames[n],
print
if __name__ == "main":
import Image
img = Image.open(r"c:/m.png")
mat = get_pic_info(img)
| Yhzhtk/pytool | game/aixiaochu/xiaochu.py | Python | gpl-2.0 | 2,786 | 0.013943 |
import os
import shutil
import tempfile
from nose.tools import eq_, ok_
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Permission
from django.core.files import File
from crashstats.tokens.models import Token
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.symbols import models
from crashstats.symbols.views import check_symbols_archive_content
from .base import ZIP_FILE, TARGZ_FILE, TGZ_FILE, TAR_FILE
class EmptyFile(object):
def __init__(self, name):
self.name = name
def read(self):
return ''
def size(self):
return 0
class TestViews(BaseTestViews):
def setUp(self):
super(TestViews, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
super(TestViews, self).tearDown()
shutil.rmtree(self.tmp_dir)
def _login(self):
user = User.objects.create_user('test', 'test@mozilla.com', 'secret')
assert self.client.login(username='test', password='secret')
return user
def test_check_symbols_archive_content(self):
content = """
HEADER 1
HEADER 2
Line 1
Line Two
Line Three
"""
# check that the header is not checked
disallowed = ('HEADER',)
with self.settings(DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
error = check_symbols_archive_content(content.strip())
ok_(not error)
# match something
disallowed = ('Two', '2')
with self.settings(DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
error = check_symbols_archive_content(content.strip())
ok_(error)
ok_('Two' in error)
# match nothing
disallowed = ('evil', 'Bad')
with self.settings(DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
error = check_symbols_archive_content(content.strip())
ok_(not error)
def test_home(self):
self._create_group_with_permission('upload_symbols')
url = reverse('symbols:home')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
self._login()
with self.settings(SYMBOLS_PERMISSION_HINT_LINK=None):
response = self.client.get(url)
eq_(response.status_code, 200)
link = {
'url': 'https://bugzilla.mozilla.org',
'label': 'Bugzilla'
}
with self.settings(SYMBOLS_PERMISSION_HINT_LINK=link):
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(link['url'] in response.content)
ok_(link['label'] in response.content)
def test_home_with_previous_uploads(self):
url = reverse('symbols:home')
user = self._login()
self._add_permission(user, 'upload_symbols')
upload1 = models.SymbolsUpload.objects.create(
user=user,
content='file1\nfile2',
filename='file1.zip',
size=12345
)
upload2 = models.SymbolsUpload.objects.create(
user=user,
content='file1\nfile2',
filename='sample.zip',
size=10000
)
with open(ZIP_FILE) as f:
upload2.file.save('sample.zip', File(f))
response = self.client.get(url)
eq_(response.status_code, 200)
# note that the file for upload1 does not exist
ok_(
reverse('symbols:download', args=(upload1.pk,))
not in response.content
)
# but you can for upload 2
ok_(
reverse('symbols:download', args=(upload2.pk,))
in response.content
)
# but you can preview both
ok_(
reverse('symbols:preview', args=(upload1.pk,))
in response.content
)
ok_(
reverse('symbols:preview', args=(upload2.pk,))
in response.content
)
def test_web_upload(self):
url = reverse('symbols:web_upload')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
# you need to have the permission
self._add_permission(user, 'upload_symbols')
response = self.client.get(url)
eq_(response.status_code, 200)
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(ZIP_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, os.path.basename(ZIP_FILE))
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
def test_web_upload_disallowed_content(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# because the file ZIP_FILE contains the word `south-africa-flag.jpeg`
# it should not be allowed to be uploaded
disallowed = ('flag',)
with self.settings(MEDIA_ROOT=self.tmp_dir,
DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
with open(ZIP_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 400)
ok_('flag' in response.content)
def test_web_upload_tar_gz_file(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(TARGZ_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, os.path.basename(TARGZ_FILE))
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
def test_web_upload_tgz_file(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(TGZ_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, os.path.basename(TGZ_FILE))
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
def test_web_upload_tar_file(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(TAR_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, os.path.basename(TAR_FILE))
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
def test_api_upload_about(self):
url = reverse('symbols:api_upload')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
self._add_permission(user, 'upload_symbols')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('you need to generate' in response.content)
token = Token.objects.create(
user=user,
)
token.permissions.add(
Permission.objects.get(codename='upload_symbols')
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('you need to generate' not in response.content)
def test_upload(self):
user = User.objects.create(username='user')
self._add_permission(user, 'upload_symbols')
token = Token.objects.create(
user=user,
)
token.permissions.add(
Permission.objects.get(codename='upload_symbols')
)
url = reverse('symbols:upload')
response = self.client.get(url)
eq_(response.status_code, 405)
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(ZIP_FILE, 'rb') as file_object:
response = self.client.post(
url,
{'file.zip': file_object},
HTTP_AUTH_TOKEN=token.key
)
eq_(response.status_code, 201)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, 'file.zip')
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
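    # Hypothetical client-side sketch (editor's illustration, not part of the test
    # suite): roughly the equivalent upload from the command line.  The host and URL
    # path are made up; "Auth-Token" is the header that HTTP_AUTH_TOKEN above
    # corresponds to.
    #
    #   curl -X POST -H "Auth-Token: <api token>" \
    #        -F "file.zip=@symbols.zip" \
    #        https://crash-stats.example.com/symbols/upload/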
def test_upload_disallowed_content(self):
user = User.objects.create(username='user')
self._add_permission(user, 'upload_symbols')
token = Token.objects.create(
user=user,
)
token.permissions.add(
Permission.objects.get(codename='upload_symbols')
)
url = reverse('symbols:upload')
# because the file ZIP_FILE contains the word `south-africa-flag.jpeg`
# it should not be allowed to be uploaded
disallowed = ('flag',)
with self.settings(MEDIA_ROOT=self.tmp_dir,
DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
with open(ZIP_FILE, 'rb') as file_object:
response = self.client.post(
url,
{'file.zip': file_object},
HTTP_AUTH_TOKEN=token.key
)
eq_(response.status_code, 400)
ok_('flag' in response.content)
def test_upload_empty_file(self):
user = User.objects.create(username='user')
self._add_permission(user, 'upload_symbols')
token = Token.objects.create(
user=user,
)
token.permissions.add(
Permission.objects.get(codename='upload_symbols')
)
url = reverse('symbols:upload')
response = self.client.get(url)
eq_(response.status_code, 405)
with self.settings(MEDIA_ROOT=self.tmp_dir):
response = self.client.post(
url,
{'file.zip': EmptyFile('file.zip')},
HTTP_AUTH_TOKEN=token.key
)
eq_(response.status_code, 400)
def test_download(self):
user = User.objects.create_user('test', 'test@mozilla.com', 'secret')
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(ZIP_FILE, 'rb') as file_object:
upload = models.SymbolsUpload.objects.create(
user=user,
file=File(file_object),
filename=os.path.basename(ZIP_FILE),
size=12345,
content='Content'
)
url = reverse('symbols:download', args=(upload.pk,))
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
assert self.client.login(username='test', password='secret')
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/zip')
eq_(
response['Content-Disposition'],
'attachment; filename="sample.zip"'
)
# log in as someone else
user = User.objects.create_user(
'else', 'else@mozilla.com', 'secret'
)
assert self.client.login(username='else', password='secret')
response = self.client.get(url)
eq_(response.status_code, 403)
user.is_superuser = True
user.save()
assert self.client.login(username='else', password='secret')
response = self.client.get(url)
eq_(response.status_code, 200)
def test_preview(self):
user = User.objects.create_user('test', 'test@mozilla.com', 'secret')
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(ZIP_FILE, 'rb') as file_object:
upload = models.SymbolsUpload.objects.create(
user=user,
file=File(file_object),
filename=os.path.basename(ZIP_FILE),
size=12345,
content='Content'
)
url = reverse('symbols:preview', args=(upload.pk,))
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
assert self.client.login(username='test', password='secret')
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response.content, 'Content')
eq_(response['Content-Type'], 'text/plain')
# log in as someone else
user = User.objects.create_user(
'else', 'else@mozilla.com', 'secret'
)
assert self.client.login(username='else', password='secret')
response = self.client.get(url)
eq_(response.status_code, 403)
user.is_superuser = True
user.save()
assert self.client.login(username='else', password='secret')
response = self.client.get(url)
eq_(response.status_code, 200)
| bsmedberg/socorro | webapp-django/crashstats/symbols/tests/test_views.py | Python | mpl-2.0 | 15,401 | 0 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import signal
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornadoredis
import torndb
from tornado.options import options, define
import log
import handlers.test
import handlers.common  # provides Better404Handler used in the route table below
from settings import (
DEBUG, PORT, HOST,
MYSQL_CONFIG, REDIS_CONFIG
)
class Application(tornado.web.Application):
def __init__(self, *args, **kwargs):
_handlers = [
(r"/test/hello", handlers.test.TestHandler),
(r".*", handlers.common.Better404Handler),
]
_settings = {
"debug": options.debug,
}
self.db = torndb.Connection(**MYSQL_CONFIG)
self.redis_conn = tornadoredis.Client(**REDIS_CONFIG)
tornado.web.Application.__init__(self, _handlers, **_settings)
def sig_handler(sig, frame):
log.warning('Caught signal: %s', sig)
tornado.ioloop.IOLoop.instance().stop()
def main():
# Tricks enable some log features of tornado
options.parse_command_line()
log.info("server start")
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
app = Application()
http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
http_server.listen(options.port, options.host)
tornado.ioloop.IOLoop.instance().start()
define("port", default=PORT, help="port", type=int)
define("host", default=HOST, help="host", type=str)
define("debug", default=DEBUG, help="debug mode", type=bool)
if __name__ == '__main__':
main()
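# Usage sketch (assumed invocation, editor's illustration):
#   python app.py --port=9000 --host=127.0.0.1 --debug=True
# tornado.options.parse_command_line() picks these flags up and overrides the
# defaults declared with define() above.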
| zhkzyth/storm_maker | template/src/app.py | Python | mit | 1,540 | 0 |
"""
.. module:: poes
:synopsis: A module for reading, writing, and storing poes Data
.. moduleauthor:: AJ, 20130129
*********************
**Module**: gme.sat.poes
*********************
**Classes**:
* :class:`poesRec`
**Functions**:
* :func:`readPoes`
* :func:`readPoesFtp`
* :func:`mapPoesMongo`
* :func:`overlayPoesTed`
"""
from davitpy.gme.base.gmeBase import gmeData
class poesRec(gmeData):
"""a class to represent a record of poes data. Extends :class:`gmeBase.gmeData`. Insight on the class members can be obtained from `the NOAA NGDC site <ftp://satdat.ngdc.noaa.gov/sem/poes/data/readme.txt>`_. Note that Poes data is available from 1998-present day (or whatever the latest NOAA has uploaded is). **The data are the 16-second averages**
**Members**:
* **time** (`datetime <http://tinyurl.com/bl352yx>`_): an object identifying which time these data are for
* **info** (str): information about where the data come from. *Please be courteous and give credit to data providers when credit is due.*
* **dataSet** (str): the name of the data set
* **satnum** (ind): the noaa satellite number
* **sslat** (float): Geographic Latitude of sub-satellite point, degrees
* **sslon** (float): Geographic Longitude of sub-satellite point, degrees
* **folat** (float): Geographic Latitude of foot-of-field-line, degrees
* **folon** (float): Geographic Longitude of foot-of-field-line, degrees
* **lval** (float): L-value
* **mlt** (float): Magnetic local time of foot-of-field-line, degrees
* **pas0** (float): MEPED-0 pitch angle at satellite, degrees
* **pas90** (float): MEPED-90 pitch angle at satellite, degrees
* **mep0e1** (float): MEPED-0 > 30 keV electrons, counts/sec
* **mep0e2** (float): MEPED-0 > 100 keV electrons, counts/sec
* **mep0e3** (float): MEPED-0 > 300 keV electrons, counts/sec
* **mep0p1** (float):MEPED-0 30 keV to 80 keV protons, counts/sec
* **mep0p2** (float): MEPED-0 80 keV to 240 keV protons, counts/sec
* **mep0p3** (float): 240 kev to 800 keV protons, counts/sec
* **mep0p4** (float): MEPED-0 800 keV to 2500 keV protons, counts/sec
* **mep0p5** (float): MEPED-0 2500 keV to 6900 keV protons, counts/sec
* **mep0p6** (float): MEPED-0 > 6900 keV protons, counts/sec,
* **mep90e1** (float): MEPED-90 > 30 keV electrons, counts/sec,
* **mep90e2** (float): MEPED-90 > 100 keV electrons, counts/sec
* **mep90e3** (float): MEPED-90 > 300 keV electrons, counts/sec
* **mep90p1** (float): MEPED-90 30 keV to 80 keV protons, counts/sec
* **mep90p2** (float): MEPED-90 80 keV to 240 keV protons, counts/sec
* **mep90p3** (float): MEPED-90 240 kev to 800 keV protons, counts/sec,
* **mep90p4** (float): MEPED-90 800 keV to 2500 keV protons, counts/sec
* **mep90p5** (float): MEPED-90 2500 keV to 6900 keV protons, counts/sec
* **mep90p6** (float):MEPED-90 > 6900 keV protons, counts/sec
* **mepomp6** (float): MEPED omni-directional > 16 MeV protons, counts/sec
* **mepomp7** (float): MEPED omni-directional > 36 Mev protons, counts/sec
* **mepomp8** (float): MEPED omni-directional > 70 MeV protons, counts/sec
* **mepomp9** (float): MEPED omni-directional >= 140 MeV protons
* **ted** (float): TED, Total Energy Detector Average, ergs/cm2/sec
* **echar** (float): TED characteristic energy of electrons, eV
* **pchar** (float): TED characteristic energy of protons, eV
* **econtr** (float): TED electron contribution, Electron Energy/Total Energy
.. note::
If any of the members have a value of None, this means that they could not be read for that specific time
**Methods**:
* :func:`parseFtp`
**Example**:
::
emptyPoesObj = gme.sat.poesRec()
written by AJ, 20130131
"""
def parseFtp(self,line, header):
"""This method is used to convert a line of poes data read from the NOAA NGDC FTP site into a :class:`poesRec` object.
.. note::
In general, users will not need to worry about this.
**Belongs to**: :class:`poesRec`
**Args**:
* **line** (str): the ASCII line from the FTP server
**Returns**:
* Nothing.
**Example**:
::
myPoesObj.parseFtp(ftpLine)
written by AJ, 20130131
"""
import datetime as dt
#split the line into cols
cols = line.split()
head = header.split()
self.time = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]), int(cols[3]),int(cols[4]), \
int(float(cols[5])),int(round((float(cols[5])-int(float(cols[5])))*1e6)))
for key in self.__dict__.iterkeys():
if(key == 'dataSet' or key == 'info' or key == 'satnum' or key == 'time'): continue
try: ind = head.index(key)
except Exception,e:
print e
print 'problem setting attribute',key
#check for a good value
if(float(cols[ind]) != -999.): setattr(self,key,float(cols[ind]))
def __init__(self, ftpLine=None, dbDict=None, satnum=None, header=None):
"""the intialization fucntion for a :class:`omniRec` object.
.. note::
In general, users will not need to worry about this.
		**Belongs to**: :class:`poesRec`
**Args**:
* [**ftpLine**] (str): an ASCII line from the FTP server. if this is provided, the object is initialized from it. header must be provided in conjunction with this. default=None
* [**header**] (str): the header from the ASCII FTP file. default=None
* [**dbDict**] (dict): a dictionary read from the mongodb. if this is provided, the object is initialized from it. default = None
			* [**satnum**] (int): the satellite number. default=None
**Returns**:
* Nothing.
**Example**:
::
myPoesObj = poesRec(ftpLine=aftpLine)
written by AJ, 20130131
"""
#note about where data came from
self.dataSet = 'Poes'
		self.info = 'These data were downloaded from NOAA NGDC.  *Please be courteous and give credit to data providers when credit is due.*'
self.satnum = satnum
self.sslat = None
self.sslon = None
self.folat = None
self.folon = None
self.lval = None
self.mlt = None
self.pas0 = None
self.pas90 = None
self.mep0e1 = None
self.mep0e2 = None
self.mep0e3 = None
self.mep0p1 = None
self.mep0p2 = None
self.mep0p3 = None
self.mep0p4 = None
self.mep0p5 = None
self.mep0p6 = None
self.mep90e1 = None
self.mep90e2 = None
self.mep90e3 = None
self.mep90p1 = None
self.mep90p2 = None
self.mep90p3 = None
self.mep90p4 = None
self.mep90p5 = None
self.mep90p6 = None
self.mepomp6 = None
self.mepomp7 = None
self.mepomp8 = None
self.mepomp9 = None
self.ted = None
self.echar = None
self.pchar = None
self.econtr = None
#if we're initializing from an object, do it!
if(ftpLine != None): self.parseFtp(ftpLine,header)
if(dbDict != None): self.parseDb(dbDict)
def readPoes(sTime,eTime=None,satnum=None,folat=None,folon=None,ted=None,echar=None,pchar=None):
"""This function reads poes data. First, it will try to get it from the mongodb, and if it can't find it, it will look on the NOAA NGDC FTP server using :func:`readPoesFtp`. The data are 16-second averages
**Args**:
* **sTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the earliest time you want data for
* [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, end Time will be 1 day after sTime. default = None
* [**satnum**] (int): the satellite you want data for. eg 17 for noaa17. if this is None, data for all satellites will be returned. default = None
		* [**folat**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b].  In this case, only data with foot-of-field-line latitude values in the range [a,b] will be returned.  default = None
		* [**folon**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b].  In this case, only data with foot-of-field-line longitude values in the range [a,b] will be returned.  default = None
		* [**ted**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b].  In this case, only data with ted values in the range [a,b] will be returned.  default = None
		* [**echar**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b].  In this case, only data with echar values in the range [a,b] will be returned.  default = None
		* [**pchar**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b].  In this case, only data with pchar values in the range [a,b] will be returned.  default = None
**Returns**:
* **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned. If no data is found, None is returned.
**Example**:
::
import datetime as dt
poesList = gme.sat.readPoes(sTime=dt.datetime(2011,1,1),eTime=dt.datetime(2011,6,1),folat=[60,80])
written by AJ, 20130131
"""
import datetime as dt
import davitpy.pydarn.sdio.dbUtils as db
#check all the inputs for validity
assert(isinstance(sTime,dt.datetime)), \
'error, sTime must be a datetime object'
assert(eTime == None or isinstance(eTime,dt.datetime)), \
'error, eTime must be either None or a datetime object'
assert(satnum == None or isinstance(satnum,int)), 'error, satnum must be an int'
var = locals()
for name in ['folat','folon','ted','echar','pchar']:
assert(var[name] == None or (isinstance(var[name],list) and \
isinstance(var[name][0],(int,float)) and isinstance(var[name][1],(int,float)))), \
'error,'+name+' must None or a list of 2 numbers'
if(eTime == None): eTime = sTime+dt.timedelta(days=1)
qryList = []
#if arguments are provided, query for those
qryList.append({'time':{'$gte':sTime}})
if(eTime != None): qryList.append({'time':{'$lte':eTime}})
if(satnum != None): qryList.append({'satnum':satnum})
var = locals()
for name in ['folat','folon','ted','echar','pchar']:
if(var[name] != None):
qryList.append({name:{'$gte':min(var[name])}})
qryList.append({name:{'$lte':max(var[name])}})
#construct the final query definition
qryDict = {'$and': qryList}
#connect to the database
poesData = db.getDataConn(dbName='gme',collName='poes')
#do the query
if(qryList != []): qry = poesData.find(qryDict)
else: qry = poesData.find()
if(qry.count() > 0):
poesList = []
for rec in qry.sort('time'):
poesList.append(poesRec(dbDict=rec))
print '\nreturning a list with',len(poesList),'records of poes data'
return poesList
#if we didn't find anything on the mongodb
else:
print '\ncould not find requested data in the mongodb'
return None
#print 'we will look on the ftp server, but your conditions will be (mostly) ignored'
##read from ftp server
#poesList = readPoesFtp(sTime, eTime)
#if(poesList != None):
#print '\nreturning a list with',len(poesList),'recs of poes data'
#return poesList
#else:
#print '\n no data found on FTP server, returning None...'
#return None
def readPoesFtp(sTime,eTime=None):
"""This function reads poes data from the NOAA NGDC server via anonymous FTP connection.
.. warning::
You should not use this. Use the general function :func:`readPoes` instead.
**Args**:
* **sTime** (`datetime <http://tinyurl.com/bl352yx>`_): the earliest time you want data for
		* [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, eTime will be equal to 1 day after sTime. default = None
**Returns**:
* **poesList** (list or None): if data is found, a list of :class:`poesRec` objects matching the input parameters is returned. If no data is found, None is returned.
**Example**:
::
import datetime as dt
			poesList = gme.sat.readPoesFtp(dt.datetime(2011,1,1,1,50),eTime=dt.datetime(2011,1,1,10,0))
written by AJ, 20130128
"""
from ftplib import FTP
import datetime as dt
assert(isinstance(sTime,dt.datetime)),'error, sTime must be datetime'
if(eTime == None): eTime=sTime+dt.timedelta(days=1)
assert(isinstance(eTime,dt.datetime)),'error, eTime must be datetime'
assert(eTime >= sTime), 'error, end time greater than start time'
#connect to the server
try: ftp = FTP('satdat.ngdc.noaa.gov')
except Exception,e:
print e
print 'problem connecting to NOAA server'
return None
#login as anonymous
try: l=ftp.login()
except Exception,e:
print e
print 'problem logging in to NOAA server'
return None
myPoes = []
#get the poes data
myTime = dt.datetime(sTime.year,sTime.month,sTime.day)
while(myTime <= eTime):
#go to the data directory
try: ftp.cwd('/sem/poes/data/avg/txt/'+str(myTime.year))
except Exception,e:
print e
print 'error getting to data directory'
return None
#list directory contents
dirlist = ftp.nlst()
for dire in dirlist:
#check for satellite directory
if(dire.find('noaa') == -1): continue
satnum = dire.replace('noaa','')
			#change to the file directory
ftp.cwd('/sem/poes/data/avg/txt/'+str(myTime.year)+'/'+dire)
fname = 'poes_n'+satnum+'_'+myTime.strftime("%Y%m%d")+'.txt'
print 'poes: RETR '+fname
#list to hold the lines
lines = []
#get the data
try: ftp.retrlines('RETR '+fname,lines.append)
except Exception,e:
print e
print 'error retrieving',fname
#convert the ascii lines into a list of poesRec objects
#skip first (header) line
for line in lines[1:]:
cols = line.split()
t = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]), int(cols[3]),int(cols[4]))
if(sTime <= t <= eTime):
myPoes.append(poesRec(ftpLine=line,satnum=int(satnum),header=lines[0]))
#increment myTime
myTime += dt.timedelta(days=1)
if(len(myPoes) > 0): return myPoes
else: return None
def mapPoesMongo(sYear,eYear=None):
"""This function reads poes data from the NOAA NGDC FTP server via anonymous FTP connection and maps it to the mongodb.
.. warning::
In general, nobody except the database admins will need to use this function
**Args**:
* **sYear** (int): the year to begin mapping data
* [**eYear**] (int or None): the end year for mapping data. if this is None, eYear will be sYear
**Returns**:
* Nothing.
**Example**:
::
gme.sat.mapPoesMongo(2004)
written by AJ, 20130131
"""
import davitpy.pydarn.sdio.dbUtils as db
from davitpy import rcParams
import datetime as dt
#check inputs
assert(isinstance(sYear,int)),'error, sYear must be int'
if(eYear == None): eYear=sYear
assert(isinstance(eYear,int)),'error, sYear must be None or int'
assert(eYear >= sYear), 'error, end year greater than start year'
#get data connection
mongoData = db.getDataConn(username=rcParams['DBWRITEUSER'],password=rcParams['DBWRITEPASS'],\
dbAddress=rcParams['SDDB'],dbName='gme',collName='poes')
#set up all of the indices
mongoData.ensure_index('time')
mongoData.ensure_index('satnum')
mongoData.ensure_index('folat')
mongoData.ensure_index('folon')
mongoData.ensure_index('ted')
mongoData.ensure_index('echar')
mongoData.ensure_index('pchar')
#read the poes data from the FTP server
myTime = dt.datetime(sYear,1,1)
while(myTime < dt.datetime(eYear+1,1,1)):
#10 day at a time, to not fill up RAM
templist = readPoesFtp(myTime,myTime+dt.timedelta(days=10))
if(templist == None): continue
for rec in templist:
#check if a duplicate record exists
qry = mongoData.find({'$and':[{'time':rec.time},{'satnum':rec.satnum}]})
print rec.time, rec.satnum
tempRec = rec.toDbDict()
cnt = qry.count()
#if this is a new record, insert it
if(cnt == 0): mongoData.insert(tempRec)
#if this is an existing record, update it
elif(cnt == 1):
print 'foundone!!'
dbDict = qry.next()
temp = dbDict['_id']
dbDict = tempRec
dbDict['_id'] = temp
mongoData.save(dbDict)
else:
print 'strange, there is more than 1 record for',rec.time
del templist
myTime += dt.timedelta(days=10)
def overlayPoesTed( baseMapObj, axisHandle, startTime, endTime = None, coords = 'geo', \
hemi = 1, folat = [45., 90.], satNum = None, param='ted', scMin=-3.,scMax=0.5) :
"""This function overlays POES TED data onto a map object.
**Args**:
* **baseMapObj** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the map object you want data to be overlayed on.
* **axisHandle** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the Axis Handle used.
		* **startTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the start time you want data for. If endTime is not given, data from satellites within +/- 45 min of the startTime is overlaid
		* [**endTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, data from satellites within +/- 45 min of the startTime is overlaid. default = None
* [**satnum**] (int): the satellite you want data for. eg 17 for noaa17. if this is None, data for all satellites will be returned. default = None
* [**coords**] (str): Coordinates of the map object on which you want data to be overlayed on, 'geo', 'mag', 'mlt'. Default 'geo'
		* [**hemi**] (int): Hemisphere of the map object on which you want data to be overlaid. Value is 1 for the northern hemisphere and -1 for the southern hemisphere. Default 1
		* [**folat**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with foot-of-field-line latitude values in the range [a,b] will be returned. default = None
* [**param**] (str): the name of the poes parameter to be plotted. default='ted'
**Returns**:
POES TED data is overlayed on the map object. If no data is found, None is returned.
**Example**:
::
import datetime as dt
			pltPoes = gme.sat.overlayPoesTed(MapObj, ax, dt.datetime(2011,3,4,4))
written by Bharat Kunduri, 20130216
"""
import utils
import matplotlib as mp
import datetime
import numpy
import matplotlib.pyplot as plt
import gme.sat.poes as Poes
import math
import models
import matplotlib.cm as cm
from scipy import optimize
#check all the inputs for validity
assert(isinstance(startTime,datetime.datetime)), \
'error, sTime must be a datetime object'
assert(endTime == None or isinstance(endTime,datetime.datetime)), \
'error, eTime must be either None or a datetime object'
var = locals()
assert(var['satNum'] == None or (isinstance(var['satNum'],list) )), \
'error, satNum must None or a list of satellite (integer) numbers'
if satNum != None :
assert( len(satNum) <= 5 ), \
		'error, there are only 5 POES satellites in operation (at least when I wrote this code)'
assert(var['folat'] == None or (isinstance(var['folat'],list) and \
isinstance(var['folat'][0],(int,float)) and isinstance(var['folat'][1],(int,float)))), \
'error, folat must None or a list of 2 numbers'
# Check the hemisphere and get the appropriate folat
folat = [ math.fabs( folat[0] ) * hemi, math.fabs( folat[1] ) * hemi ]
# Check if the endTime is given in which case the user wants a specific time interval to search for
# If not we'll give him the best available passes for the selected start time...
if ( endTime != None ) :
timeRange = numpy.array( [ startTime, endTime ] )
else :
timeRange = None
pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )
# check if the timeRange is set... if not set the timeRange to +/- pltTimeInterval of the startTime
if timeRange == None:
timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )
# SatNums - currently operational POES satellites are 15, 16, 17, 18, 19
if satNum == None:
satNum = [None]
# If any particular satellite number is not chosen by user loop through all the available one's
satNum = numpy.array( satNum ) # I like numpy arrays better that's why I'm converting the satNum list to a numpy array
latPoesAll = [[] for j in range(len(satNum))]
lonPoesAll = [[] for j in range(len(satNum))]
tedPoesAll = [[] for j in range(len(satNum))]
timePoesAll = [[] for j in range(len(satNum))]
lenDataAll = [[] for j in range(len(satNum))]
goodFlg=False
for sN in range(len(satNum)) :
if(satNum[sN] != None):
currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat)
else:
currPoesList = Poes.readPoes(timeRange[0], eTime = timeRange[1], satnum = satNum[sN], folat = folat)
# Check if the data is loaded...
if currPoesList == None :
print 'No data found'
continue
#return None
else:
goodFlg=True
# Loop through the list and store the data into arrays
lenDataAll.append(len(currPoesList))
for l in currPoesList :
# Store our data in arrays
try:
tedPoesAll[sN].append(math.log10(getattr(l,param)))
if coords == 'mag' or coords == 'mlt':
lat,lon,_ = models.aacgm.aacgmConv(l.folat,l.folon, 0., l.time.year, 0)
latPoesAll[sN].append(lat)
if coords == 'mag':
lonPoesAll[sN].append(lon)
else:
lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(l.time),lon)*360./24.)
else:
latPoesAll[sN].append(l.folat)
lonPoesAll[sN].append(l.folon)
timePoesAll[sN].append(l.time)
except Exception,e:
print e
print 'could not get parameter for time',l.time
if(not goodFlg): return None
latPoesAll = numpy.array( latPoesAll )
lonPoesAll = numpy.array( lonPoesAll )
tedPoesAll = numpy.array( tedPoesAll )
timePoesAll = numpy.array( timePoesAll )
lenDataAll = numpy.array( lenDataAll )
poesTicks = [ -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5 ]
# get the axis of the figure...
ax = axisHandle
for nn in range( len(satNum) ) :
x, y = baseMapObj(lonPoesAll[nn], latPoesAll[nn])
bpltpoes = baseMapObj.scatter(x,y,c=tedPoesAll[nn], vmin=scMin, vmax=scMax, alpha = 0.7, cmap=cm.jet, zorder = 7., edgecolor='none')
timeCurr = timePoesAll[nn]
for aa in range( len(latPoesAll[nn]) ) :
if aa % 10 == 0:
str_curr = str(timeCurr[aa].hour)+':'+str(timeCurr[aa].minute)
ax.annotate( str_curr, xy =( x[aa], y[aa] ), size = 5, zorder = 6. )
#cbar = plt.colorbar(bpltpoes, ticks = poesTicks, orientation='horizontal')
#cbar.ax.set_xticklabels(poesTicks)
#cbar.set_label(r"Total Log Energy Flux [ergs cm$^{-2}$ s$^{-1}$]")
return bpltpoes
def overlayPoesBnd( baseMapObj, axisHandle, startTime, coords = 'geo', hemi = 1, equBnd = True, polBnd = False ) :
"""This function reads POES TED data with in +/- 45min of the given time, fits the auroral oval boundaries and overlays them on a map object. The poleward boundary is not accurate all the times due to lesser number of satellite passes identifying it.
**Args**:
* **baseMapObj** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the map object you want data to be overlayed on.
* **axisHandle** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the Axis Handle used.
* **startTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the starttime you want data for. If endTime is not given overlays data from satellites with in +/- 45 min of the startTime
* [**coords**] (list or None): Coordinates of the map object on which you want data to be overlayed on. Default 'geo'
* [**hemi**] (list or None): Hemisphere of the map object on which you want data to be overlayed on. Value is 1 for northern hemisphere and -1 for the southern hemisphere.Default 1
* [**equBnd**] (list or None): If this is True the equatorward auroral oval boundary fit from the TED data is overlayed on the map object. Default True
* [**polBnd**] (list or None): If this is True the poleward auroral oval boundary fit from the TED data is overlayed on the map object. Default False
**Returns**:
POES TED data is overlayed on the map object. If no data is found, None is returned.
**Example**:
::
import datetime as dt
poesList = gme.sat.overlayPoesTed(MapObj, sTime=dt.datetime(2011,3,4,4))
written by Bharat Kunduri, 20130216
"""
import utils
import matplotlib as mp
import datetime
import numpy
import matplotlib.pyplot as plt
import gme.sat.poes as Poes
import math
import matplotlib.cm as cm
from scipy import optimize
import models
#check all the inputs for validity
assert(isinstance(startTime,datetime.datetime)), \
        'error, startTime must be a datetime object'
# Check the hemisphere and get the appropriate folat
folat = [ 45. * hemi, 90. * hemi ]
# Get the time range we choose +/- 45 minutes....
pltTimeInterval = numpy.array( datetime.timedelta( minutes = 45 ) )
timeRange = numpy.array( [ startTime - pltTimeInterval, startTime + pltTimeInterval ] )
satNum = [ 15, 16, 17, 18, 19 ]
# We set the TED cut-off value to -0.75,
# From observed cases this appeared to do well...
# though fails sometimes especially during geomagnetically quiet times...
# However this is version 1.0 and there always is room for improvement
equBndCutoffVal = -0.75
    # If no particular satellite number is chosen by the user, loop through all the available ones
    satNum = numpy.array( satNum ) # I like numpy arrays better, that's why I'm converting the satNum list to a numpy array
latPoesAll = [[] for j in range(len(satNum))]
lonPoesAll = [[] for j in range(len(satNum))]
tedPoesAll = [[] for j in range(len(satNum))]
timePoesAll = [[] for j in range(len(satNum))]
lenDataAll = [[] for j in range(len(satNum))]
for sN in range( len(satNum) ) :
currPoesList = Poes.readPoes( timeRange[0], eTime = timeRange[1], satnum = int(satNum[sN]), folat = folat )
# Check if the data is loaded...
if currPoesList == None :
print 'No data found'
continue
# Loop through the list and store the data into arrays
lenDataAll.append( len( currPoesList ) )
for l in range( lenDataAll[-1] ) :
# Store our data in arrays if the TED data value is > than the cutoff value
try:
x = math.log10(currPoesList[l].ted)
except:
continue
if x > equBndCutoffVal:
if coords == 'mag' or coords == 'mlt':
lat,lon,_ = models.aacgm.aacgmConv(currPoesList[l].folat,currPoesList[l].folon, 0., currPoesList[l].time.year, 0)
latPoesAll[sN].append(lat)
if coords == 'mag':
lonPoesAll[sN].append(lon)
else:
lonPoesAll[sN].append(models.aacgm.mltFromEpoch(utils.timeUtils.datetimeToEpoch(currPoesList[l].time),lon)*360./24.)
else:
latPoesAll[sN].append(currPoesList[l].folat)
lonPoesAll[sN].append(currPoesList[l].folon)
# latPoesAll[sN].append( currPoesList[l].folat )
# lonPoesAll[sN].append( currPoesList[l].folon )
tedPoesAll[sN].append( math.log10(currPoesList[l].ted) )
timePoesAll[sN].append( currPoesList[l].time )
latPoesAll = numpy.array( latPoesAll )
lonPoesAll = numpy.array( lonPoesAll )
tedPoesAll = numpy.array( tedPoesAll )
timePoesAll = numpy.array( timePoesAll )
lenDataAll = numpy.array( lenDataAll )
    # Now to identify the boundaries...
    # Also need to check whether the boundary is equatorward or poleward..
    # When the satellite is moving from high-lat to low-lat, a decrease in flux marks the equatorward boundary
    # When the satellite is moving from low-lat to high-lat, an increase in flux marks the equatorward boundary
    # that is what we are trying to check here
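    # Illustrative example (added commentary, not from the original code): suppose one pass has
    # currSatLats = [50., 55., 60.] and currSatTeds = [-2.0, -1.0, -0.5] (log10 flux). The pass
    # moves from low to high |lat| and the flux increases at every step, so lats 50. and 55. are
    # collected as low-to-high equatorward candidates below, and the minimum of those (50.) is
    # kept as the equatorward boundary estimate for this pass.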
eqBndLats = []
eqBndLons = []
poBndLats = []
poBndLons = []
for n1 in range( len(satNum) ) :
currSatLats = latPoesAll[n1]
currSatLons = lonPoesAll[n1]
currSatTeds = tedPoesAll[n1]
testLatArrLtoh = []
testLonArrLtoh = []
testLatArrHtol = []
testLonArrHtol = []
testLatArrLtohP = []
testLonArrLtohP = []
testLatArrHtolP = []
testLonArrHtolP = []
for n2 in range( len(currSatLats)-1 ) :
            #Check if the satellite is moving from low-lat to high-lat or otherwise
if ( math.fabs( currSatLats[n2] ) < math.fabs( currSatLats[n2+1] ) ) :
if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
testLatArrLtoh.append( currSatLats[n2] )
testLonArrLtoh.append( currSatLons[n2] )
if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
testLatArrLtohP.append( currSatLats[n2] )
testLonArrLtohP.append( currSatLons[n2] )
if ( math.fabs( currSatLats[n2] ) > math.fabs( currSatLats[n2+1] ) ) :
if ( currSatTeds[n2] > currSatTeds[n2+1] ) :
testLatArrHtol.append( currSatLats[n2] )
testLonArrHtol.append( currSatLons[n2] )
if ( currSatTeds[n2] < currSatTeds[n2+1] ) :
testLatArrHtolP.append( currSatLats[n2] )
testLonArrHtolP.append( currSatLons[n2] )
# I do this to find the index of the min lat...
if ( testLatArrLtoh != [] ) :
testLatArrLtoh = numpy.array( testLatArrLtoh )
testLonArrLtoh = numpy.array( testLonArrLtoh )
VarEqLat1 = testLatArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
VarEqLon1 = testLonArrLtoh[ numpy.where( testLatArrLtoh == min(testLatArrLtoh) ) ]
eqBndLats.append( VarEqLat1[0] )
eqBndLons.append( VarEqLon1[0] )
if ( testLatArrHtol != [] ) :
testLatArrHtol = numpy.array( testLatArrHtol )
testLonArrHtol = numpy.array( testLonArrHtol )
VarEqLat2 = testLatArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
VarEqLon2 = testLonArrHtol[ numpy.where( testLatArrHtol == min(testLatArrHtol) ) ]
eqBndLats.append( VarEqLat2[0] )
eqBndLons.append( VarEqLon2[0] )
if ( testLatArrLtohP != [] ) :
testLatArrLtohP = numpy.array( testLatArrLtohP )
testLonArrLtohP = numpy.array( testLonArrLtohP )
VarEqLatP1 = testLatArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
VarEqLonP1 = testLonArrLtohP[ numpy.where( testLatArrLtohP == min(testLatArrLtohP) ) ]
if VarEqLatP1[0] > 64. :
poBndLats.append( VarEqLatP1[0] )
poBndLons.append( VarEqLonP1[0] )
if ( testLatArrHtolP != [] ) :
testLatArrHtolP = numpy.array( testLatArrHtolP )
testLonArrHtolP = numpy.array( testLonArrHtolP )
VarEqLatP2 = testLatArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
VarEqLonP2 = testLonArrHtolP[ numpy.where( testLatArrHtolP == min(testLatArrHtolP) ) ]
if VarEqLatP2[0] > 64 :
poBndLats.append( VarEqLatP2[0] )
poBndLons.append( VarEqLonP2[0] )
eqBndLats = numpy.array( eqBndLats )
eqBndLons = numpy.array( eqBndLons )
poBndLats = numpy.array( poBndLats )
poBndLons = numpy.array( poBndLons )
#get the axis Handle used
ax = axisHandle
# Now we do the fitting part...
fitfunc = lambda p, x: p[0] + p[1]*numpy.cos(2*math.pi*(x/360.)+p[2]) # Target function
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
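    # In other words the boundary latitude is modelled as a single-harmonic function of longitude,
    # lat(lon) = p[0] + p[1]*cos(2*pi*lon/360. + p[2]), i.e. one full cosine cycle over 360 degrees
    # of longitude; optimize.leastsq below finds p = [offset, amplitude, phase] by minimising
    # errfunc over the boundary points collected above.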
# Initial guess for the parameters
# Equatorward boundary
p0Equ = [ 1., 1., 1.]
p1Equ, successEqu = optimize.leastsq(errfunc, p0Equ[:], args=(eqBndLons, eqBndLats))
if polBnd == True :
p0Pol = [ 1., 1., 1.]
p1Pol, successPol = optimize.leastsq(errfunc, p0Pol[:], args=(poBndLons, poBndLats))
    allPlotLons = numpy.linspace(0., 360., 25)
allPlotLons[-1] = 0.
eqPlotLats = []
if polBnd == True :
poPlotLats = []
for xx in allPlotLons :
if equBnd == True :
eqPlotLats.append( p1Equ[0] + p1Equ[1]*numpy.cos(2*math.pi*(xx/360.)+p1Equ[2] ) )
if polBnd == True :
poPlotLats.append( p1Pol[0] + p1Pol[1]*numpy.cos(2*math.pi*(xx/360.)+p1Pol[2] ) )
xEqu, yEqu = baseMapObj(allPlotLons, eqPlotLats)
bpltpoes = baseMapObj.plot( xEqu,yEqu, zorder = 7., color = 'b' )
if polBnd == True :
xPol, yPol = baseMapObj(allPlotLons, poPlotLats)
bpltpoes = baseMapObj.plot( xPol,yPol, zorder = 7., color = 'r' )
| Shirling-VT/davitpy_sam | davitpy/gme/sat/poes.py | Python | gpl-3.0 | 33,273 | 0.033751 |
from django.contrib import admin
from .models import *
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class ReviewCategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class BlogCategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class CardAdmin(admin.ModelAdmin):
model = Card
list_display = ['title', 'description', 'get_category', ]
def get_category(self, obj):
return obj.category.title
get_category.admin_order_field = 'category__title'
get_category.short_description = 'Category'
class AccessAdmin(admin.ModelAdmin):
list_display = ['ip', 'visit_count']
class ReviewsAdmin(admin.ModelAdmin):
list_display = ['title', 'description', 'get_category', 'score', 'created']
def get_category(self, obj):
return obj.category.title
get_category.admin_order_field = 'category__title'
get_category.short_description = 'Category'
class BlogAdmin(admin.ModelAdmin):
    model = Blog
list_display = ['title', 'description', 'get_category', 'position',]
def get_category(self, obj):
return obj.category.title
get_category.admin_order_field = 'category__title'
get_category.short_description = 'Category'
admin.site.register(Category, CategoryAdmin)
admin.site.register(Card, CardAdmin)
admin.site.register(AccessCount, AccessAdmin)
admin.site.register(ReviewCategory, ReviewCategoryAdmin)
admin.site.register(Reviews, ReviewsAdmin)
admin.site.register(Blog, BlogAdmin)
admin.site.register(BlogCategory, BlogCategoryAdmin)
| BijoySingh/HomePage | homepage/admin.py | Python | gpl-2.0 | 1,571 | 0.00191 |
#!/usr/bin/env python
#
# CLUES - Cluster Energy Saving System
# Copyright (C) 2015 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import requests
import cpyutils.config
import clueslib.helpers as Helpers
import json, time
import os
from cpyutils.evaluate import TypedClass, TypedList
from cpyutils.log import Log
from clueslib.node import NodeInfo
from clueslib.platform import LRMS
from clueslib.request import Request, ResourcesNeeded, JobInfo
_LOGGER = Log("PLUGIN-NOMAD")
def open_file(file_path):
try:
file_read = open(file_path, 'r')
except:
message = "Could not open file with path '%s'" % file_path
_LOGGER.error(message)
raise Exception(message)
return file_read
def _get_memory_in_bytes(str_memory):
if str_memory.strip()[-2:] in ['Mi', 'Gi', 'Ki', 'Ti']:
        unit = str_memory.strip()[-2:][0]
memory = int(str_memory.strip()[:-2])
elif str_memory.strip()[-1:] in ['M', 'G', 'K', 'T']:
unit = str_memory.strip()[-1:]
memory = int(str_memory.strip()[:-1])
else:
return int(str_memory)
if unit == 'K':
memory *= 1024
elif unit == 'M':
memory *= 1024 * 1024
elif unit == 'G':
memory *= 1024 * 1024 * 1024
elif unit == 'T':
memory *= 1024 * 1024 * 1024 * 1024
return memory
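# Worked examples of the conversion above (added for illustration, not part of the original module):
#   _get_memory_in_bytes('8Gi')  -> 8 * 1024**3 = 8589934592
#   _get_memory_in_bytes('512M') -> 512 * 1024**2 = 536870912
#   _get_memory_in_bytes('2048') -> 2048 (no unit suffix, the value is already in bytes)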
class lrms(LRMS):
def _create_request(self, method, url, headers=None, body=None, auth_data=None):
if body is None:
body = {}
if headers is None:
headers = {}
if self._acl_token is not None:
headers.update({ 'X-Nomad-Token': self._acl_token})
auth = None
if auth_data is not None:
if 'user' in auth_data and 'passwd' in auth_data:
auth=requests.auth.HTTPBasicAuth( auth_data['user'], auth_data['passwd'])
response = {}
retries = 0
ok = False
while (self._max_retries > retries) and (not ok) :
retries += 1
try:
r = requests.request(method, url, verify=self._verify, cert=self._certs, headers=headers, data=body, auth=auth)
response[ 'status_code' ] = r.status_code
response[ 'text' ] = r.text
response[ 'json' ] = r.json()
ok=True
except requests.exceptions.ConnectionError:
_LOGGER.error("Cannot connect to %s, waiting 5 seconds..." % (url))
time.sleep(5)
except ValueError as e:
_LOGGER.error("JSON cannot be decoded: %s" %(r.text))
response[ 'json' ]={}
if not ok:
_LOGGER.error("Cannot connect to %s . Retries: %s" % (url, retries))
response[ 'status_code' ] = -1
response[ 'text' ] = 'No response text'
response[ 'json' ] = {}
return response
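    # Sketch of the return value (added commentary, not part of the original plugin): _create_request
    # always returns a plain dict, e.g. {'status_code': 200, 'text': '...', 'json': [...]} on success,
    # or {'status_code': -1, 'text': 'No response text', 'json': {}} after exhausting self._max_retries
    # connection attempts. Callers below only branch on response['status_code'] and read response['json'].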
def __init__(self, NOMAD_SERVER=None, NOMAD_HEADERS=None, NOMAD_API_VERSION=None, NOMAD_API_URL_GET_ALLOCATIONS=None, NOMAD_API_URL_GET_SERVERS=None, NOMAD_API_URL_GET_CLIENTS=None, NOMAD_API_URL_GET_CLIENT_INFO=None, MAX_RETRIES=None, NOMAD_ACL_TOKEN=None, NOMAD_AUTH_DATA=None, NOMAD_API_URL_GET_CLIENT_STATUS=None, NOMAD_STATE_OFF=None, NOMAD_STATE_ON=None, NOMAD_PRIVATE_HTTP_PORT=None, NOMAD_API_URL_GET_JOBS=None, NOMAD_API_URL_GET_JOBS_INFO=None, NOMAD_API_URL_GET_ALLOCATION_INFO=None, NOMAD_NODES_LIST_CLUES=None, NOMAD_QUEUES=None, NOMAD_QUEUES_OJPN=None, NOMAD_API_URL_GET_CLIENT_ALLOCATIONS=None, NOMAD_DEFAULT_CPUS_PER_NODE=None, NOMAD_DEFAULT_MEMORY_PER_NODE=None, NOMAD_DEFAULT_CPU_GHZ=None, NOMAD_CA_CERT=None, NOMAD_SERVER_CERT=None, NOMAD_SERVER_KEY=None):
config_nomad = cpyutils.config.Configuration(
"NOMAD",
{
"NOMAD_SERVER": "http://localhost:4646",
"NOMAD_HEADERS": "{}",
"NOMAD_API_VERSION": "/v1",
"NOMAD_API_URL_GET_SERVERS": "/agent/members", # Server node
"NOMAD_API_URL_GET_CLIENTS": "/nodes", # Server node
"NOMAD_API_URL_GET_CLIENT_INFO": "/node/$CLIENT_ID$", # Server node
"NOMAD_API_URL_GET_CLIENT_STATUS": "/client/stats", # Client node
"NOMAD_API_URL_GET_CLIENT_ALLOCATIONS": "/node/$CLIENT_ID$/allocations", # Server node
"NOMAD_API_URL_GET_ALLOCATIONS": "/allocations", # Server node
"NOMAD_API_URL_GET_JOBS": "/jobs", # Server node
"NOMAD_API_URL_GET_JOBS_INFO": "/job/$JOB_ID$", # Server node
"NOMAD_API_URL_GET_ALLOCATION_INFO": "/allocation", # Server node
"NOMAD_ACL_TOKEN": None,
"MAX_RETRIES": 10,
"NOMAD_AUTH_DATA": None,
"NOMAD_STATE_OFF": "down",
"NOMAD_STATE_ON": "ready",
"NOMAD_PRIVATE_HTTP_PORT": "4646",
"NOMAD_NODES_LIST_CLUES": "/etc/clues2/nomad_vnodes.info",
"NOMAD_QUEUES": "default",
"NOMAD_QUEUES_OJPN": "", # Queues One Job Per Node
"NOMAD_DEFAULT_CPUS_PER_NODE": 2.0,
"NOMAD_DEFAULT_MEMORY_PER_NODE": "8Gi",
"NOMAD_DEFAULT_CPU_GHZ": 2.6, # Nomad use MHz to manage the jobs assigned CPU
"NOMAD_SERVER_CERT": None,
"NOMAD_SERVER_KEY": None,
"NOMAD_CA_CERT": None,
"NOMAD_TOKEN": None
}
)
self._server_url = Helpers.val_default(NOMAD_SERVER, config_nomad.NOMAD_SERVER).replace('"','')
self._api_version = Helpers.val_default(NOMAD_API_VERSION, config_nomad.NOMAD_API_VERSION).replace('"','')
self._api_url_get_allocations = Helpers.val_default(NOMAD_API_URL_GET_ALLOCATIONS, config_nomad.NOMAD_API_URL_GET_ALLOCATIONS).replace('"','')
self._api_url_get_allocation_info = Helpers.val_default(NOMAD_API_URL_GET_ALLOCATION_INFO, config_nomad.NOMAD_API_URL_GET_ALLOCATION_INFO).replace('"','')
self._api_url_get_jobs = Helpers.val_default(NOMAD_API_URL_GET_JOBS, config_nomad.NOMAD_API_URL_GET_JOBS).replace('"','')
self._api_url_get_jobs_info = Helpers.val_default(NOMAD_API_URL_GET_JOBS_INFO, config_nomad.NOMAD_API_URL_GET_JOBS_INFO).replace('"','')
self._api_url_get_servers = Helpers.val_default(NOMAD_API_URL_GET_SERVERS, config_nomad.NOMAD_API_URL_GET_SERVERS).replace('"','')
self._api_url_get_clients = Helpers.val_default(NOMAD_API_URL_GET_CLIENTS, config_nomad.NOMAD_API_URL_GET_CLIENTS).replace('"','')
self._api_url_get_clients_info = Helpers.val_default(NOMAD_API_URL_GET_CLIENT_INFO, config_nomad.NOMAD_API_URL_GET_CLIENT_INFO).replace('"','')
self._api_url_get_clients_status = Helpers.val_default(NOMAD_API_URL_GET_CLIENT_STATUS, config_nomad.NOMAD_API_URL_GET_CLIENT_STATUS).replace('"','')
self._api_url_get_clients_allocations = Helpers.val_default(NOMAD_API_URL_GET_CLIENT_ALLOCATIONS, config_nomad.NOMAD_API_URL_GET_CLIENT_ALLOCATIONS).replace('"','')
self._max_retries = Helpers.val_default(MAX_RETRIES, config_nomad.MAX_RETRIES)
self._acl_token = Helpers.val_default(NOMAD_ACL_TOKEN, config_nomad.NOMAD_ACL_TOKEN)
self._auth_data = Helpers.val_default(NOMAD_AUTH_DATA, config_nomad.NOMAD_AUTH_DATA)
self._state_off = Helpers.val_default(NOMAD_STATE_OFF, config_nomad.NOMAD_STATE_OFF).replace('"','')
self._state_on = Helpers.val_default(NOMAD_STATE_ON, config_nomad.NOMAD_STATE_ON).replace('"','')
self._http_port = Helpers.val_default(NOMAD_PRIVATE_HTTP_PORT, config_nomad.NOMAD_PRIVATE_HTTP_PORT).replace('"','')
self._nodes_info_file = Helpers.val_default(NOMAD_NODES_LIST_CLUES, config_nomad.NOMAD_NODES_LIST_CLUES).replace('"','')
self._queues = Helpers.val_default(NOMAD_QUEUES, config_nomad.NOMAD_QUEUES).replace('"','').split(',')
self._queues_ojpn = Helpers.val_default(NOMAD_QUEUES_OJPN, config_nomad.NOMAD_QUEUES_OJPN).replace('"','').split(',')
self._default_cpu_node = Helpers.val_default(NOMAD_DEFAULT_CPUS_PER_NODE, config_nomad.NOMAD_DEFAULT_CPUS_PER_NODE)
self._default_memory_node = Helpers.val_default(NOMAD_DEFAULT_MEMORY_PER_NODE, config_nomad.NOMAD_DEFAULT_MEMORY_PER_NODE).replace('"','')
self._queue_constraint_target = '${node.class}'
self._cpu_mhz_per_core = float(config_nomad.NOMAD_DEFAULT_CPU_GHZ)
if NOMAD_DEFAULT_CPU_GHZ != None and float(NOMAD_DEFAULT_CPU_GHZ) != 0.0:
self._cpu_mhz_per_core = NOMAD_DEFAULT_CPU_GHZ
self._cpu_mhz_per_core = self._cpu_mhz_per_core * 1000.0 # To MHz
self._verify = Helpers.val_default(NOMAD_CA_CERT, config_nomad.NOMAD_CA_CERT)
if self._verify == None:
self._verify=False
self._certs = []
server_cert_file = Helpers.val_default(NOMAD_SERVER_CERT, config_nomad.NOMAD_SERVER_CERT)
server_key_file = Helpers.val_default(NOMAD_SERVER_KEY, config_nomad.NOMAD_SERVER_KEY)
https_active = ('https'==self._server_url[:5])
https_loading_error = False
if server_cert_file:
exists = os.path.isfile(server_cert_file)
if exists:
self._certs.append(server_cert_file)
else:
https_loading_error = True
_LOGGER.error("The path of server certificate file does not exists: %s " % str(server_cert_file) )
if server_key_file:
exists = os.path.isfile(server_key_file)
if exists:
self._certs.append(server_key_file)
else:
https_loading_error = True
_LOGGER.error("The path of server private key of certificate file does not exists: %s " % str(server_key_file) )
if self._verify != False:
exists = os.path.isfile(self._verify)
if not exists:
https_loading_error = True
_LOGGER.error("The path of CA certicate file does not exists: %s " % str(self._verify) )
if https_active and len(self._certs) == 0:
https_loading_error = True
_LOGGER.error("Due to you are using TLS, it's required to provide the certificate and the private key (in 1 or 2 files)" )
self._certs = tuple(self._certs)
if https_loading_error and https_active:
_LOGGER.error("Some error encounted: %s " % str(self._verify) )
self._protocol='http'
if https_active:
self._protocol ='https'
# Check length of queues
if len(self._queues) <= 0:
_LOGGER.error("Error reading NOMAD_QUEUES, NOMAD_QUEUES will be %s" % str(config_nomad.NOMAD_QUEUES) )
self._queues = [ config_nomad.NOMAD_QUEUES ]
try:
self._headers = json.loads(Helpers.val_default(NOMAD_HEADERS, config_nomad.NOMAD_HEADERS))
except ValueError:
self._headers = {}
_LOGGER.error("Error loading variable NOMAD_HEADERS from config file, NOMAD_HEADERS will be %s" % str(config_nomad.NOMAD_HEADERS) )
LRMS.__init__(self, "TOKEN_%s" % self._server_url)
# CLUES API
def get_jobinfolist(self):
# Obtain server nodes
server_nodes_info = self._get_server_nodes_info()
taskinfolist, jobs_by_server = self._get_jobinfolist(server_nodes_info)
return taskinfolist
def get_nodeinfolist(self):
# Obtain server nodes
server_nodes_info = self._get_server_nodes_info()
nodeinfolist = self._get_nodeinfolist(server_nodes_info)
return nodeinfolist
# AUX FUNCTIONS
def _get_NodeInfo (self, info_node, default_info_node):
# Check queues
keywords = default_info_node['keywords']
queues = default_info_node['keywords']['queues']
q = info_node['node_class']
if not (q in self._queues or q == '') :
_LOGGER.error(" '%s' (node_class of Nomad Client) is not a valid queue, queue is set to queue of file %s." % (q, self._nodes_info_file))
if q in self._queues:
queues = [ q ]
keywords['queues'] = TypedList([TypedClass.auto(q) for q in queues])
# Illustrative values for Clues, since the node is not running, we cannot know the real values
slots_count = default_info_node['cpus']
slots_free = default_info_node['cpus']
memory_total = default_info_node['memory']
memory_free = default_info_node['memory']
# Information obtained from queries
if 'slots_count' in info_node['resources']:
slots_count = info_node['resources']['slots_count']
if 'memory_total' in info_node['resources']:
memory_total = info_node['resources']['memory_total']
if 'slots_used' in info_node['resources']:
slots_free = float(slots_count) - float(info_node['resources']['slots_used'])
if 'memory_used' in info_node['resources']:
memory_free = float(memory_total) - float(info_node['resources']['memory_used'])
# Check state
state = NodeInfo.UNKNOWN
if (info_node['status'] == self._state_on and not info_node['any_job_is_running']):
state = NodeInfo.IDLE
elif (info_node['status'] == self._state_on and info_node['any_job_is_running']):
state = NodeInfo.USED
elif (info_node['status'] == self._state_off):
state = NodeInfo.OFF
#_LOGGER.debug(" name= " + info_node['name'] + ", slots_count= " + str(slots_count) + ", slots_free= " + str(slots_free) + ", memory_total= " + str(memory_total) + ", memory_free= " + str(memory_free) + ", keywords= " + str(keywords) + ", memory_used=" + str(info_node['resources']['memory_used']) + ", slots_used=" + str(info_node['resources']['slots_used']) )
node = NodeInfo(info_node['name'], slots_count, slots_free, memory_total, memory_free, keywords)
node.state = state
return node
def _get_server_nodes_info(self):
server_nodes_info = []
url = self._server_url + self._api_version + self._api_url_get_servers
response = self._create_request('GET', url, auth_data=self._auth_data)
if (response[ 'status_code' ] == 200):
for node in response['json']['Members']:
server_nodes_info.append(self._protocol+'://'+node['Addr']+':'+self._http_port)
else:
_LOGGER.error("Error getting Nomad Server nodes addresses: %s: %s" % (response['status_code'], response['text']))
return server_nodes_info
def _is_Client_runningAJob (self, server_node, client_id):
url = server_node + self._api_version + self._api_url_get_clients_allocations.replace('$CLIENT_ID$', client_id )
response = self._create_request('GET', url)
if (response['status_code'] == 200):
for alloc in response['json']:
if alloc['ClientStatus'] in ['pending', 'running']:
#_LOGGER.debug("_is_Client_runningAJob is TRUE")
return True
else:
_LOGGER.error("Error getting information about allocations of client with ID=%s from Server node with URL=%s: %s: %s" % (client_id, server_node, response['status_code'], response['text']))
return False
def _get_Client_resources (self, server_node, client_id):
client_addr = self._get_Client_address(server_node, client_id)
if client_addr == None:
return {}
resources = {}
# Querying Client node for getting the slots_count and memory_total
url = self._protocol + '://' + client_addr + self._api_version + self._api_url_get_clients_status
response = self._create_request('GET', url)
if (response['status_code'] == 200):
resources['slots_count'] = len(response['json']['CPU'])
resources['memory_total'] = response['json']['Memory']['Total']
else:
_LOGGER.error("Error getting client_status from Client_url=%s: %s: %s" % (client_addr, response['status_code'], response['text']))
# Querying Client node for getting the slots_used and memory_used
url = server_node + self._api_version + self._api_url_get_clients_allocations.replace('$CLIENT_ID$', client_id )
response = self._create_request('GET', url)
if (response['status_code'] == 200):
resources['slots_used'] = 0.0
resources['memory_used'] = 0.0
for alloc in response['json']:
if alloc['ClientStatus'] in ['pending', 'running']: # The job is running or will be soon
resources['slots_used'] += ( float(alloc['Resources']['CPU']) / self._cpu_mhz_per_core)
resources['memory_used'] += float( _get_memory_in_bytes(str(alloc['Resources']['MemoryMB'])+"M"))
else:
_LOGGER.error("Error getting information about allocations of client with ID=%s from Server node with URL=%s: %s: %s" % (client_id, server_node, response['status_code'], response['text']))
return resources
def _get_Clients_by_Server(self, server_node):
clients = {}
url = server_node + self._api_version + self._api_url_get_clients
response = self._create_request('GET', url)
if (response['status_code'] == 200):
for client in response['json']:
client_id = client['ID']
clients[ client_id ] = {}
clients[ client_id ]['client_id'] = client_id
clients[ client_id ]['name'] = client['Name']
clients[ client_id ]['status'] = client['Status']
clients[ client_id ]['status_description'] = client['StatusDescription']
clients[ client_id ]['node_class'] = client['NodeClass']
clients[ client_id ]['any_job_is_running'] = self._is_Client_runningAJob (server_node, client['ID'] )
clients[ client_id ]['state'] = NodeInfo.OFF
if (client['Status'] == self._state_on):
clients[ client['ID'] ]['state'] = NodeInfo.IDLE
else:
_LOGGER.error("Error getting information about the Clients of the Server node with URL=%s: %s: %s" % (server_node, response['status_code'], response['text']))
return clients
def _get_Client_address(self, server_node, client_id):
addr = None
url = server_node + self._api_version + self._api_url_get_clients_info.replace('$CLIENT_ID$', client_id )
response = self._create_request('GET', url)
if (response['status_code'] == 200):
addr = response['json']['HTTPAddr']
else:
_LOGGER.error("Error getting client_addr from Server_url=%s and Client_ID=%s: %s: %s" % (server_node, client_id, response['status_code'], response['text']))
return addr
def _get_nodeinfolist(self, server_nodes_info):
##_LOGGER.info("***** START - get_nodeinfolist ***** ")
nodeinfolist = collections.OrderedDict()
default_node_info = collections.OrderedDict()
# DEFAULT NODE INFO
try:
vnodes = json.load(open(self._nodes_info_file, 'r'))
for vnode in vnodes:
NODE = {}
NODE['name'] = vnode["name"]
NODE['state'] = NodeInfo.OFF
NODE['keywords'] = {}
NODE['cpus'] = float(self._default_cpu_node)
if "cpu" in vnode:
NODE['cpus'] = int(vnode["cpu"])
NODE['memory'] = _get_memory_in_bytes(self._default_memory_node)
if "memory" in vnode:
NODE['memory'] = _get_memory_in_bytes(vnode["memory"])
if "keywords" in vnode:
for keypair in vnode["keywords"].split(','):
parts = keypair.split('=')
NODE['keywords'][parts[0].strip()] = TypedClass(parts[1].strip(), TypedClass.STRING)
if "queues" in vnode:
queues = vnode["queues"].split(",")
if queues:
NODE['keywords']['queues'] = TypedList([TypedClass.auto(q) for q in queues])
else: # All queues to the node
NODE['keywords']['queues'] = TypedList([TypedClass.auto(q) for q in self._queues[:] ])
default_node_info[ NODE['name'] ] = NODE
except Exception as ex:
_LOGGER.error("Error processing file %s: %s" % (self._nodes_info_file , str(ex)) )
clients_by_server = {}
for server_node in server_nodes_info:
clients_by_server[ server_node ] = self._get_Clients_by_Server(server_node) # Obtain ID, Name, Status, NodeClass and if the Client is running some job
# Obtain Resources and Queues
for client_id in clients_by_server[ server_node ]:
info_client = clients_by_server[ server_node ][ client_id ]
if (info_client['state'] in [NodeInfo.IDLE, NodeInfo.USED]): # Client is ON
# Obtain Client node address for checking used resources
info_client ['resources'] = self._get_Client_resources (server_node, client_id)
if info_client['name'] in default_node_info: # Valid node for CLUES and IM
nodeinfolist[ info_client['name'] ] = self._get_NodeInfo(info_client, default_node_info[ info_client['name'] ])
else:
_LOGGER.warning("Nomad Client with name '%s' founded using Nomad Server API but not exists this node in the configuration file %s" % (info_client['name'] , self._nodes_info_file) )
# Add nodes from nomad_info file to the list
for namenode, node_info in list(default_node_info.items()):
if namenode not in nodeinfolist:
nodeinfolist[ namenode ] = NodeInfo(namenode, node_info['cpus'], node_info['cpus'], node_info['memory'], node_info['memory'], node_info['keywords'])
nodeinfolist[ namenode ].state = node_info['state']
# Print all nodes in log with keywords
for key, value in list(nodeinfolist.items()):
string = "%s + keywords={ " % (str(value) )
for key2 in value.keywords:
string += key2 + ":" + str(value.keywords[key2]) +","
string = string[:-1] + "}"
_LOGGER.debug( string )
##_LOGGER.info("***** END - get_nodeinfolist ***** ")
return nodeinfolist
def _get_Jobs_by_Server(self, server_node):
jobs = {}
url = server_node + self._api_version + self._api_url_get_jobs
response = self._create_request('GET', url)
if (response['status_code'] == 200):
for job in response['json']:
jobs[ job['ID'] ]={}
jobs[ job['ID'] ]['status'] = job['Status']
jobs[ job['ID'] ]['status_description'] = job['StatusDescription']
jobs[ job['ID'] ]['job_id'] = job['ID']
jobs[ job['ID'] ]['name'] = job['Name']
jobs[ job['ID'] ]['TaskGroups'] = {}
for taskgroup_id, tasks_info in list(job['JobSummary']['Summary'].items()):
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id] = {}
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id]['name'] = job['ID'] + '-' + taskgroup_id
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id]['cpu'] = 0.0
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id]['memory'] = 0.0
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id]['queue'] = 'no_queue' #self._queues[0]
# Check state
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id]['state'] = Request.UNKNOWN
if (tasks_info['Queued'] > 0 or tasks_info['Starting'] > 0) and jobs[ job['ID'] ]['status'] != "dead":
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id]['state'] = Request.PENDING
else:
jobs[ job['ID'] ]['TaskGroups'][taskgroup_id]['state'] = Request.SERVED
else:
_LOGGER.error("Error getting jobs from Server node with URL = %s: %s: %s" % (server_node, response['status_code'], response['text']))
return jobs
def _get_TaskGroup_resources (self, jobs, server_node):
for job_id in jobs:
url = server_node + self._api_version + self._api_url_get_jobs_info.replace('$JOB_ID$',job_id)
response = self._create_request('GET', url)
if response['status_code'] == 200:
for task_group in response['json']['TaskGroups']:
taskgroup_id = task_group['Name']
if taskgroup_id in jobs[job_id]['TaskGroups']:
# Obtain Queue of the taskgroup
warning_constraint = True
if type(task_group['Constraints']) is list:
for constraint in task_group['Constraints']:
if constraint['LTarget'] == self._queue_constraint_target and constraint['RTarget'] in self._queues:
jobs[job_id]['TaskGroups'][taskgroup_id]['queue'] = constraint['RTarget']
warning_constraint = False
if warning_constraint:
jobs[job_id]['TaskGroups'][taskgroup_id]['queue'] = 'no_queue'
_LOGGER.warning("No '%s' contraint for taskgroup '%s' of the job '%s' or it isn't a valid queue from Server node with URL=%s. This job will not be added to the CLUES list" % (self._queue_constraint_target, taskgroup_id, job_id, server_node))
# Obtain Resources of the taskgroup
jobs[job_id]['TaskGroups'][taskgroup_id]['cpu'] = 0.0
jobs[job_id]['TaskGroups'][taskgroup_id]['memory'] = 0.0
if len(task_group['Tasks']) > 1:
_LOGGER.warning( "Taskgroup '%s' of job '%s' has got multiple tasks and this plugin doesn't support this. " % (taskgroup_id, job_id) )
for task in task_group['Tasks']:
jobs[job_id]['TaskGroups'][taskgroup_id]['cpu'] += float(task['Resources']['CPU']) / self._cpu_mhz_per_core
jobs[job_id]['TaskGroups'][taskgroup_id]['memory'] += float(task['Resources']['MemoryMB'] * 1024 * 1024 )
else:
_LOGGER.error("Error getting job information with job_id = %s from Server node with URL = %s: %s: %s" % (job_id, server_node, response['status_code'], response['text']))
# Default values
for taskgroup_id in list(jobs[job_id]['TaskGroups'].keys()):
jobs[job_id]['TaskGroups'][taskgroup_id]['cpu'] = 0.0
jobs[job_id]['TaskGroups'][taskgroup_id]['memory'] = 0.0
jobs[job_id]['TaskGroups'][taskgroup_id]['queue'] = 'no_queue'
return jobs
def _get_JobInfo(self, info):
queue = '"' + info['queue'] + '" in queues'
taskcount = 1
resources = ResourcesNeeded(info['cpu'], info['memory'], [queue], taskcount)
job_info = JobInfo(resources, info['name'], 1)
# Set state
job_info.set_state(info['state'])
return job_info
def _get_jobinfolist(self, server_nodes_info):
taskinfolist = []
jobs_by_server = {}
# Obtain jobs id
for server_node in server_nodes_info:
# Obtain job_id, job_name, taskgroup name and taskgroup state
jobs_by_server[ server_node ] = self._get_Jobs_by_Server(server_node)
# Obtain task resources for each taskgroup
jobs_by_server[server_node] = self._get_TaskGroup_resources(jobs_by_server[server_node], server_node)
for job_id in jobs_by_server[server_node]:
for taskgroup_id in jobs_by_server[server_node][job_id]['TaskGroups']:
added = 'NOT'
if 'no_queue' != jobs_by_server[server_node][job_id]['TaskGroups'][taskgroup_id]['queue']:
added = ''
taskinfolist.append( self._get_JobInfo( jobs_by_server[server_node][job_id]['TaskGroups'][taskgroup_id] ) )
_LOGGER.debug(" *JOB %s ADDED* - task_name = %s, cpu = %.2f, memory = %.2f, queue = %s and state = %d " % ( added, jobs_by_server[server_node][job_id]['TaskGroups'][taskgroup_id]['name'], jobs_by_server[server_node][job_id]['TaskGroups'][taskgroup_id]['cpu'], jobs_by_server[server_node][job_id]['TaskGroups'][taskgroup_id]['memory'], jobs_by_server[server_node][job_id]['TaskGroups'][taskgroup_id]['queue'], jobs_by_server[server_node][job_id]['TaskGroups'][taskgroup_id]['state'] ) )
return taskinfolist, jobs_by_server
if __name__ == '__main__':
pass
| grycap/clues | cluesplugins/nomad.py | Python | gpl-3.0 | 29,940 | 0.012792 |
import json
import logging
import re
from contextlib import contextmanager
@contextmanager
def disable_warnings(logger_name):
"""
Suppresses expected warning messages to keep the test output clean.
"""
logger = logging.getLogger(logger_name)
current_level = logger.level
logger.setLevel(logging.ERROR)
yield
logger.setLevel(current_level)
def extract_form_failures(html):
"""
Given raw HTML content from an HTTP response, returns a list of form errors.
"""
FORM_ERROR_REGEX = r"<!-- FORM-ERROR (.*) -->"
return re.findall(FORM_ERROR_REGEX, str(html))
def json_file_to_python_type(filename):
with open(filename, mode="r") as f:
return json.load(f)
def post_data(data):
"""
Takes a dictionary of test data and returns a dict suitable for POSTing.
"""
r = {}
for key, value in data.items():
if value is None:
r[key] = ""
elif type(value) in (list, tuple):
if value and hasattr(value[0], "pk"):
# Value is a list of instances
r[key] = [v.pk for v in value]
else:
r[key] = value
elif hasattr(value, "pk"):
# Value is an instance
r[key] = value.pk
else:
r[key] = str(value)
return r
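# Minimal usage sketch (added for illustration; the "vlan" instances are hypothetical, not from this
# module): assuming vlan1.pk == 1 and vlan2.pk == 2,
#   post_data({"name": "core", "description": None, "vlans": [vlan1, vlan2]})
# returns {"name": "core", "description": "", "vlans": [1, 2]}.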
| respawner/peering-manager | utils/testing/functions.py | Python | apache-2.0 | 1,327 | 0.000754 |
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.ping()
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash('An email with instructions to confirm your new email '
'address has been sent to you.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template('auth/change_email.html', form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('Your email address has been updated.')
else:
flash('Invalid request.')
return redirect(url_for('main.index'))
| haup/totoro | totoro/app/auth/views.py | Python | gpl-3.0 | 6,038 | 0 |
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
print "jojo"
if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain')
else:
return redirect('/api')
| spezifanta/Paste-It | api/v01/views.py | Python | mit | 749 | 0.006676 |
import utils
import os
from functools import partial
import sys
from itertools import groupby as itertools_groupby
mlst_timer = partial(utils.timer, name='MLST')
def get_species_scheme_map_version(mlst_folder):
species_scheme_map_version = 1
mlst_db_path = os.path.join(os.path.dirname(os.path.dirname(mlst_folder)), 'db', 'species_scheme_map.tab')
if not os.path.isfile(mlst_db_path):
mlst_db_path = os.path.join(os.path.dirname(os.path.dirname(mlst_folder)), 'db', 'scheme_species_map.tab')
if not os.path.isfile(mlst_db_path):
sys.exit('ERROR: species_scheme_map not found. Contact the developers. In the meantime try running INNUca'
' with --skipMLST option')
else:
species_scheme_map_version = 2
return mlst_db_path, species_scheme_map_version
def set_species_scheme_map_variables(list_values, species_scheme_map_version):
if species_scheme_map_version == 1:
val_genus = list_values[0]
val_species = list_values[1]
val_scheme = list_values[2]
elif species_scheme_map_version == 2:
val_genus = list_values[1]
val_species = list_values[2]
val_scheme = list_values[0]
return val_genus, val_species, val_scheme
def parse_species_scheme_map(species_splited, mlst_db_path, species_scheme_map_version):
scheme = 'unknown'
genus_mlst_scheme = None
with open(mlst_db_path, 'rtU') as reader:
for line in reader:
line = line.splitlines()[0]
if len(line) > 0:
if not line.startswith('#'):
line = line.lower().split('\t')
line = [line[i].split(' ')[0] for i in range(0, len(line))]
val_genus, val_species, val_scheme = set_species_scheme_map_variables(line,
species_scheme_map_version)
if val_genus == species_splited[0]:
if val_species == '':
genus_mlst_scheme = val_scheme
elif val_species == species_splited[1]:
scheme = val_scheme
if scheme == 'unknown' and genus_mlst_scheme is not None:
scheme = genus_mlst_scheme
return scheme, genus_mlst_scheme
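# Illustrative sketch of the mapping above (the tab-separated line shown is hypothetical, not copied
# from the real mlst database). With the version-1 layout (genus<TAB>species<TAB>scheme), a line such as
#   escherichia<TAB>coli<TAB>ecoli
# makes parse_species_scheme_map(['escherichia', 'coli'], ...) return ('ecoli', None), while a line with
# an empty species field only sets the genus-level fallback scheme used when no exact species match exists.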
def getScheme(species):
command = ['which', 'mlst']
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, False)
mlst_folder = os.path.abspath(os.path.realpath(stdout.splitlines()[0]))
mlst_db_path, species_scheme_map_new = get_species_scheme_map_version(mlst_folder)
scheme, genus_mlst_scheme = parse_species_scheme_map(species.lower().split(' '), mlst_db_path,
species_scheme_map_new)
print('\n' + 'MLST scheme found for {species}: {scheme}'.format(species=species, scheme=scheme))
return scheme, species.lower().split(' ')[0], genus_mlst_scheme
def getBlastPath():
print('\n' + 'The following blastn will be used')
command = ['which', 'blastn']
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, True)
print(stdout)
def clean_novel_alleles(novel_alleles, scheme_mlst, profile):
"""
Clean the fasta file with the novel alleles produced by mlst
Parameters
----------
novel_alleles : str
Path for fasta file containing the novel alleles
scheme_mlst : str
MLST schema found by mlst
profile : list
List of strings with the profile found
Returns
-------
"""
unknown_genes = []
for gene_allele in profile:
gene = gene_allele.split('(')[0]
try:
allele = gene_allele.split('(')[1].rstrip(')')
if allele.startswith('~'):
unknown_genes.append(gene)
except IndexError as e:
print('WARNING: {}'.format(e))
novel_alleles_keep = {}
if len(unknown_genes) > 0:
reader = open(novel_alleles, mode='rt') # TODO: newline=None in Python3
fasta_iter = (g for k, g in itertools_groupby(reader, lambda x: x.startswith('>')))
for header in fasta_iter:
# header = header.__next__()[1:].rstrip('\r\n') # TODO: Python3
header = header.next()[1:].rstrip('\r\n')
# seq = ''.join(s.rstrip('\r\n') for s in fasta_iter.__next__()) # TODO: Python3
seq = ''.join(s.rstrip('\r\n') for s in fasta_iter.next())
if header.startswith(scheme_mlst):
gene = header.split('.')[1].split('~')[0]
if gene in unknown_genes:
novel_alleles_keep[header] = seq
reader.close()
os.remove(novel_alleles)
if len(novel_alleles_keep) > 0:
with open(novel_alleles, 'wt') as writer:
for header, seq in novel_alleles_keep.items():
writer.write('>{}\n'.format(header))
writer.write('\n'.join(utils.chunkstring(seq, 80)) + '\n')
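# Worked example of the profile parsing above (the gene/allele strings are hypothetical): with
# profile = ['abcZ(1)', 'adk(~3)'], only 'adk' ends up in unknown_genes because its allele string
# starts with '~' (an inexact/novel match reported by mlst), so only the fasta records whose header
# gene field parses to 'adk' are kept in the cleaned novel-alleles file.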
@mlst_timer
def runMlst(contigs, scheme, outdir, species_genus, mlst_scheme_genus):
pass_qc = False
failing = {}
failing['sample'] = False
warnings = {}
novel_alleles = os.path.join(outdir, 'mlst_novel_alleles.fasta')
command = ['mlst', '--novel', novel_alleles, contigs]
run_successfully, stdout, _ = utils.runCommandPopenCommunicate(command, False, None, True)
if run_successfully:
scheme_mlst = stdout.splitlines()[0].split('\t')[1].split('_')[0]
st = stdout.splitlines()[0].split('\t')[2]
profile = stdout.splitlines()[0].split('\t')[3:]
if st == '-' and os.path.isfile(novel_alleles):
clean_novel_alleles(novel_alleles=novel_alleles, scheme_mlst=scheme_mlst, profile=profile)
else:
if os.path.isfile(novel_alleles):
os.remove(novel_alleles)
report = 'MLST found ST ' + str(st) + ' from scheme ' + scheme_mlst
print(report)
with open(os.path.join(outdir, 'mlst_report.txt'), 'wt') as writer:
writer.write('#scheme' + '\n' + scheme_mlst + '\n' + '#ST' + '\n' + st + '\n')
writer.write('#profile' + '\n' + ' '.join(profile) + '\n')
writer.flush()
if scheme_mlst.split('_', 1)[0] == scheme.split('_', 1)[0]:
pass_qc = True
else:
if scheme == 'unknown' and scheme_mlst != '-':
pass_qc = True
warnings['sample'] = 'Found {scheme_mlst} scheme for a species with unknown' \
' scheme'.format(scheme_mlst=scheme_mlst)
elif scheme == 'unknown' and scheme_mlst == '-':
pass_qc = True
elif scheme != 'unknown' and scheme_mlst == '-':
pass_qc = True
warnings['sample'] = 'Could not find a scheme for a species with known scheme ({})'.format(scheme)
elif species_genus == 'yersinia' and mlst_scheme_genus == 'yersinia':
pass_qc = True
warnings['sample'] = 'Found a Yersinia scheme ({scheme_mlst}), but it is different from what it was' \
' expected ({scheme})'.format(scheme_mlst=scheme_mlst, scheme=scheme)
else:
if mlst_scheme_genus is not None and scheme_mlst == scheme == mlst_scheme_genus:
pass_qc = True
else:
failing['sample'] = 'MLST scheme found ({scheme_mlst}) and provided ({scheme}) are not the' \
' same'.format(scheme_mlst=scheme_mlst, scheme=scheme)
print(failing['sample'])
else:
failing['sample'] = 'Did not run'
if len(warnings) > 0:
print(warnings['sample'])
return run_successfully, pass_qc, failing, warnings
| B-UMMI/INNUca | modules/mlst.py | Python | gpl-3.0 | 7,916 | 0.003284 |
"""Add user table
Revision ID: 42ad047fd7ff
Revises: 5cd786f3176
Create Date: 2014-10-03 12:14:11.091123
"""
# revision identifiers, used by Alembic.
revision = '42ad047fd7ff'
down_revision = '5cd786f3176'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('email', sa.String(), nullable=False),
sa.Column('timezone', sa.String(), nullable=True),
sa.Column('admin', sa.Boolean(), server_default='false', nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
| msimacek/koschei | alembic/versions/42ad047fd7ff_add_user_table.py | Python | gpl-2.0 | 903 | 0.014396 |
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class UacGetOrganizationByIdResponse(BaseType):
def __init__(self, organization=None):
required = {
"organization": False,
}
self.organization = organization
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .UacOrganization import UacOrganization
tmp = d.get('organization', None)
if tmp is not None:
d['organization'] = UacOrganization.from_json(tmp)
return UacGetOrganizationByIdResponse(**d)
| mitdbg/modeldb | client/verta/verta/_swagger/_public/uac/model/UacGetOrganizationByIdResponse.py | Python | mit | 653 | 0.01072 |
"""
podpy is an implementation of the pixel optical depth method as described in
Turner et al. 2014, MNRAS, 445, 794, and Aguirre et al. 2002, ApJ, 576, 1.
Please contact the author (Monica Turner) at turnerm@mit.edu if you have
any questions, comments or issues.
"""
| turnerm/podpy | podpy/__init__.py | Python | mit | 271 | 0.01845 |
# being a bit too dynamic
from distutils.version import LooseVersion
import operator
def _mpl_version(version, op):
def inner():
try:
import matplotlib as mpl
except ImportError:
return False
return (
op(LooseVersion(mpl.__version__), LooseVersion(version))
and str(mpl.__version__)[0] != "0"
)
return inner
_mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge)
_mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge)
_mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge)
| toobaz/pandas | pandas/plotting/_matplotlib/compat.py | Python | bsd-3-clause | 554 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket, time
import sys
import multiprocessing
def readlines(sock, recv_buffer=4096, delim='\n'):
buffer = ''
data = True
while data:
data = sock.recv(recv_buffer)
buffer += data
while buffer.find(delim) != -1:
line, buffer = buffer.split('\n', 1)
yield line
return
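# Minimal usage sketch (added for illustration, not part of the original script): readlines() buffers
# raw recv() chunks and yields one complete '\n'-terminated message at a time, e.g.
#   for message in readlines(conn):
#       handle(message)  # 'handle' is a placeholder, not defined in this file
# The generator exits once recv() returns an empty string, i.e. when the peer closes the connection.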
def main(data_queue):
HOST = "192.168.1.146" # Symbolic name meaning all available interfaces
PORT = 9090 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connected by', addr
message = readlines(conn)
while 1:
try:
data_queue.put(eval(message.next()))
except Exception as err:
print 'Error on server: ', err
conn.close()
    sys.exit()
| redarmy30/Eurobot-2017 | old year/RESET-master/Testing/server_small.py | Python | mit | 805 | 0.034783 |
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import alembic
from oslo_serialization import jsonutils
import sqlalchemy as sa
from nailgun.db import db
from nailgun.db import dropdb
from nailgun.db.migration import ALEMBIC_CONFIG
from nailgun.db.migration import make_alembic_config_from_extension
from nailgun.extensions.consts import extensions_migration_buffer_table_name
from nailgun.test import base
from volume_manager.extension import VolumeManagerExtension
_core_test_revision = '1e50a4903910'
_extension_test_revision = '086cde3de7cf'
def setup_module():
dropdb()
# Run core migration in order to create buffer table
alembic.command.upgrade(ALEMBIC_CONFIG, _core_test_revision)
prepare()
# Run extension migrations
ext_alembic_config = make_alembic_config_from_extension(
VolumeManagerExtension)
alembic.command.upgrade(ext_alembic_config, _extension_test_revision)
def prepare():
meta = base.reflect_db_metadata()
# Fill in migration table with data
db.execute(
meta.tables[extensions_migration_buffer_table_name].insert(),
[{'extension_name': 'volume_manager',
'data': jsonutils.dumps({'node_id': 1, 'volumes': [{'volume': 1}]})},
{'extension_name': 'volume_manager',
'data': jsonutils.dumps({'node_id': 2, 'volumes': [{'volume': 2}]})},
{'extension_name': 'some_different_extension',
'data': 'some_data'}])
db.commit()
class TestVolumeManagerExtensionAddVolumesTable(base.BaseAlembicMigrationTest):
@classmethod
def setUpClass(cls):
setup_module()
def test_add_volumes_table(self):
result = db.execute(
sa.select([
self.meta.tables['volume_manager_node_volumes'].c.node_id,
self.meta.tables['volume_manager_node_volumes'].c.volumes]))
records = list(result)
node_ids = [r[0] for r in records]
self.assertItemsEqual(node_ids, [1, 2])
volumes = [jsonutils.loads(r[1]) for r in records]
self.assertItemsEqual(
[[{'volume': 1}], [{'volume': 2}]],
volumes)
result = db.execute(
sa.select([
self.meta.tables[
extensions_migration_buffer_table_name].c.extension_name,
self.meta.tables[
extensions_migration_buffer_table_name].c.data]))
self.assertEqual(
list(result),
[('some_different_extension', 'some_data')])
| gitfred/fuel-extension-volume-manager | volume_manager/tests/test_migration_volume_manager_extension_001_add_volumes_table.py | Python | apache-2.0 | 3,073 | 0 |
"""
Tests for DistCI task management interfaces
Copyright (c) 2012-2013 Heikki Nousiainen, F-Secure
See LICENSE for details
"""
from nose.plugins.skip import SkipTest
from webtest import TestApp, TestRequest
import json
import tempfile
import os
import shutil
from distci import frontend
class TestTasks:
app = None
config_file = None
data_directory = None
test_state = {}
@classmethod
def setUpClass(cls):
        cls.data_directory = tempfile.mkdtemp()
config_file = os.path.join(cls.data_directory, 'frontend.conf')
os.mkdir(os.path.join(cls.data_directory, 'tasks'))
config = { "data_directory": cls.data_directory }
json.dump(config, file(config_file, 'wb'))
frontend_app = frontend.Frontend(config)
cls.app = TestApp(frontend_app)
@classmethod
def tearDownClass(cls):
cls.app = None
shutil.rmtree(cls.data_directory)
def test_01_list_tasks_empty(self):
response = self.app.request('/tasks')
result = json.loads(response.body)
assert result.has_key('tasks'), "Tasks entry went missing"
assert len(result['tasks']) == 0, "Tasks entry was not empty"
def test_02_post_task(self):
task_data = json.dumps({ 'command': 'something' })
request = TestRequest.blank('/tasks', content_type='application/json')
request.method = 'POST'
request.body = task_data
response = self.app.do_request(request, 201, False)
result = json.loads(response.body)
assert result.has_key('id'), "ID entry went missing"
assert result.has_key('data'), "data entry went missing"
self.test_state['id'] = str(result['id'])
def test_03_check_single_task(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task status, no recorded state")
response = self.app.request('/tasks/%s' % task_id)
result = json.loads(response.body)
assert result['id'] == task_id, "ID mismatch"
assert result['data']['command'] == 'something', "Wrong data"
def test_04_update(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task update, no recorded state")
new_task_data = json.dumps({'command': 'something_else', 'assignee': 'my-id'})
request = TestRequest.blank('/tasks/%s' % task_id, content_type='application/json')
request.method = 'PUT'
request.body = new_task_data
response = self.app.do_request(request, 200, False)
result = json.loads(response.body)
assert result.has_key('id'), "ID entry went missing"
assert result.has_key('data'), "data entry went missing"
assert result['data']['command'] == 'something_else', "Wrong command"
assert result['data']['assignee'] == 'my-id', "Wrong assignee"
def test_05_update_with_wrong_assignee(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task update, no recorded state")
new_task_data = json.dumps({'command': 'something_else', 'assignee': 'my-id-not-same'})
request = TestRequest.blank('/tasks/%s' % task_id, content_type='application/json')
request.method = 'PUT'
request.body = new_task_data
response = self.app.do_request(request, 409, False)
def test_06_list_tasks(self):
response = self.app.request('/tasks')
result = json.loads(response.body)
assert result.has_key('tasks'), "Tasks entry went missing"
assert len(result['tasks']) == 1, "Invalid task count"
task_id = self.test_state.get('id')
if task_id is not None:
assert task_id in result['tasks'], "Task not included in the list"
def test_07_delete(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task status, no recorded state")
request = TestRequest.blank('/tasks/%s' % task_id)
request.method = 'DELETE'
response = self.app.do_request(request, 204, False)
response = self.app.request('/tasks')
result = json.loads(response.body)
assert result.has_key('tasks'), "Tasks entry went missing"
assert len(result['tasks']) == 0, "Invalid task count"
| F-Secure/distci | src/distci/frontend/tests/test-frontend-tasks.py | Python | apache-2.0 | 4,501 | 0.004666 |
import os
import string
import random
import mmap
import sys
DATATYPES = dict(
ascii_letters=string.ascii_letters.encode(),
ascii_lowercase=string.ascii_lowercase.encode(),
ascii_uppercase=string.ascii_uppercase.encode(),
digits=string.digits.encode(),
hexdigits=string.hexdigits.encode(),
octdigits=string.octdigits.encode(),
punctuation=string.punctuation.encode(),
whitespace=string.whitespace.encode(),
ascii=string.printable.encode(),
bytes=bytes(range(256))
)
class TransformGenerator:
"""
    Perform a byte-by-byte transform on another generator - that is, for each
input byte, the transformation must produce one output byte.
gen: A generator to wrap
transform: A function (offset, data) -> transformed
"""
def __init__(self, gen, transform):
self.gen = gen
self.transform = transform
def __len__(self):
return len(self.gen)
def __getitem__(self, x):
d = self.gen.__getitem__(x)
if isinstance(x, slice):
return self.transform(x.start, d)
return self.transform(x, d)
def __repr__(self):
return "'transform(%s)'" % self.gen
def rand_byte(chars):
"""
Return a random character as byte from a charset.
"""
# bytearray has consistent behaviour on both Python 2 and 3
# while bytes does not
return bytes([random.choice(chars)])
class RandomGenerator:
def __init__(self, dtype, length):
self.dtype = dtype
self.length = length
def __len__(self):
return self.length
def __getitem__(self, x):
chars = DATATYPES[self.dtype]
if isinstance(x, slice):
return b"".join(rand_byte(chars) for _ in range(*x.indices(min(self.length, sys.maxsize))))
return rand_byte(chars)
def __repr__(self):
return "%s random from %s" % (self.length, self.dtype)
class FileGenerator:
def __init__(self, path):
self.path = os.path.expanduser(path)
def __len__(self):
return os.path.getsize(self.path)
def __getitem__(self, x):
with open(self.path, mode="rb") as f:
if isinstance(x, slice):
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mapped:
return mapped.__getitem__(x)
else:
f.seek(x)
return f.read(1)
def __repr__(self):
return "<%s" % self.path
| zlorb/mitmproxy | pathod/language/generators.py | Python | mit | 2,469 | 0.00081 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import gettext
from enum import Enum, unique
_ = gettext.gettext
strategy_descriptions = [_("New resourcelist strategy"),
_("New changelist strategy"),
_("Incremental changelist strategy")]
@unique
class Strategy(Enum):
"""
:samp:`Strategy for ResourceSync Publishing`
"""
resourcelist = 0
"""
``0`` :samp:`New resourcelist {strategy}`
Create new resourcelist(s) every run.
"""
new_changelist = 1
"""
``1`` :samp:`New changelist {strategy}`
Create a new changelist every run.
If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
"""
inc_changelist = 2
"""
``2`` :samp:`Incremental changelist {strategy}`
Add changes to an existing changelist. If no changelist exists, create a new one.
If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
"""
# resourcedump = 3 # not implemented
# changedump = 4 # not implemented
@staticmethod
def names():
"""
:samp:`Get Strategy names`
:return: List<str> of names
"""
names = dir(Strategy)
return [x for x in names if not x.startswith("_")]
@staticmethod
def sanitize(name):
"""
:samp:`Verify a {Strategy} name`
:param str name: string to test
:return: name if it is the name of a strategy
:raises: :exc:`ValueError` if the given name is not the name of a strategy
"""
try:
strategy = Strategy[name]
return strategy.name
except KeyError as err:
raise ValueError(err)
@staticmethod
def strategy_for(value):
"""
:samp:`Get a Strategy for the given value`
:param value: may be :class:`Strategy`, str or int
:return: :class:`Strategy`
:raises: :exc:`ValueError` if the given value could not be converted to a :class:`Strategy`
"""
try:
if isinstance(value, Strategy):
return value
elif isinstance(value, int):
return Strategy(value)
else:
return Strategy[value]
except KeyError as err:
raise ValueError(err)
def describe(self):
return strategy_descriptions[self.value]
class Capability(Enum):
"""
:samp:`Capabilities as defined in the ResourceSync Framework`
"""
resourcelist = 0
"""
``0`` :samp:`resourcelist`
"""
changelist = 1
"""
``1`` :samp:`changelist`
"""
resourcedump = 2
"""
``2`` :samp:`resourcedump`
"""
changedump = 3
"""
``3`` :samp:`changedump`
"""
resourcedump_manifest = 4
"""
``4`` :samp:`resourcedump_manifest`
"""
changedump_manifest = 5
"""
``5`` :samp:`changedump_manifest`
"""
capabilitylist = 6
"""
``6`` :samp:`capabilitylist`
"""
description = 7
"""
``7`` :samp:`description`
"""
class SelectMode(Enum):
"""
:samp:`Mode of selection`
"""
simple = 0
selector = 1
@staticmethod
def names():
"""
:samp:`Get SelectMode names`
:return: List<str> of names
"""
names = dir(SelectMode)
return [x for x in names if not x.startswith("_")]
@staticmethod
def select_mode_for(mode):
try:
if isinstance(mode, SelectMode):
return mode
elif isinstance(mode, int):
return SelectMode(mode)
else:
return SelectMode[mode]
except KeyError as err:
raise ValueError(err)
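# Illustrative sketch (not part of the original module): how the enum helpers
# above are typically exercised.
if __name__ == "__main__":
    print(Strategy.strategy_for(2))              # Strategy.inc_changelist
    print(Strategy.strategy_for("resourcelist").describe())
    print(Strategy.sanitize("new_changelist"))   # 'new_changelist'
    print(Capability.changelist.value)           # 1
    print(SelectMode.select_mode_for("simple"))  # SelectMode.simple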
| EHRI/rspub-core | rspub/core/rs_enum.py | Python | apache-2.0 | 3,776 | 0.001324 |
# -*- encoding: utf-8 -*-
"""Implements Architecture UI"""
from robottelo.constants import FILTER
from robottelo.ui.base import Base
from robottelo.ui.locators import common_locators, locators
from robottelo.ui.navigator import Navigator
class Architecture(Base):
"""Manipulates architecture from UI"""
def navigate_to_entity(self):
"""Navigate to Architecture entity page"""
Navigator(self.browser).go_to_architectures()
def _search_locator(self):
"""Specify locator for Architecture entity search procedure"""
return locators['arch.arch_name']
def create(self, name, os_names=None):
"""Creates new architecture from UI with existing OS"""
self.click(locators['arch.new'])
self.assign_value(locators['arch.name'], name)
self.configure_entity(os_names, FILTER['arch_os'])
self.click(common_locators['submit'])
def delete(self, name, really=True):
"""Delete existing architecture from UI"""
self.delete_entity(
name,
really,
locators['arch.delete'],
)
def update(self, old_name, new_name=None, os_names=None,
new_os_names=None):
"""Update existing arch's name and OS"""
self.search_and_click(old_name)
if new_name:
self.assign_value(locators['arch.name'], new_name)
self.configure_entity(
os_names,
FILTER['arch_os'],
new_entity_list=new_os_names
)
self.click(common_locators['submit'])
| elyezer/robottelo | robottelo/ui/architecture.py | Python | gpl-3.0 | 1,561 | 0 |
# -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
from . import hotel_wizard
from . import sale_make_invoice_advance
| JayVora-SerpentCS/vertical-hotel | hotel/wizard/__init__.py | Python | agpl-3.0 | 153 | 0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Score.created_at'
db.add_column(u'core_score', 'created_at',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
keep_default=False)
# Adding field 'Score.updated_at'
db.add_column(u'core_score', 'updated_at',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
keep_default=False)
# Adding field 'Score.changed_by'
db.add_column(u'core_score', 'changed_by',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'core_score_related', null=True, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Score.created_at'
db.delete_column(u'core_score', 'created_at')
# Deleting field 'Score.updated_at'
db.delete_column(u'core_score', 'updated_at')
# Deleting field 'Score.changed_by'
db.delete_column(u'core_score', 'changed_by_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.image': {
'Meta': {'object_name': 'Image'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_image_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.indicator': {
'Meta': {'object_name': 'Indicator'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_indicator_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['forms.Form']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'form_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maximum_monthly_records': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'passing_percentage': ('django.db.models.fields.FloatField', [], {'default': '85'}),
'title': ('django.db.models.fields.TextField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.location': {
'Meta': {'object_name': 'Location'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_location_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Image']", 'null': 'True', 'blank': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Indicator']", 'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'position': ('geoposition.fields.GeopositionField', [], {'max_length': '42'}),
'title': ('django.db.models.fields.TextField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.score': {
'Meta': {'object_name': 'Score'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_score_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'entry_count': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Indicator']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Location']"}),
'month': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'passing': ('django.db.models.fields.BooleanField', [], {}),
'passing_entry_count': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.FloatField', [], {'default': '85'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'forms.form': {
'Meta': {'object_name': 'Form'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['core'] | SM2015/orchid | core/migrations/0002_auto__add_field_score_created_at__add_field_score_updated_at__add_fiel.py | Python | mit | 11,751 | 0.007404 |
from django.contrib.auth.models import User
from is_core.main import UIRestModelISCore
from .models import Issue
class IssueIsCore(UIRestModelISCore):
model = Issue
class UserIsCore(UIRestModelISCore):
model = User | asgeirrr/django-is-core | example/issues/cores.py | Python | lgpl-3.0 | 228 | 0.004386 |
'''
This version of the terrain generator library interpreter is set up to produce terrain maps using Perlin noise.
Important settings and inits specific to the Perlin generator can be found in the code by searching for cfref:perlin-gen
Additional display options may be viewed by changing which WHICH_COLOR_SCHEME assignment is left uncommented. 'terrain' and 'grayscale' are the only interesting options for this particular generator's current arrangement.
Note that the dependencies do NOT include math, even though the generator library does.
'''
import NoiseMapGenerators_14 as NoiseMapGenerators
import pygame
import sys
import random
#### Constants ####
WINDOW_CAPTION = "NoiseMapGenerator Library Demonstration"
WHICH_COLOR_SCHEME = 'terrain'
#WHICH_COLOR_SCHEME = 'grayscale'
## Starfield will require extensive parameters testing with each generator.
#WHICH_COLOR_SCHEME = 'starfield'
#WHICH_COLOR_SCHEME = 'dungeon'
## For display purposes.
MAPTILE_SIZE_IN_PIXELS = MAPTILE_WIDTH_IN_PIXELS, MAPTILE_HEIGHT_IN_PIXELS = 4, 4
print("MAPTILE_SIZE_IN_PIXELS == %s" % str(MAPTILE_SIZE_IN_PIXELS))
## The NOISE_WIDTH (and NOISE_HEIGHT) options adjust the size of the map that is created. Or at least I'd be awfully surprised to learn that isn't what they do.
NOISE_WIDTH = 120
## Adjusted this to make it a perfect square for testing purposes.
NOISE_HEIGHT = NOISE_WIDTH
#NOISE_HEIGHT = 16
#### Dungeon map testing observations ####
## Does not need to be commented when not using a dungeon map generator.
ROOM_MAX_SIZE = 18
ROOM_MIN_SIZE = 6
## Note: Room max count and room min count are not currently used by the RoomFilledMapGenerator.
ROOM_MAX_COUNT = 14444
ROOM_MIN_COUNT = 12
#### Simplex noise testing observations ####
#NOISE_FREQUENCY = 0.01
#NOISE_OCTAVES = 32
#NOISE_PERSISTENCE = 0.5
## IMPORTANT!!! Frequency MUST BE VERY LOW! Beneath 1.0 and above 0.0, possibly always beneath 0.1
## Otherwise the "width of tiles on the planet's surface" is going to be SMALLER THAN A MAPTILE.
## This makes it seem hugely spikey, when simplex noise should be smooth and cloudlike.
## I got decent results at 0.01 frequency with all sorts of octaves and 0.5 persistence.
## 0.01f 32o 0.5p looks fairly zoomed in, though.
## Below data is from before I discovered the above.
## octaves seems to smoothen it out as it increases, as with before
## f2 o2 p2-4-8-16 seemed to create more land area with more p, though p0 created plenty of land and was extremely spikey
## no visible difference between f2o2p64 and f2o2p512, but f2o2p2 had more water and seemed less spikey
## f32o2p2 made visible lleft-->uright diagonal repetition
## f256o2p2 made a series of lleft-->uright streaks.
## I think scaling up frequency too far makes it more obviously repeat itself.
## f1o2p1 had visible lleft-->uright diagonal repetition
## ...
## heh oops. Persistence should be between 0 and 1.
#### Perlin noise testing observations #### cfref:perlin-gen
# width of the tiles on the planet's surface??
#NOISE_FREQUENCY = 64
# how close we are to the tiles?? <-- this seems to be a decent interpretation of its effects
#NOISE_OCTAVES = 1024
## Lower octaves makes it calculate faster. Higher frequency:octaves ratio makes it spikier and more repetitive.
#NOISE_FREQUENCY = 32
#NOISE_OCTAVES = 512
#NOISE_FREQUENCY = 8
#NOISE_OCTAVES = 128
## Nice smooth islands. Probably.
NOISE_FREQUENCY = 3
NOISE_OCTAVES = 64
## Could this algorithm be used to zoom in to a specific map by creating one particular randseed and using it for every pass of the algorithm? Would need to modify the generator.
MAP_SIZE_IN_TILES = MAP_WIDTH_IN_TILES, MAP_HEIGHT_IN_TILES = NOISE_WIDTH, NOISE_HEIGHT
print("MAP_SIZE_IN_TILES == %s" % str(MAP_SIZE_IN_TILES))
## Size the screen so the maptiles fit in it neatly.
SCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = (MAP_WIDTH_IN_TILES * MAPTILE_WIDTH_IN_PIXELS), (MAP_HEIGHT_IN_TILES * MAPTILE_HEIGHT_IN_PIXELS)
print("SCREEN_SIZE == %s" % str(SCREEN_SIZE))
print("\n")
## In this program, BLACK is used to clean the screen when redrawing.
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
LIGHT_GRAY = [120, 120, 120]
DARK_GRAY = [50, 50, 50]
RED = [255, 0, 0]
GREEN = [0, 255, 0]
BLUE = [0, 0, 255]
DARK_BLUE = [0, 0, 150]
DEEP_BLUE = [0, 0, 75]
BRAUN = [95, 45, 0]
LIGHT_BRAUN = [115, 65, 20]
SANDY_TAN = [245, 165, 95]
DARK_GREEN = [0, 155, 0]
LIGHT_GREEN = [50, 255, 50]
DEBUG_PINK = [224, 176, 255]
#### Classes ####
class MapTile:
def __init__(self, supplied_x_in_maptiles, supplied_y_in_maptiles, supplied_z):
''' Make a MapTile object with coordinates on the screen and in the map (meansured in pixels and tiles, respectively). Each MapTile has a magnitude, called: z '''
self.x = supplied_x_in_maptiles
self.pixel_x = self.x * MAPTILE_WIDTH_IN_PIXELS
self.y = supplied_y_in_maptiles
self.pixel_y = self.y * MAPTILE_HEIGHT_IN_PIXELS
self.z = supplied_z
if self.z != None:
if self.z > 255:
self.z = 255
elif self.z < 0:
self.z = 0
elif self.z == None:
#print("NONE DETECTED in self.z!! " + str(self.z))
pass
def draw_maptile(self):
## Regardless of the color scheme, pixels with value of None type will be set to DEBUG_PINK.
        if self.z is None:
_color_of_this_pixel = DEBUG_PINK
pygame.draw.rect(screen, _color_of_this_pixel, [self.pixel_x, self.pixel_y, MAPTILE_WIDTH_IN_PIXELS, MAPTILE_HEIGHT_IN_PIXELS])
return
if WHICH_COLOR_SCHEME == 'terrain':
if self.z < 90:
_color_of_this_pixel = DEEP_BLUE
elif self.z < 120:
_color_of_this_pixel = DARK_BLUE
elif self.z < 160:
_color_of_this_pixel = BLUE
elif self.z < 170:
_color_of_this_pixel = GREEN
elif self.z < 180:
_color_of_this_pixel = DARK_GREEN
elif self.z < 190:
_color_of_this_pixel = GREEN
elif self.z < 200:
_color_of_this_pixel = BRAUN
elif self.z < 210:
_color_of_this_pixel = LIGHT_BRAUN
else:
_color_of_this_pixel = WHITE
elif WHICH_COLOR_SCHEME == 'starfield':
if self.z == 1:
_color_of_this_pixel = WHITE
elif self.z == 2:
_color_of_this_pixel = LIGHT_GRAY
elif self.z == 3:
_color_of_this_pixel = DARK_GRAY
else:
_color_of_this_pixel = BLACK
elif WHICH_COLOR_SCHEME == 'dungeon':
if self.z == 0:
_color_of_this_pixel = BLACK
elif self.z == 1:
_color_of_this_pixel = LIGHT_GRAY
else:
_color_of_this_pixel = DEBUG_PINK
#print("\n OMG DEBUG PINK OMG OMG\n x: %d y: %d" % (self.x, self.y))
elif WHICH_COLOR_SCHEME == 'grayscale':
_color_of_this_pixel = [self.z, self.z, self.z] # I summon thee
pygame.draw.rect(screen, _color_of_this_pixel, [self.pixel_x, self.pixel_y, MAPTILE_WIDTH_IN_PIXELS, MAPTILE_HEIGHT_IN_PIXELS])
#### Functions ####
def convert_noise_map_to_maptile_map(supplied_map):
''' Return a list full of MapTiles with x, y and z values corresponding to the output of a TerrainGenerator. '''
## Note: This whole script is an example of how to interpret the more "pure" results of a noise generator as something useful to another program.
## This program interprets the noise as "terrain maps" and therefore reads the x, y and z values as a map. This function handles that conversion and could conceivably be expanded to stack multiple supplied_maps of noise, or stitch maps together or something. Or even do something rather unmaplike with the x/y/z coordinates, if desired.
new_maptile_map = []
for each_y_index in range(0, len(supplied_map)):
for each_x_index in range(0, len(supplied_map[each_y_index])):
new_maptile = MapTile(each_x_index, each_y_index, supplied_map[each_y_index][each_x_index])
new_maptile_map.append(new_maptile)
return new_maptile_map
def handle_keys(should_we_keep_the_window_open):
''' Interpret pressed keys as input commands. '''
for event in pygame.event.get(): # NOTE: This does not seem to allow for continuously-held-down keys being re-read if another key is pressed and released during the first key's held period.
if event.type == pygame.QUIT:
sys.exit
elif event.type == pygame.KEYDOWN:
## events and KEYDOWN prevent multiple firings from holding down buttan.
if event.key == pygame.K_ESCAPE:
sys.exit
pygame.quit
should_we_keep_the_window_open = False ## NOTE: Only this line ACTUALLY works! How humorous.
return should_we_keep_the_window_open
def render_all(array_of_all_maptiles):
''' Draw ALL THE PIXELS! '''
screen.fill(BLACK)
for _each_nook in array_of_all_maptiles:
_each_nook.draw_maptile()
## Don't forget this line. It uses pygame to put the things you want to see on the thing you can see.
pygame.display.flip()
#### Inits ####
## The type of generator you want to use can be changed here for now.
## Options currently include:
## PlasmaFractalGenerator()
## PerlinNoiseGenerator()
## SimplexNoiseGenerator()
## DungeonMapGenerator()
## RoomFilledMapGenerator()
## MarkIIDungeonMapGenerator()
##
## Of these, the Simplex generator is the most technically complex but is theoretically faster at creating a noise map than the Plasma and Perlin generators.
## It's not clear whether my implementation is even close to optimized for speed, though. I don't yet know enough about python/C integration to try speeding it up.
## The Perlin generator returns the best-looking terrain maps, possibly tied with the Simplex generator. They both require some fiddling with generator input parameters to get better-looking results.
## The plasma generator has some gridwards bias, but it too produces decent noise clouds, as long as you don't look too closely or get too unlucky.
## NoiseMapGenerators contains noise generators and "dungeon map generators," which are more like signal than noise, as they return maps full of rooms and corridors illustrated using two Z values (0 and 1).
## The DungeonMapGenerator produces randomly placed rectangular rooms that all connect to each other using L-shaped corridors daisy chained from one room's centerpoint to the next, in the order of room placement. This algorithm was inspired by/copied from the libtcod roguelike tutorial at < http://www.roguebasin.com/index.php?title=Complete_Roguelike_Tutorial,_using_python%2Blibtcod,_part_1 >.
## The RoomFilledMapGenerator creates maps packed full of rectangular rooms. Has significant bias and no connecting corridors. I didn't really like the direction it was going in, but it can probably be turned into something decent with some upgrading and tweaking.
## The MarkIIDungeonMapGenerator is my favorite one so far. It produces maps wherein the rooms are connected in a branching pattern such that dungeons have "wings" which can be quite lengthy and significantly subdivided.
#### MK II DUNGEON MAP GENERATOR DEMONSTRATION ####
#the_mk_ii_dungeon_map_generator = NoiseMapGenerators.MarkIIDungeonMapGenerator()
#### ROOM-FILLED MAP DEMONSTRATION ####
#the_room_filled_map_generator = NoiseMapGenerators.RoomFilledMapGenerator()
#### DUNGEON MAP DEMONSTRATION ####
#the_dungeon_generator = NoiseMapGenerators.DungeonMapGenerator()
#### SIMPLEX NOISE DEMONSTRATION ####
#the_simplex_generator = NoiseMapGenerators.SimplexNoiseGenerator()
#### PERLIN NOISE DEMONSTRATION #### cfref:perlin-gen
the_perlin_generator = NoiseMapGenerators.PerlinNoiseGenerator()
#### PLASMA FRACTAL DEMONSTRATION ####
#the_plasma_generator = NoiseMapGenerators.PlasmaFractalGenerator()
## I think I wrote this before I realized I wanted to provide sensible silent defaults for all the little options.
## It may or may not work with the 3 following lines commented.
#the_plasma_generator.displacement_min = (-40)
#the_plasma_generator.displacement_max = 40
#the_plasma_generator.reinitialize_corners(uleft_corner=135, uright_corner=245, lleft_corner=135, lright_corner=135)
#### Execution ####
## Initialize the screen
screen = pygame.display.set_mode(SCREEN_SIZE)
## Window title
pygame.display.set_caption(WINDOW_CAPTION)
## Create a clock object to make the game run at a specified speed in the main loop
clock = pygame.time.Clock()
## To keep the game running
keep_window_open = True
## DEBUG: Leaving these lines in will cause the generator to make one pass before the main loop. Combine this with commenting the regeneration of the map in the main loop to get a single map displayed for the duration of the program. cfref:one-pass
#the_map_of_noise = []
#the_grand_map = []
#the_map_of_noise = the_simplex_generator.generate_noise(128, 128)
#the_grand_test_map = convert_noise_map_to_maptile_map(the_map_of_noise)
#print(str(the_map_of_noise))
## The hash iterator moves the hash value inside the simplex noise generator through all 256 points in its range.
## IMPORTANT! This makes it NOT produce terrain-like noise. What it produces is more predictable.
## The place to put in unpredictability is in the NUMBER SHEET the simplex generator is seeded with, NOT the hash value.
## De-comment this to see it slide through the increasingly predictable patterns of the shifted hash sheet. cfref:hash-iterator
#debugging_hash_iterator = 0
#### Main Loop ####
while keep_window_open == True:
## DEBUG COMMENT TO SEE ONLY ONE PASS cfref:one-pass
the_map_of_noise = []
the_grand_test_map = []
## The hash iterator moves the hash value inside the simplex noise generator through all 256 points in its range.
## IMPORTANT! This makes it NOT produce terrain-like noise. What it produces is more predictable.
## The place to put in unpredictability is in the NUMBER SHEET the simplex generator is seeded with, NOT the hash value.
## De-comment these lines to see it slide through the increasingly predictable patterns of the shifted hash sheet. cfref:hash-iterator
#the_simplex_generator.hash_number = debugging_hash_iterator
#if debugging_hash_iterator < 255:
# debugging_hash_iterator += 1
#else:
# debugging_hash_iterator = 0
#### Regenerating the noise ####
## Multiple generators could be used for multiple purposes, but in this demonstration program only one is needed at a time.
## If using the Mk II dungeon map generator:
#the_map_of_noise = the_mk_ii_dungeon_map_generator.generate_noise(supplied_map_width=NOISE_WIDTH, supplied_map_height=NOISE_HEIGHT, room_max_size=ROOM_MAX_SIZE, room_min_size=ROOM_MIN_SIZE, room_max_count=ROOM_MAX_COUNT, room_min_count=ROOM_MIN_COUNT)
## If using a room-filled map generator:
#the_map_of_noise = the_room_filled_map_generator.generate_noise(supplied_map_width=NOISE_WIDTH, supplied_map_height=NOISE_HEIGHT, room_max_size=ROOM_MAX_SIZE, room_min_size=ROOM_MIN_SIZE)
## If using a dungeon map generator:
#the_map_of_noise = the_dungeon_generator.generate_noise(supplied_map_width=NOISE_WIDTH, supplied_map_height=NOISE_HEIGHT, room_max_size=ROOM_MAX_SIZE, room_min_size=ROOM_MIN_SIZE, room_max_count=ROOM_MAX_COUNT, room_min_count=ROOM_MIN_COUNT)
## If using a simplex noise generator:
#the_map_of_noise = the_simplex_generator.generate_noise(NOISE_WIDTH, NOISE_HEIGHT, NOISE_FREQUENCY, NOISE_OCTAVES, NOISE_PERSISTENCE)
## If using a Perlin noise generator: cfref:perlin-gen
the_map_of_noise = the_perlin_generator.generate_noise(NOISE_WIDTH, NOISE_HEIGHT, NOISE_FREQUENCY, NOISE_OCTAVES)
## If using a plasma fractal generator:
#the_map_of_noise = the_plasma_generator.generate_noise(x=0, y=0, supplied_width=NOISE_WIDTH, supplied_height=NOISE_HEIGHT)
## DEBUG COMMENT TO SEE ONLY ONE PASS cfref:one-pass
the_grand_test_map = convert_noise_map_to_maptile_map(the_map_of_noise)
## Process keyboard input
keep_window_open = handle_keys(keep_window_open)
## Game speed and event progression metering. Measured in maximum permissible frames per second.
clock.tick(30)
#clock.tick(1)
## Graphical display for much human friendly.
render_all(the_grand_test_map)
## DEBUG COMMENT TO SEE ONLY ONE PASS cfref:one-pass
del the_map_of_noise
del the_grand_test_map
# "Be IDLE friendly," they said.
pygame.quit
| BFriedland/TerrainGenerators | PerlinGeneratorDemo.py | Python | mit | 17,655 | 0.020164 |
# -*- coding: utf-8 -*-
from uamobile.factory.base import AbstractUserAgentFactory
from uamobile.ezweb import EZwebUserAgent
from uamobile.parser import CachingEZwebUserAgentParser
class EZwebUserAgentFactory(AbstractUserAgentFactory):
device_class = EZwebUserAgent
parser = CachingEZwebUserAgentParser()
| csakatoku/uamobile | uamobile/factory/ezweb.py | Python | mit | 321 | 0.009346 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions.
"""
import ConfigParser
import socket
import urllib2
def bash_quote(text):
"""Quotes a string for bash, by using single quotes."""
if text == None:
return ""
return "'%s'" % text.replace("'", "'\\''")
def bash_quote_env(env):
"""Quotes the value in an environment variable assignment."""
if env.find("=") == -1:
return env
(var, value) = env.split("=")
return "%s=%s" % (var, bash_quote(value))
def build_env_string(env_strings=[], pairs={}):
"""Build a bash environment variable assignment"""
env = ''
if env_strings:
for env_string in env_strings:
env += "%s " % bash_quote_env(env_string)
if pairs:
for key, val in pairs.items():
env += "%s=%s " % (key, bash_quote(val))
return env[:-1]
def merge_config_with_options(section_name, config, options):
"""
Merge configuration options with a dictionary of options.
Keys in the options dictionary take precedence.
"""
res = {}
try:
for (key, value) in config.items(section_name):
if value.find("\n") != -1:
res[key] = value.split("\n")
else:
res[key] = value
except ConfigParser.NoSectionError:
pass
for key in options:
if options[key] != None:
res[key] = options[key]
return res
def url_get(url, timeout=10, retries=0):
"""
Retrieve content from the given URL.
"""
# in Python 2.6 we can pass timeout to urllib2.urlopen
socket.setdefaulttimeout(timeout)
attempts = 0
while True:
try:
return urllib2.urlopen(url).read()
except urllib2.URLError:
attempts = attempts + 1
if attempts > retries:
raise
def xstr(string):
"""Sane string conversion: return an empty string if string is None."""
return '' if string is None else str(string)
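# Illustrative sketch (not part of the original module): quick demonstration of
# the quoting and environment helpers above (Python 2, matching this module).
if __name__ == '__main__':
    print build_env_string(env_strings=["FOO=a b", "BAR=plain"],
                           pairs={"BAZ": "it's quoted"})
    # -> FOO='a b' BAR='plain' BAZ='it'\''s quoted'
    print bash_quote("don't")   # 'don'\''t'
    print repr(xstr(None))      # ''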
| ZhangXFeng/hadoop | src/hadoop-mapreduce1-project/src/contrib/cloud/src/py/hadoop/cloud/util.py | Python | apache-2.0 | 2,562 | 0.016393 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import connection
from django.conf import settings
from django.utils import timezone
from taiga.projects.history import services as history_service
from taiga.projects.history.choices import HistoryType
from . import tasks
def _get_project_webhooks(project):
webhooks = []
for webhook in project.webhooks.all():
webhooks.append({
"id": webhook.pk,
"url": webhook.url,
"key": webhook.key,
})
return webhooks
def on_new_history_entry(sender, instance, created, **kwargs):
if not settings.WEBHOOKS_ENABLED:
return None
if instance.is_hidden:
return None
model = history_service.get_model_from_key(instance.key)
pk = history_service.get_pk_from_key(instance.key)
try:
obj = model.objects.get(pk=pk)
except model.DoesNotExist:
# Catch simultaneous DELETE request
return None
webhooks = _get_project_webhooks(obj.project)
if instance.type == HistoryType.create:
task = tasks.create_webhook
extra_args = []
elif instance.type == HistoryType.change:
task = tasks.change_webhook
extra_args = [instance]
elif instance.type == HistoryType.delete:
task = tasks.delete_webhook
extra_args = []
by = instance.owner
date = timezone.now()
webhooks_args = []
for webhook in webhooks:
args = [webhook["id"], webhook["url"], webhook["key"], by, date, obj] + extra_args
webhooks_args.append(args)
connection.on_commit(lambda: _execute_task(task, webhooks_args))
def _execute_task(task, webhooks_args):
for webhook_args in webhooks_args:
if settings.CELERY_ENABLED:
task.delay(*webhook_args)
else:
task(*webhook_args)
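# Note (illustrative, not part of the original module): the handler signature
# above matches Django's post_save signal, so the expected wiring -- assumed
# here, the actual connection lives elsewhere in the project -- looks like:
#   from django.db.models.signals import post_save
#   post_save.connect(on_new_history_entry, sender=HistoryEntry)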
| taigaio/taiga-back | taiga/webhooks/signal_handlers.py | Python | agpl-3.0 | 2,526 | 0.000396 |
from caffe import io as c
import numpy as np
import os,sys
if len(sys.argv) < 3:
print 'Use: convertProtobinToNumpy protobinFile numpyOutput'
sys.exit()
protoData = c.caffe_pb2.BlobProto()
f = open(sys.argv[1],'rb')
protoData.ParseFromString(f.read())
f.close()
array = c.blobproto_to_array(protoData)
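# blobproto_to_array returns an (N, C, H, W) array; the line below takes the
# first blob, swaps axes to (H, W, C) and reverses the channel order
# (presumably BGR -> RGB) before saving, the layout most image tools expect.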
np.save(sys.argv[2],array[0].swapaxes(1, 0).swapaxes(2,1)[:, :, ::-1])
A = np.load(sys.argv[2]+'.npy')
print 'Final matrix shape:',A.shape
| jccaicedo/localization-agent | learn/cnn/convertProtobinToNumpy.py | Python | mit | 447 | 0.017897 |
"""
# TOP2049 Open Source programming suite
#
# Atmel AT89C2051 DIP20 Support
#
# Copyright (c) 2010 Guido
# Copyright (c) 2010 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from libtoprammer.chip import *
class Chip_AT89C2051dip20(Chip):
STAT_BUSY = 0x01 # Programmer is running a command
STAT_ERR = 0x02 # Error during write
def __init__(self):
Chip.__init__(self,
chipPackage = "DIP20",
chipPinVCC = 20,
chipPinsVPP = 1,
chipPinGND = 10)
def __initChip(self):
self.applyVCC(False)
self.applyVPP(False)
self.applyGND(True)
self.top.cmdSetVCCVoltage(5)
self.top.cmdSetVPPVoltage(5)
def readSignature(self):
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.top.cmdSetVPPVoltage(5)
self.__loadCommand(5) # VPP on
self.__loadCommand(1) # set P3.2
self.__setP3x(P33=0, P34=0, P35=0, IA=0)
data = b""
self.top.cmdFPGARead(0x10)
self.__setP3x(P33=0, P34=0, P35=0, IA=1)
self.__setP3x(P33=0, P34=0, P35=0, IA=0)
self.top.cmdFPGARead(0x10)
self.__setP3x(P33=0, P34=0, P35=0, IA=1)
self.__setP3x(P33=0, P34=0, P35=0, IA=0)
self.top.cmdFPGARead(0x10)
data += self.top.cmdReadBufferReg()
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(6) # VPP off
signature = b""
signature += int2byte(data[0])
signature += int2byte(data[1])
self.top.printInfo("Signature: %X, %X" % (byte2int(signature[0]), byte2int(signature[1])))
return signature
def erase(self):
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.__loadCommand(1) # set P3.2
self.top.cmdSetVPPVoltage(5)
self.applyVPP(True)
self.__loadCommand(5) # VPP on
self.__setP3x(P33=1, P34=0, P35=0, IA=0)
self.top.cmdSetVPPVoltage(12)
self.__runCommandSync(4)
self.applyVPP(False)
self.top.cmdSetVPPVoltage(5)
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(5) # VPP off
self.top.flushCommands()
self.top.printInfo("at89c2051dip20: Erasing flash, verifying ...")
ok = self.__verifyErase()
if ok == 0:
self.top.printInfo("at89c2051dip20: Erase done.")
else:
self.top.printInfo("at89c2051dip20: Erase failed!")
def readProgmem(self):
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.__loadCommand(1) # set P3.2
self.top.cmdSetVPPVoltage(5)
self.applyVPP(True)
self.__loadCommand(5) # VPP on
self.__setP3x(P33=0, P34=0, P35=1, IA=0)
image = b""
byteCount = 0
self.progressMeterInit("Reading Flash", 0x800)
for addr in range(0, 0x800):
self.progressMeter(addr)
self.top.cmdFPGARead(0x10)
self.__setP3x(P33=0, P34=0, P35=1, IA=1)
self.__setP3x(P33=0, P34=0, P35=1, IA=0)
byteCount += 1
if byteCount == self.top.getBufferRegSize():
image += self.top.cmdReadBufferReg(byteCount)
byteCount = 0
image += self.top.cmdReadBufferReg(byteCount)
self.applyVPP(False)
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(5) # VPP off
self.top.flushCommands()
self.progressMeterFinish()
return image
def writeProgmem(self, image):
if len(image) > 0x800:
self.throwError("Invalid EPROM image size %d (expected <=%d)" %\
(len(image), 0x800))
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.__loadCommand(1) # set P3.2
self.top.cmdSetVPPVoltage(5)
self.applyVPP(True)
self.__loadCommand(5) # VPP on
self.__setP3x(P33=0, P34=1, P35=1, IA=0)
self.top.cmdSetVPPVoltage(12)
self.progressMeterInit("Writing Flash", len(image))
for addr in range(0, len(image)):
self.progressMeter(addr)
data = byte2int(image[addr])
if data != 0xFF:
self.__loadData(data)
self.__loadCommand(3)
ok = self.__progWait()
if (ok & self.STAT_ERR) != 0:
self.throwError("Write byte failed.")
self.__setP3x(P33=0, P34=1, P35=1, IA=1)
self.__setP3x(P33=0, P34=1, P35=1, IA=0)
self.applyVPP(False)
self.top.cmdSetVPPVoltage(5)
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(5) # VPP off
self.top.flushCommands()
self.progressMeterFinish()
ok = self.__verifyProgmem(image)
if ok == 0:
self.top.printInfo("at89c2051dip20: Write flash done.")
else:
self.top.printInfo("at89c2051dip20: Write flash failed!")
def __verifyErase(self):
ok = 0
image = self.readProgmem()
for addr in range(0, 0x800):
if byte2int(image[addr]) != 0xFF:
ok = 1
return ok
def __verifyProgmem(self,image):
data = self.readProgmem()
ok = 0
for addr in range(0, 0x800):
if byte2int(image[addr]) != byte2int(data[addr]):
ok = 1
return ok
def __loadData(self, data):
self.top.cmdFPGAWrite(0x10, data)
def __loadCommand(self, command):
self.top.cmdFPGAWrite(0x12, command & 0xFF)
def __runCommandSync(self, command):
self.__loadCommand(command)
self.__busyWait()
def __setP3x(self, P33, P34, P35, IA):
data = 0
if P33:
data |= 1
if P34:
data |= 2
if P35:
data |= 4
if IA:
data |= 8
self.top.cmdFPGAWrite(0x16, data)
def __getStatusFlags(self):
self.top.cmdFPGARead(0x12)
stat = self.top.cmdReadBufferReg()
return byte2int(stat[0])
def __busy(self):
return bool(self.__getStatusFlags() & self.STAT_BUSY)
def __busyWait(self):
for i in range(0, 26):
if not self.__busy():
return
self.top.hostDelay(0.001)
self.throwError("Timeout in busywait.")
def __progWait(self):
for i in range(0,4):
self.top.cmdFPGARead(0x12)
stat = self.top.cmdReadBufferReg()
if (byte2int(stat[0]) & self.STAT_BUSY) == 0:
return byte2int(stat[0])
self.top.hostDelay(0.001)
self.throwError("Timeout in busywait.")
ChipDescription(
Chip_AT89C2051dip20,
bitfile = "at89c2051dip20",
runtimeID = (0x0005, 0x01),
chipVendors = "Atmel",
description = "AT89C2051",
maintainer = None,
packages = ( ("DIP20", ""), )
)
| mbuesch/toprammer | libtoprammer/chips/at89c2051dip20.py | Python | gpl-2.0 | 6,509 | 0.036104 |
# -*- coding: utf-8 -*-
'''
Module to manage Linux kernel modules
'''
# Import python libs
import os
import re
# Import salt libs
import salt.utils
def __virtual__():
'''
Only runs on Linux systems
'''
return 'kmod' if __grains__['kernel'] == 'Linux' else False
def _new_mods(pre_mods, post_mods):
'''
Return a list of the new modules, pass an lsmod dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return post - pre
def _rm_mods(pre_mods, post_mods):
'''
    Return a list of the removed modules, pass an lsmod dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return pre - post
def _union_module(a, b):
'''
    Return the union of two lists, with duplicated items included only once
'''
return list(set(a) | set(b))
def _get_modules_conf():
'''
Return location of modules config file.
Default: /etc/modules
'''
if __grains__['os'] == 'Arch':
return '/etc/modules-load.d/salt_managed.conf'
return '/etc/modules'
def _strip_module_name(mod):
'''
Return module name and strip configuration. It is possible insert modules
in this format:
bonding mode=4 miimon=1000
This method return only 'bonding'
'''
if mod.strip() == '':
return False
return mod.split()[0]
def _set_persistent_module(mod):
'''
Add module to configuration file to make it persistent. If module is
commented uncomment it.
'''
conf = _get_modules_conf()
if not os.path.exists(conf):
__salt__['file.touch'](conf)
mod_name = _strip_module_name(mod)
if not mod_name or mod_name in mod_list(True) or mod_name not in available():
return set()
escape_mod = re.escape(mod)
## If module is commented only uncomment it
if __salt__['file.contains_regex_multiline'](conf, "^#[\t ]*{}[\t ]*$".format(escape_mod)):
__salt__['file.uncomment'](conf, escape_mod)
else:
__salt__['file.append'](conf, mod)
return set([mod_name])
def _remove_persistent_module(mod, comment):
'''
    Remove module from configuration file. If comment is true, only comment out
    the line containing the module.
'''
conf = _get_modules_conf()
mod_name = _strip_module_name(mod)
if not mod_name or mod_name not in mod_list(True):
return set()
escape_mod = re.escape(mod)
if comment:
__salt__['file.comment'](conf, "^[\t ]*{}[\t ]?".format(escape_mod))
else:
__salt__['file.sed'](conf, "^[\t ]*{}[\t ]?".format(escape_mod), '')
return set([mod_name])
def available():
'''
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
'''
ret = []
mod_dir = os.path.join('/lib/modules/', os.uname()[2])
for root, dirs, files in os.walk(mod_dir):
for fn_ in files:
if '.ko' in fn_:
ret.append(fn_[:fn_.index('.ko')])
return sorted(list(ret))
def check_available(mod):
'''
Check to see if the specified kernel module is available
CLI Example:
.. code-block:: bash
salt '*' kmod.check_available kvm
'''
return mod in available()
def lsmod():
'''
Return a dict containing information about currently loaded modules
CLI Example:
.. code-block:: bash
salt '*' kmod.lsmod
'''
ret = []
for line in __salt__['cmd.run']('lsmod').splitlines():
comps = line.split()
if not len(comps) > 2:
continue
if comps[0] == 'Module':
continue
mdat = {
'size': comps[1],
'module': comps[0],
'depcount': comps[2],
}
if len(comps) > 3:
mdat['deps'] = comps[3].split(',')
else:
mdat['deps'] = []
ret.append(mdat)
return ret
def mod_list(only_persist=False):
'''
Return a list of the loaded module names
CLI Example:
.. code-block:: bash
salt '*' kmod.mod_list
'''
mods = set()
if only_persist:
conf = _get_modules_conf()
if os.path.exists(conf):
with salt.utils.fopen(conf, 'r') as modules_file:
for line in modules_file:
line = line.strip()
mod_name = _strip_module_name(line)
if not line.startswith('#') and mod_name:
mods.add(mod_name)
else:
for mod in lsmod():
mods.add(mod['module'])
return sorted(list(mods))
def load(mod, persist=False):
'''
Load the specified kernel module
mod
Name of module to add
persist
Write module to /etc/modules to make it load on system reboot
CLI Example:
.. code-block:: bash
salt '*' kmod.load kvm
'''
pre_mods = lsmod()
response = __salt__['cmd.run_all']('modprobe {0}'.format(mod))
if response['retcode'] == 0:
post_mods = lsmod()
mods = _new_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _set_persistent_module(mod)
return sorted(list(mods | persist_mods))
else:
return 'Module {0} not found'.format(mod)
def is_loaded(mod):
'''
Check to see if the specified kernel module is loaded
CLI Example:
.. code-block:: bash
salt '*' kmod.is_loaded kvm
'''
return mod in mod_list()
def remove(mod, persist=False, comment=True):
'''
Remove the specified kernel module
mod
Name of module to remove
persist
Also remove module from /etc/modules
comment
If persist is set don't remove line from /etc/modules but only
comment it
CLI Example:
.. code-block:: bash
salt '*' kmod.remove kvm
'''
pre_mods = lsmod()
__salt__['cmd.run_all']('modprobe -r {0}'.format(mod))
post_mods = lsmod()
mods = _rm_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _remove_persistent_module(mod, comment)
return sorted(list(mods | persist_mods))
| victorywang80/Maintenance | saltstack/src/salt/modules/kmod.py | Python | apache-2.0 | 6,443 | 0.000466 |
import numpy as np
def point_in_hull(point, hull, tolerance=1e-12):
return all((np.dot(eq[:-1], point) + eq[-1] <= tolerance) for eq in hull.equations)
def n_points_in_hull(points, hull):
n_points = 0
for i in range(points.shape[0]):
if point_in_hull(points[i, :], hull):
n_points = n_points + 1
return n_points
def are_in_hull(points, hull):
ins = []
outs = []
for i in range(points.shape[0]):
if point_in_hull(points[i, :], hull):
ins.append(i)
else:
outs.append(i)
return ins, outs
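# Illustrative sketch (not part of the original module): exercising the helpers
# with a hull built from random 2D points; requires scipy.
if __name__ == "__main__":
    from scipy.spatial import ConvexHull

    np.random.seed(0)
    cloud = np.random.rand(30, 2)          # points spread over the unit square
    hull = ConvexHull(cloud)               # hull.equations is what the helpers use
    probes = np.array([[0.5, 0.5], [2.0, 2.0]])
    print(n_points_in_hull(probes, hull))  # 1: only the centre probe is inside
    print(are_in_hull(probes, hull))       # ([0], [1])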
| ethz-asl/segmatch | segmappy/segmappy/tools/hull.py | Python | bsd-3-clause | 583 | 0.001715 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is a plugin for the Shinken Broker. It is in charge
# to brok information of the service perfdata into the file
# var/service-perfdata
# So it just manage the service_check_return
# Maybe one day host data will be useful too
# It will need just a new file, and a new manager :)
import codecs
from shinken.basemodule import BaseModule
properties = {
'daemons': ['broker'],
'type': 'service_perfdata',
'phases': ['running'],
}
# called by the plugin manager to get a broker
def get_instance(plugin):
print "Get a Service Perfdata broker for plugin %s" % plugin.get_name()
# Catch errors
path = plugin.path
if hasattr(plugin, 'mode'):
mode = plugin.mode
else:
mode = 'a'
if hasattr(plugin, 'template'):
template = plugin.template
else:
template = "$LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEOUTPUT$\t$SERVICESTATE$\t$SERVICEPERFDATA$\n"
# int(data['last_chk']),data['host_name'], data['service_description'], data['output'], current_state, data['perf_data']
instance = Service_perfdata_broker(plugin, path, mode, template)
return instance
# Class for the Service Perfdata Broker
# Gets broks and writes service perfdata lines to a file
class Service_perfdata_broker(BaseModule):
def __init__(self, modconf, path, mode, template):
BaseModule.__init__(self, modconf)
self.path = path
self.mode = mode
self.template = template
# Make some raw change
self.template = self.template.replace(r'\t', '\t')
self.template = self.template.replace(r'\n', '\n')
        # In Nagios it's said the template must end with a newline
if not self.template.endswith('\n'):
self.template += '\n'
self.buffer = []
# Called by Broker so we can do init stuff
# TODO: add conf param to get pass with init
# Conf from arbiter!
def init(self):
print "[%s] I open the service-perfdata file '%s'" % (self.name, self.path)
# Try to open the file to be sure we can
self.file = codecs.open(self.path, self.mode, "utf-8")
self.file.close()
    # We've got a 0, 1, 2 or 3 (anything else is treated as UNKNOWN)
# And want a real OK, WARNING, CRITICAL, etc...
def resolve_service_state(self, state):
states = {0: 'OK', 1: 'WARNING', 2: 'CRITICAL', 3: 'UNKNOWN'}
if state in states:
return states[state]
else:
return 'UNKNOWN'
# A service check have just arrived, we UPDATE data info with this
def manage_service_check_result_brok(self, b):
data = b.data
# The original model
# "$TIMET\t$HOSTNAME\t$SERVICEDESC\t$OUTPUT\t$SERVICESTATE\t$PERFDATA\n"
current_state = self.resolve_service_state(data['state_id'])
macros = {
'$LASTSERVICECHECK$': int(data['last_chk']),
'$HOSTNAME$': data['host_name'],
'$SERVICEDESC$': data['service_description'],
'$SERVICEOUTPUT$': data['output'],
'$SERVICESTATE$': current_state,
'$SERVICEPERFDATA$': data['perf_data'],
'$LASTSERVICESTATE$': data['last_state'],
}
s = self.template
for m in macros:
#print "Replacing in %s %s by %s" % (s, m, str(macros[m]))
s = s.replace(m, unicode(macros[m]))
#s = "%s\t%s\t%s\t%s\t%s\t%s\n" % (int(data['last_chk']),data['host_name'], \
# data['service_description'], data['output'], \
# current_state, data['perf_data'] )
self.buffer.append(s)
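    # Illustrative example (not from the original module): with the default
    # template a check result for host "web01", service "HTTP" in state 1
    # appends a line shaped like
    #   1336389600<TAB>web01<TAB>HTTP<TAB>HTTP WARNING - slow response<TAB>WARNING<TAB>time=2.3s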
# Each second the broker say it's a new second. Let use this to
# dump to the file
def hook_tick(self, brok):
# Go to write it :)
buf = self.buffer
self.buffer = []
try:
self.file = codecs.open(self.path, self.mode, "utf-8")
for s in buf:
self.file.write(s)
self.file.flush()
self.file.close()
except IOError, exp: # Maybe another tool is just getting it, pass
pass
| shinken-monitoring/mod-perfdata-service | module/module.py | Python | agpl-3.0 | 5,056 | 0.001978 |
import json
from bson.errors import InvalidId
from flask import Blueprint, jsonify, request, g
item_api = Blueprint('itemApi', __name__)
def get_item_as_object(item) -> dict:
return_item = {
"_id": str(item['_id']),
"name": item['name'],
"description": item['description'],
"imageURL": item['imageURL'],
"price": item['price'],
"calories": item['calories'],
"category": item['category'],
"tags": item['tags']
}
if 'isRecommended' in item:
return_item['isRecommended'] = item['isRecommended']
return return_item
@item_api.route('/item', methods=['GET'])
def get_all_items() -> tuple:
"""
swagger_from_file: ../swagger/item/getItems.yml
returns all the items as a json array
:return:
"""
from hopkin.models.items import Item
# get all items
items = Item.get_all()
# create items list
items_list = []
# create response
for item in items:
items_list.append(get_item_as_object(item))
return jsonify({'data': {'items': items_list}})
@item_api.route('/item/id/<item_id>', methods=['GET'])
def get_item_by_id(item_id) -> tuple:
"""
swagger_from_file: ../swagger/item/getItem.yml
    returns a single item as a json object
:return:
"""
from hopkin.models.items import Item
# find specific item
item = Item.get_by_id(item_id)
return jsonify({'data': {'item': get_item_as_object(item)}})
@item_api.route('/item/category/<category>', methods=['GET'])
def get_item_by_category(category) -> tuple:
"""
swagger_from_file: ../swagger/item/getItemsByCategory.yml
returns all the items in a category as a json array
:return:
"""
from hopkin.models.items import Item
# find items by category
items = Item.get_by_category(category)
# create items list
items_list = []
# create response
for item in items:
items_list.append(get_item_as_object(item))
return jsonify({'data': {'items': items_list}})
@item_api.route('/item/category/<category>/count', methods=['GET'])
def get_category_count(category) -> tuple:
"""
swagger_from_file: ../swagger/item/getNumItemsInCat.yml
    Returns the number of items in that category
:param category:
:return:
"""
json_response = get_item_by_category(category)
return jsonify({'data': {'count': len(json.loads(json_response.data)['data']['items'])}})
@item_api.route('/item/search', methods=['GET'])
def search_item() -> tuple:
"""
swagger_from_file: ../swagger/item/searchItem.yml
    Searches items: queries of 3 characters or fewer only
    search item names; longer queries search both names
    and tags
:return:
"""
from hopkin.models.items import Item
items_list = []
query: str = request.args['q']
    if not query:
return jsonify({'error': 'no search results provided'})
query = query.title()
items = list(Item.get_by_name_search(query.lower()))
if len(query) > 3:
items = items + list(Item.get_by_tag_starts_with(query.lower()))
unique_ids = []
    for item in items:
        if str(item['_id']) not in unique_ids:
            # reuse the shared serializer so items without the optional
            # 'isRecommended' flag don't raise a KeyError
            items_list.append(get_item_as_object(item))
            unique_ids.append(str(item['_id']))
return jsonify({'data': {'items': items_list}})
@item_api.route('/rate/item/<itemid>', methods=['GET'])
def get_rating(itemid: str) -> tuple:
"""
swagger_from_file: ../swagger/item/getItemRating.yml
Gets a user rating of an item
:param itemid:
:return:
"""
from hopkin.models.ratings import Rating
user_id = str(g.user_id)
rating = Rating.get_rating(itemid, user_id)
if rating is None:
return jsonify({
'error': {'error': {'message': 'No Rating for item'}}
})
return jsonify({'data': {'rating': {
'item_id': rating['item_id'],
'rating': rating['rating'],
}}})
@item_api.route('/rate/item', methods=['POST'])
def rate_item() -> tuple:
"""
Adds a user rating of an item
:return:
"""
from hopkin.models.items import Item
from hopkin.models.ratings import Rating
if request.json is None:
return jsonify({'error': 'invalid request'})
try:
item_id = Item.get_by_id(request.json['itemid'])
if item_id is None:
return jsonify({'error': f"No item with id: {request.json['itemid']} found"}), 400
elif request.json['rating'] > 5:
            return jsonify({'error': 'rating can\'t be greater than 5'}), 400
except InvalidId:
return jsonify({'error': 'Invalid item id format'}), 400
user_id = str(g.user_id)
rating = Rating.get_rating(request.json['itemid'], user_id)
if rating is None:
Rating.save({
'item_id': request.json['itemid'],
'user_id': user_id,
'rating': request.json['rating']
})
return jsonify({'data': {'success': True, 'message': 'new rating added'}})
    rating['item_id'] = request.json['itemid']
    rating['user_id'] = user_id
    rating['rating'] = request.json['rating']
    Rating.update(rating)
return jsonify({'data': {'success': True, 'message': 'rating updated'}})
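# Illustrative request body for POST /rate/item (the item id is made up):
#
#   {"itemid": "5c9a8f2e1c4ae7b2d3f00a11", "rating": 4}
#
# The first call from a user inserts a new rating document; later calls
# from the same user update the stored rating for that item.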
@item_api.route('/admin/item/add', methods=['POST'])
def add_new_item() -> tuple:
"""
swagger_from_file: ../swagger/item/itemAdd.yml
adds an item to the database and returns it in a JSON object
:return:
"""
from hopkin.models.items import Item
if request.json is not None and g.is_admin:
new_item = {
'name': request.json['name'],
'description': request.json['description'],
'imageURL': request.json['imageURL'],
'price': request.json['price'],
'calories': request.json['calories'],
'category': request.json['category'],
'tags': request.json['tags'],
"isRecommended": request.json['isRecommended']
}
new_item_id = Item.insert(new_item)
return jsonify({'data': {'item': request.json, 'itemId': str(new_item_id)}})
    return jsonify({'error': f'invalid item: {request.json}'}), 403
@item_api.route('/admin/item/delete/<item_id>', methods=['POST'])
def delete_item(item_id):
"""
swagger_from_file: ../swagger/item/deleteItem.yml
deletes the selected item from the database
:return:
"""
from hopkin.models.items import Item
# search for item by id
item = Item.get_by_id(str(item_id))
if item is not None and g.is_admin:
# remove item
Item.remove(item_id)
return jsonify({'data': {'success': True}})
return jsonify({'error': 'No item found with id ' + item_id})
@item_api.route('/admin/item/update', methods=['POST'])
def update_item():
"""
swagger_from_file: ../swagger/item/updateItem.yml
updated the selected item in the database
:return:
"""
from hopkin.models.items import Item
if request.json is not None:
item_update = Item.get_by_id(request.json['_id'])
item_update['calories'] = request.json['calories']
item_update['category'] = request.json['category']
item_update['description'] = request.json['description']
# will be updated to get base64 image
item_update['imageURL'] = request.json['imageURL']
item_update['name'] = request.json['name']
item_update['price'] = request.json['price']
item_update['tags'] = request.json['tags']
item_update['isRecommended'] = request.json['isRecommended']
Item.save(item_update)
return jsonify({'data': {'message': 'Updated with item id: ' + str(item_update['_id']),
'mongo_id': str(item_update['_id'])}
})
return jsonify({'error': 'item not updated'})
@item_api.route('/item/recommendations', methods=['GET'])
def get_recommendations() -> tuple:
"""
swagger_from_file: ../swagger/item/getRecommended.yml
returns all the items as a json array
:return:
"""
from hopkin.models.items import Item
# get all items
items = Item.get_recommended()
# create items list
items_list = []
# create response
for item in items:
items_list.append(get_item_as_object(item))
return jsonify({'data': {'items': items_list}})
| project-hopkins/Westworld | hopkin/routes/items.py | Python | gpl-3.0 | 8,707 | 0.001034 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_diplomat_zabrak_male_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","zabrak_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_diplomat_zabrak_male_01.py | Python | mit | 457 | 0.045952 |
"""Version information."""
__version__ = '0.0.2'
| kstaniek/pampio | pampio/version.py | Python | apache-2.0 | 50 | 0 |
# Copyright 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fastfood Chef Cookbook manager."""
from __future__ import print_function
import os
from fastfood import utils
class CookBook(object):
"""Chef Cookbook object.
Understands metadata.rb, Berksfile and how to parse them.
"""
def __init__(self, path):
"""Initialize CookBook wrapper at 'path'."""
self.path = utils.normalize_path(path)
self._metadata = None
if not os.path.isdir(path):
raise ValueError("Cookbook dir %s does not exist."
% self.path)
self._berksfile = None
@property
def name(self):
"""Cookbook name property."""
try:
return self.metadata.to_dict()['name']
except KeyError:
raise LookupError("%s is missing 'name' attribute'."
% self.metadata)
@property
def metadata(self):
"""Return dict representation of this cookbook's metadata.rb ."""
self.metadata_path = os.path.join(self.path, 'metadata.rb')
if not os.path.isfile(self.metadata_path):
raise ValueError("Cookbook needs metadata.rb, %s"
% self.metadata_path)
if not self._metadata:
self._metadata = MetadataRb(open(self.metadata_path, 'r+'))
return self._metadata
@property
def berksfile(self):
"""Return this cookbook's Berksfile instance."""
self.berks_path = os.path.join(self.path, 'Berksfile')
if not self._berksfile:
if not os.path.isfile(self.berks_path):
raise ValueError("No Berksfile found at %s"
% self.berks_path)
self._berksfile = Berksfile(open(self.berks_path, 'r+'))
return self._berksfile
class MetadataRb(utils.FileWrapper):
"""Wrapper for a metadata.rb file."""
@classmethod
def from_dict(cls, dictionary):
"""Create a MetadataRb instance from a dict."""
cookbooks = set()
# put these in order
groups = [cookbooks]
for key, val in dictionary.items():
if key == 'depends':
cookbooks.update({cls.depends_statement(cbn, meta)
for cbn, meta in val.items()})
body = ''
for group in groups:
if group:
body += '\n'
body += '\n'.join(group)
return cls.from_string(body)
@staticmethod
def depends_statement(cookbook_name, metadata=None):
"""Return a valid Ruby 'depends' statement for the metadata.rb file."""
line = "depends '%s'" % cookbook_name
if metadata:
if not isinstance(metadata, dict):
raise TypeError("Stencil dependency options for %s "
"should be a dict of options, not %s."
% (cookbook_name, metadata))
if metadata:
line = "%s '%s'" % (line, "', '".join(metadata))
return line
def to_dict(self):
"""Return a dictionary representation of this metadata.rb file."""
return self.parse()
def parse(self):
"""Parse the metadata.rb into a dict."""
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
depends = {}
for line in data:
if not len(line) == 2:
continue
key, value = line
if key == 'depends':
value = value.split(',')
lib = utils.ruby_strip(value[0])
detail = [utils.ruby_strip(j) for j in value[1:]]
depends[lib] = detail
datamap = {key: utils.ruby_strip(val) for key, val in data}
if depends:
datamap['depends'] = depends
self.seek(0)
return datamap
def merge(self, other):
"""Add requirements from 'other' metadata.rb into this one."""
if not isinstance(other, MetadataRb):
raise TypeError("MetadataRb to merge should be a 'MetadataRb' "
"instance, not %s.", type(other))
current = self.to_dict()
new = other.to_dict()
# compare and gather cookbook dependencies
meta_writelines = ['%s\n' % self.depends_statement(cbn, meta)
for cbn, meta in new.get('depends', {}).items()
if cbn not in current.get('depends', {})]
self.write_statements(meta_writelines)
return self.to_dict()
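# Illustrative use of MetadataRb.from_dict() (cookbook names are assumptions):
#
#   MetadataRb.from_dict({'depends': {'apt': {}, 'postgresql': {}}})
#
# produces a metadata.rb body containing, in no particular order:
#
#   depends 'apt'
#   depends 'postgresql'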
class Berksfile(utils.FileWrapper):
"""Wrapper for a Berksfile."""
berks_options = [
'branch',
'git',
'path',
'ref',
'revision',
'tag',
]
def to_dict(self):
"""Return a dictionary representation of this Berksfile."""
return self.parse()
def parse(self):
"""Parse this Berksfile into a dict."""
self.flush()
self.seek(0)
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
datamap = {}
for line in data:
if len(line) == 1:
datamap[line[0]] = True
elif len(line) == 2:
key, value = line
if key == 'cookbook':
datamap.setdefault('cookbook', {})
value = [utils.ruby_strip(v) for v in value.split(',')]
lib, detail = value[0], value[1:]
datamap['cookbook'].setdefault(lib, {})
# if there is additional dependency data but its
# not the ruby hash, its the version constraint
if detail and not any("".join(detail).startswith(o)
for o in self.berks_options):
constraint, detail = detail[0], detail[1:]
datamap['cookbook'][lib]['constraint'] = constraint
if detail:
for deet in detail:
opt, val = [
utils.ruby_strip(i)
for i in deet.split(':', 1)
]
if not any(opt == o for o in self.berks_options):
raise ValueError(
"Cookbook detail '%s' does not specify "
"one of '%s'" % (opt, self.berks_options))
else:
datamap['cookbook'][lib][opt.strip(':')] = (
utils.ruby_strip(val))
elif key == 'source':
datamap.setdefault(key, [])
datamap[key].append(utils.ruby_strip(value))
elif key:
datamap[key] = utils.ruby_strip(value)
self.seek(0)
return datamap
@classmethod
def from_dict(cls, dictionary):
"""Create a Berksfile instance from a dict."""
cookbooks = set()
sources = set()
other = set()
# put these in order
groups = [sources, cookbooks, other]
for key, val in dictionary.items():
if key == 'cookbook':
cookbooks.update({cls.cookbook_statement(cbn, meta)
for cbn, meta in val.items()})
elif key == 'source':
sources.update({"source '%s'" % src for src in val})
elif key == 'metadata':
other.add('metadata')
body = ''
for group in groups:
if group:
body += '\n'
body += '\n'.join(group)
return cls.from_string(body)
@staticmethod
def cookbook_statement(cookbook_name, metadata=None):
"""Return a valid Ruby 'cookbook' statement for the Berksfile."""
line = "cookbook '%s'" % cookbook_name
if metadata:
if not isinstance(metadata, dict):
raise TypeError("Berksfile dependency hash for %s "
"should be a dict of options, not %s."
% (cookbook_name, metadata))
# not like the others...
if 'constraint' in metadata:
line += ", '%s'" % metadata.pop('constraint')
for opt, spec in metadata.items():
line += ", %s: '%s'" % (opt, spec)
return line
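    # Illustrative output of cookbook_statement() (values are assumptions):
    #
    #   Berksfile.cookbook_statement(
    #       'mysql', {'constraint': '~> 6.0', 'git': 'https://example.com/mysql.git'})
    #
    # returns:
    #
    #   cookbook 'mysql', '~> 6.0', git: 'https://example.com/mysql.git'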
def merge(self, other):
"""Add requirements from 'other' Berksfile into this one."""
if not isinstance(other, Berksfile):
raise TypeError("Berksfile to merge should be a 'Berksfile' "
"instance, not %s.", type(other))
current = self.to_dict()
new = other.to_dict()
# compare and gather cookbook dependencies
berks_writelines = ['%s\n' % self.cookbook_statement(cbn, meta)
for cbn, meta in new.get('cookbook', {}).items()
if cbn not in current.get('cookbook', {})]
# compare and gather 'source' requirements
berks_writelines.extend(["source '%s'\n" % src for src
in new.get('source', [])
if src not in current.get('source', [])])
self.write_statements(berks_writelines)
return self.to_dict()
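# Illustrative end-to-end usage sketch (the paths are assumptions):
#
#   from fastfood.book import CookBook
#
#   mine = CookBook('/path/to/my-cookbook')
#   theirs = CookBook('/path/to/generated-cookbook')
#
#   # copy any missing 'depends' lines into my metadata.rb
#   mine.metadata.merge(theirs.metadata)
#
#   # copy any missing 'cookbook' / 'source' lines into my Berksfile
#   mine.berksfile.merge(theirs.berksfile)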
| samstav/fastfood | fastfood/book.py | Python | apache-2.0 | 10,125 | 0 |